]>
Commit | Line | Data |
---|---|---|
dcc68b5e MS |
1 | /* |
2 | * Addpath TX ID selection, and related utilities | |
3 | * Copyright (C) 2018 Amazon.com, Inc. or its affiliates | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License as published by the Free | |
7 | * Software Foundation; either version 2 of the License, or (at your option) | |
8 | * any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along | |
16 | * with this program; see the file COPYING; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
18 | */ | |
19 | ||
20 | #include "bgp_addpath.h" | |
21 | #include "bgp_route.h" | |
22 | ||
23 | static struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = { | |
24 | { | |
25 | .config_name = "addpath-tx-all-paths", | |
26 | .human_name = "All", | |
27 | .human_description = "Advertise all paths via addpath", | |
28 | .type_json_name = "addpathTxAllPaths", | |
29 | .id_json_name = "addpathTxIdAll" | |
30 | }, | |
31 | { | |
32 | .config_name = "addpath-tx-bestpath-per-AS", | |
33 | .human_name = "Best-Per-AS", | |
34 | .human_description = "Advertise bestpath per AS via addpath", | |
35 | .type_json_name = "addpathTxBestpathPerAS", | |
36 | .id_json_name = "addpathTxIdBestPerAS" | |
37 | } | |
38 | }; | |
39 | ||
40 | static struct bgp_addpath_strategy_names unknown_names = { | |
41 | .config_name = "addpath-tx-unknown", | |
42 | .human_name = "Unknown-Addpath-Strategy", | |
43 | .human_description = "Unknown Addpath Strategy", | |
44 | .type_json_name = "addpathTxUnknown", | |
45 | .id_json_name = "addpathTxIdUnknown" | |
46 | }; | |
47 | ||
48 | /* | |
49 | * Returns a structure full of strings associated with an addpath type. Will | |
50 | * never return null. | |
51 | */ | |
52 | struct bgp_addpath_strategy_names * | |
53 | bgp_addpath_names(enum bgp_addpath_strat strat) | |
54 | { | |
55 | if (strat < BGP_ADDPATH_MAX) | |
56 | return &(strat_names[strat]); | |
57 | else | |
58 | return &unknown_names; | |
59 | }; | |
60 | ||
61 | /* | |
62 | * Returns if any peer is transmitting addpaths for a given afi/safi. | |
63 | */ | |
64 | int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, | |
65 | safi_t safi) | |
66 | { | |
67 | return d->total_peercount[afi][safi] > 0; | |
68 | } | |
69 | ||
70 | /* | |
71 | * Initialize the BGP instance level data for addpath. | |
72 | */ | |
73 | void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d) | |
74 | { | |
75 | safi_t safi; | |
76 | afi_t afi; | |
77 | int i; | |
78 | ||
79 | for (afi = AFI_IP; afi < AFI_MAX; afi++) { | |
80 | for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) { | |
81 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
82 | d->id_allocators[afi][safi][i] = NULL; | |
83 | d->peercount[afi][safi][i] = 0; | |
84 | } | |
85 | d->total_peercount[afi][safi] = 0; | |
86 | } | |
87 | } | |
88 | } | |
89 | ||
90 | /* | |
91 | * Free up resources associated with BGP route info structures. | |
92 | */ | |
93 | void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d, | |
94 | struct bgp_addpath_node_data *nd) | |
95 | { | |
96 | int i; | |
97 | ||
98 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
99 | if (d->addpath_tx_id[i] != IDALLOC_INVALID) | |
100 | idalloc_free_to_pool(&nd->free_ids[i], | |
101 | d->addpath_tx_id[i]); | |
102 | } | |
103 | } | |
104 | ||
105 | /* | |
106 | * Return the addpath ID used to send a particular route, to a particular peer, | |
107 | * in a particular AFI/SAFI. | |
108 | */ | |
109 | uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi, | |
110 | struct bgp_addpath_info_data *d) | |
111 | { | |
112 | if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX) | |
113 | return d->addpath_tx_id[peer->addpath_type[afi][safi]]; | |
114 | else | |
115 | return IDALLOC_INVALID; | |
116 | } | |
117 | ||
118 | /* | |
119 | * Returns true if the path has an assigned addpath ID for any of the addpath | |
120 | * strategies. | |
121 | */ | |
122 | int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d) | |
123 | { | |
124 | int i; | |
125 | ||
126 | for (i = 0; i < BGP_ADDPATH_MAX; i++) | |
127 | if (d->addpath_tx_id[i] != 0) | |
128 | return 1; | |
129 | ||
130 | return 0; | |
131 | } | |
132 | ||
133 | /* | |
134 | * Releases any ID's associated with the BGP prefix. | |
135 | */ | |
136 | void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd, | |
137 | struct bgp_addpath_node_data *nd, afi_t afi, | |
138 | safi_t safi) | |
139 | { | |
140 | int i; | |
141 | ||
142 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
143 | idalloc_drain_pool(bd->id_allocators[afi][safi][i], | |
144 | &(nd->free_ids[i])); | |
145 | } | |
146 | } | |
147 | ||
148 | /* | |
149 | * Check to see if the addpath strategy requires DMED to be configured to work. | |
150 | */ | |
151 | int bgp_addpath_dmed_required(int strategy) | |
152 | { | |
153 | return strategy == BGP_ADDPATH_BEST_PER_AS; | |
154 | } | |
155 | ||
156 | /* | |
157 | * Return true if this is a path we should advertise due to a | |
158 | * configured addpath-tx knob | |
159 | */ | |
160 | int bgp_addpath_tx_path(enum bgp_addpath_strat strat, | |
161 | struct bgp_path_info *pi) | |
162 | { | |
163 | switch (strat) { | |
164 | case BGP_ADDPATH_NONE: | |
165 | return 0; | |
166 | case BGP_ADDPATH_ALL: | |
167 | return 1; | |
168 | case BGP_ADDPATH_BEST_PER_AS: | |
169 | if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED)) | |
170 | return 1; | |
171 | else | |
172 | return 0; | |
173 | default: | |
174 | return 0; | |
175 | } | |
176 | } | |
177 | ||
6ff96d00 RW |
178 | static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi, |
179 | enum bgp_addpath_strat addpath_type, | |
180 | struct bgp_node *rn) | |
181 | { | |
182 | struct bgp_path_info *pi; | |
183 | ||
184 | idalloc_drain_pool( | |
185 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type], | |
186 | &(rn->tx_addpath.free_ids[addpath_type])); | |
187 | for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = pi->next) { | |
188 | if (pi->tx_addpath.addpath_tx_id[addpath_type] | |
189 | != IDALLOC_INVALID) { | |
190 | idalloc_free( | |
191 | bgp->tx_addpath | |
192 | .id_allocators[afi][safi][addpath_type], | |
193 | pi->tx_addpath.addpath_tx_id[addpath_type]); | |
194 | pi->tx_addpath.addpath_tx_id[addpath_type] = | |
195 | IDALLOC_INVALID; | |
196 | } | |
197 | } | |
198 | } | |
199 | ||
dcc68b5e MS |
200 | /* |
201 | * Purge all addpath ID's on a BGP instance associated with the addpath | |
202 | * strategy, and afi/safi combination. This lets us let go of all memory held to | |
203 | * track ID numbers associated with an addpath type not in use. Since | |
204 | * post-bestpath ID processing is skipped for types not used, this is the only | |
205 | * chance to free this data. | |
206 | */ | |
207 | static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi, | |
208 | enum bgp_addpath_strat addpath_type) | |
209 | { | |
6ff96d00 | 210 | struct bgp_node *rn, *nrn; |
dcc68b5e MS |
211 | |
212 | for (rn = bgp_table_top(bgp->rib[afi][safi]); rn; | |
213 | rn = bgp_route_next(rn)) { | |
6ff96d00 RW |
214 | if (safi == SAFI_MPLS_VPN) { |
215 | struct bgp_table *table; | |
216 | ||
217 | table = bgp_node_get_bgp_table_info(rn); | |
218 | if (!table) | |
219 | continue; | |
220 | ||
221 | for (nrn = bgp_table_top(table); nrn; | |
222 | nrn = bgp_route_next(nrn)) | |
223 | bgp_addpath_flush_type_rn(bgp, afi, safi, | |
224 | addpath_type, nrn); | |
225 | } else { | |
226 | bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type, | |
227 | rn); | |
dcc68b5e MS |
228 | } |
229 | } | |
230 | ||
231 | idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]); | |
232 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL; | |
233 | } | |
234 | ||
235 | /* | |
236 | * Allocate an Addpath ID for the given type on a path, if necessary. | |
237 | */ | |
238 | static void bgp_addpath_populate_path(struct id_alloc *allocator, | |
239 | struct bgp_path_info *path, | |
240 | enum bgp_addpath_strat addpath_type) | |
241 | { | |
242 | if (bgp_addpath_tx_path(addpath_type, path)) { | |
243 | path->tx_addpath.addpath_tx_id[addpath_type] = | |
244 | idalloc_allocate(allocator); | |
245 | } | |
246 | } | |
247 | ||
248 | /* | |
249 | * Compute addpath ID's on a BGP instance associated with the addpath strategy, | |
250 | * and afi/safi combination. Since we won't waste the time computing addpath IDs | |
251 | * for unused strategies, the first time a peer is configured to use a strategy, | |
252 | * we have to backfill the data. | |
253 | */ | |
254 | static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi, | |
255 | enum bgp_addpath_strat addpath_type) | |
256 | { | |
6ff96d00 | 257 | struct bgp_node *rn, *nrn; |
dcc68b5e MS |
258 | char buf[200]; |
259 | struct id_alloc *allocator; | |
260 | ||
261 | snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d", | |
262 | bgp_addpath_names(addpath_type)->config_name, (int)afi, | |
263 | (int)safi); | |
264 | buf[sizeof(buf) - 1] = '\0'; | |
265 | zlog_info("Computing addpath IDs for addpath type %s", | |
266 | bgp_addpath_names(addpath_type)->human_name); | |
267 | ||
268 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = | |
269 | idalloc_new(buf); | |
270 | ||
271 | idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type], | |
272 | BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE); | |
273 | ||
274 | allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type]; | |
275 | ||
276 | for (rn = bgp_table_top(bgp->rib[afi][safi]); rn; | |
6ff96d00 RW |
277 | rn = bgp_route_next(rn)) { |
278 | struct bgp_path_info *bi; | |
279 | ||
280 | if (safi == SAFI_MPLS_VPN) { | |
281 | struct bgp_table *table; | |
282 | ||
283 | table = bgp_node_get_bgp_table_info(rn); | |
284 | if (!table) | |
285 | continue; | |
286 | ||
287 | for (nrn = bgp_table_top(table); nrn; | |
288 | nrn = bgp_route_next(nrn)) | |
289 | for (bi = bgp_node_get_bgp_path_info(nrn); bi; | |
290 | bi = bi->next) | |
291 | bgp_addpath_populate_path(allocator, bi, | |
292 | addpath_type); | |
293 | } else { | |
294 | for (bi = bgp_node_get_bgp_path_info(rn); bi; | |
295 | bi = bi->next) | |
296 | bgp_addpath_populate_path(allocator, bi, | |
297 | addpath_type); | |
298 | } | |
299 | } | |
dcc68b5e MS |
300 | } |
301 | ||
302 | /* | |
303 | * Handle updates to a peer or group's addpath strategy. If after adjusting | |
304 | * counts a addpath strategy is in use for the first time, or no longer in use, | |
305 | * the IDs for that strategy will be populated or flushed. | |
306 | */ | |
307 | void bgp_addpath_type_changed(struct bgp *bgp) | |
308 | { | |
309 | afi_t afi; | |
310 | safi_t safi; | |
311 | struct listnode *node, *nnode; | |
312 | struct peer *peer; | |
313 | int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX]; | |
314 | enum bgp_addpath_strat type; | |
315 | ||
316 | FOREACH_AFI_SAFI(afi, safi) { | |
317 | for (type=0; type<BGP_ADDPATH_MAX; type++) { | |
318 | peer_count[afi][safi][type] = 0; | |
319 | } | |
320 | } | |
321 | ||
322 | for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { | |
323 | FOREACH_AFI_SAFI(afi, safi) { | |
324 | type = peer->addpath_type[afi][safi]; | |
325 | if (type != BGP_ADDPATH_NONE) { | |
326 | peer_count[afi][safi][type] += 1; | |
327 | } | |
328 | } | |
329 | } | |
330 | ||
331 | FOREACH_AFI_SAFI(afi, safi) { | |
332 | for (type=0; type<BGP_ADDPATH_MAX; type++) { | |
333 | int old = bgp->tx_addpath.peercount[afi][safi][type]; | |
334 | int new = peer_count[afi][safi][type]; | |
335 | ||
336 | bgp->tx_addpath.peercount[afi][safi][type] = new; | |
337 | ||
338 | if (old == 0 && new != 0) { | |
339 | bgp_addpath_populate_type(bgp, afi, safi, | |
340 | type); | |
341 | } else if (old != 0 && new == 0) { | |
342 | bgp_addpath_flush_type(bgp, afi, safi, type); | |
343 | } | |
344 | } | |
345 | } | |
346 | } | |
347 | ||
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect. When called on a group's conf peer, recurses into each
 * member that still carries the old type.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	/* No-op if the type is unchanged; also terminates the recursion. */
	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	/* Recount users per strategy; populates or flushes IDs as needed. */
	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS depends on the DMED selection flag, so
			 * force deterministic-med on and recompute bestpaths.
			 */
			if (!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%s due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer->host);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventually end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Propagate the change to members still on old_type;
			 * members with an explicit override are left alone.
			 */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								 afi,
								 safi,
								 addpath_type);
				}
			}
		}
	} else {
		/* Individual peer: reset the session so the new addpath
		 * capability/config takes effect.
		 */
		peer_change_action(peer, afi, safi, peer_change_reset);
	}

}
415 | ||
416 | /* | |
417 | * Intended to run after bestpath. This function will take TX IDs from paths | |
418 | * that no longer need them, and give them to paths that do. This prevents | |
419 | * best-per-as updates from needing to do a separate withdraw and update just to | |
420 | * swap out which path is sent. | |
421 | */ | |
422 | void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi, | |
423 | safi_t safi) | |
424 | { | |
425 | int i; | |
426 | struct bgp_path_info *pi; | |
427 | struct id_alloc_pool **pool_ptr; | |
428 | ||
429 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
430 | struct id_alloc *alloc = | |
431 | bgp->tx_addpath.id_allocators[afi][safi][i]; | |
432 | pool_ptr = &(bn->tx_addpath.free_ids[i]); | |
433 | ||
434 | if (bgp->tx_addpath.peercount[afi][safi][i] == 0) | |
435 | continue; | |
436 | ||
437 | /* Free Unused IDs back to the pool.*/ | |
6f94b685 | 438 | for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) { |
dcc68b5e MS |
439 | if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID |
440 | && !bgp_addpath_tx_path(i, pi)) { | |
441 | idalloc_free_to_pool(pool_ptr, | |
442 | pi->tx_addpath.addpath_tx_id[i]); | |
443 | pi->tx_addpath.addpath_tx_id[i] = | |
444 | IDALLOC_INVALID; | |
445 | } | |
446 | } | |
447 | ||
448 | /* Give IDs to paths that need them (pulling from the pool) */ | |
6f94b685 | 449 | for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) { |
dcc68b5e MS |
450 | if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID |
451 | && bgp_addpath_tx_path(i, pi)) { | |
452 | pi->tx_addpath.addpath_tx_id[i] = | |
453 | idalloc_allocate_prefer_pool( | |
454 | alloc, pool_ptr); | |
455 | } | |
456 | } | |
457 | ||
458 | /* Free any IDs left in the pool to the main allocator */ | |
459 | idalloc_drain_pool(alloc, pool_ptr); | |
460 | } | |
461 | } |