]>
Commit | Line | Data |
---|---|---|
dcc68b5e MS |
1 | /* |
2 | * Addpath TX ID selection, and related utilities | |
3 | * Copyright (C) 2018 Amazon.com, Inc. or its affiliates | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License as published by the Free | |
7 | * Software Foundation; either version 2 of the License, or (at your option) | |
8 | * any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along | |
16 | * with this program; see the file COPYING; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
18 | */ | |
19 | ||
20 | #include "bgp_addpath.h" | |
21 | #include "bgp_route.h" | |
22 | ||
/*
 * Display-string table for the addpath strategies, indexed by
 * enum bgp_addpath_strat; entry order must match the enum declared in
 * bgp_addpath.h. Looked up via bgp_addpath_names() below.
 */
static struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		/* CLI keyword, human-readable names, and JSON keys for
		 * "advertise every path".
		 */
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		/* Same set of strings for "advertise the bestpath of each
		 * neighboring AS".
		 */
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
39 | ||
/*
 * Fallback strings returned by bgp_addpath_names() for any strategy value
 * outside [0, BGP_ADDPATH_MAX), so callers never have to handle a NULL
 * name table.
 */
static struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
47 | ||
48 | /* | |
49 | * Returns a structure full of strings associated with an addpath type. Will | |
50 | * never return null. | |
51 | */ | |
52 | struct bgp_addpath_strategy_names * | |
53 | bgp_addpath_names(enum bgp_addpath_strat strat) | |
54 | { | |
55 | if (strat < BGP_ADDPATH_MAX) | |
56 | return &(strat_names[strat]); | |
57 | else | |
58 | return &unknown_names; | |
59 | }; | |
60 | ||
61 | /* | |
62 | * Returns if any peer is transmitting addpaths for a given afi/safi. | |
63 | */ | |
64 | int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, | |
65 | safi_t safi) | |
66 | { | |
67 | return d->total_peercount[afi][safi] > 0; | |
68 | } | |
69 | ||
70 | /* | |
71 | * Initialize the BGP instance level data for addpath. | |
72 | */ | |
73 | void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d) | |
74 | { | |
75 | safi_t safi; | |
76 | afi_t afi; | |
77 | int i; | |
78 | ||
79 | for (afi = AFI_IP; afi < AFI_MAX; afi++) { | |
80 | for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) { | |
81 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
82 | d->id_allocators[afi][safi][i] = NULL; | |
83 | d->peercount[afi][safi][i] = 0; | |
84 | } | |
85 | d->total_peercount[afi][safi] = 0; | |
86 | } | |
87 | } | |
88 | } | |
89 | ||
90 | /* | |
91 | * Free up resources associated with BGP route info structures. | |
92 | */ | |
93 | void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d, | |
94 | struct bgp_addpath_node_data *nd) | |
95 | { | |
96 | int i; | |
97 | ||
98 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
99 | if (d->addpath_tx_id[i] != IDALLOC_INVALID) | |
100 | idalloc_free_to_pool(&nd->free_ids[i], | |
101 | d->addpath_tx_id[i]); | |
102 | } | |
103 | } | |
104 | ||
105 | /* | |
106 | * Return the addpath ID used to send a particular route, to a particular peer, | |
107 | * in a particular AFI/SAFI. | |
108 | */ | |
109 | uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi, | |
110 | struct bgp_addpath_info_data *d) | |
111 | { | |
112 | if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX) | |
113 | return d->addpath_tx_id[peer->addpath_type[afi][safi]]; | |
114 | else | |
115 | return IDALLOC_INVALID; | |
116 | } | |
117 | ||
118 | /* | |
119 | * Returns true if the path has an assigned addpath ID for any of the addpath | |
120 | * strategies. | |
121 | */ | |
122 | int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d) | |
123 | { | |
124 | int i; | |
125 | ||
126 | for (i = 0; i < BGP_ADDPATH_MAX; i++) | |
127 | if (d->addpath_tx_id[i] != 0) | |
128 | return 1; | |
129 | ||
130 | return 0; | |
131 | } | |
132 | ||
133 | /* | |
134 | * Releases any ID's associated with the BGP prefix. | |
135 | */ | |
136 | void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd, | |
137 | struct bgp_addpath_node_data *nd, afi_t afi, | |
138 | safi_t safi) | |
139 | { | |
140 | int i; | |
141 | ||
142 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
143 | idalloc_drain_pool(bd->id_allocators[afi][safi][i], | |
144 | &(nd->free_ids[i])); | |
145 | } | |
146 | } | |
147 | ||
148 | /* | |
149 | * Check to see if the addpath strategy requires DMED to be configured to work. | |
150 | */ | |
151 | int bgp_addpath_dmed_required(int strategy) | |
152 | { | |
153 | return strategy == BGP_ADDPATH_BEST_PER_AS; | |
154 | } | |
155 | ||
156 | /* | |
157 | * Return true if this is a path we should advertise due to a | |
158 | * configured addpath-tx knob | |
159 | */ | |
160 | int bgp_addpath_tx_path(enum bgp_addpath_strat strat, | |
161 | struct bgp_path_info *pi) | |
162 | { | |
163 | switch (strat) { | |
164 | case BGP_ADDPATH_NONE: | |
165 | return 0; | |
166 | case BGP_ADDPATH_ALL: | |
167 | return 1; | |
168 | case BGP_ADDPATH_BEST_PER_AS: | |
169 | if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED)) | |
170 | return 1; | |
171 | else | |
172 | return 0; | |
173 | default: | |
174 | return 0; | |
175 | } | |
176 | } | |
177 | ||
178 | /* | |
179 | * Purge all addpath ID's on a BGP instance associated with the addpath | |
180 | * strategy, and afi/safi combination. This lets us let go of all memory held to | |
181 | * track ID numbers associated with an addpath type not in use. Since | |
182 | * post-bestpath ID processing is skipped for types not used, this is the only | |
183 | * chance to free this data. | |
184 | */ | |
185 | static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi, | |
186 | enum bgp_addpath_strat addpath_type) | |
187 | { | |
188 | struct bgp_node *rn; | |
189 | struct bgp_path_info *pi; | |
190 | ||
191 | for (rn = bgp_table_top(bgp->rib[afi][safi]); rn; | |
192 | rn = bgp_route_next(rn)) { | |
193 | idalloc_drain_pool( | |
194 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type], | |
195 | &(rn->tx_addpath.free_ids[addpath_type])); | |
196 | for (pi = rn->info; pi; pi = pi->next) { | |
197 | if (pi->tx_addpath.addpath_tx_id[addpath_type] | |
198 | != IDALLOC_INVALID) { | |
199 | idalloc_free( | |
200 | bgp->tx_addpath | |
201 | .id_allocators[afi][safi] | |
202 | [addpath_type], | |
203 | pi->tx_addpath | |
204 | .addpath_tx_id[addpath_type]); | |
205 | pi->tx_addpath.addpath_tx_id[addpath_type] = | |
206 | IDALLOC_INVALID; | |
207 | } | |
208 | } | |
209 | } | |
210 | ||
211 | idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]); | |
212 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL; | |
213 | } | |
214 | ||
215 | /* | |
216 | * Allocate an Addpath ID for the given type on a path, if necessary. | |
217 | */ | |
218 | static void bgp_addpath_populate_path(struct id_alloc *allocator, | |
219 | struct bgp_path_info *path, | |
220 | enum bgp_addpath_strat addpath_type) | |
221 | { | |
222 | if (bgp_addpath_tx_path(addpath_type, path)) { | |
223 | path->tx_addpath.addpath_tx_id[addpath_type] = | |
224 | idalloc_allocate(allocator); | |
225 | } | |
226 | } | |
227 | ||
228 | /* | |
229 | * Compute addpath ID's on a BGP instance associated with the addpath strategy, | |
230 | * and afi/safi combination. Since we won't waste the time computing addpath IDs | |
231 | * for unused strategies, the first time a peer is configured to use a strategy, | |
232 | * we have to backfill the data. | |
233 | */ | |
234 | static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi, | |
235 | enum bgp_addpath_strat addpath_type) | |
236 | { | |
237 | struct bgp_node *rn; | |
238 | struct bgp_path_info *bi; | |
239 | char buf[200]; | |
240 | struct id_alloc *allocator; | |
241 | ||
242 | snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d", | |
243 | bgp_addpath_names(addpath_type)->config_name, (int)afi, | |
244 | (int)safi); | |
245 | buf[sizeof(buf) - 1] = '\0'; | |
246 | zlog_info("Computing addpath IDs for addpath type %s", | |
247 | bgp_addpath_names(addpath_type)->human_name); | |
248 | ||
249 | bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = | |
250 | idalloc_new(buf); | |
251 | ||
252 | idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type], | |
253 | BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE); | |
254 | ||
255 | allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type]; | |
256 | ||
257 | for (rn = bgp_table_top(bgp->rib[afi][safi]); rn; | |
258 | rn = bgp_route_next(rn)) | |
259 | for (bi = rn->info; bi; bi = bi->next) | |
260 | bgp_addpath_populate_path(allocator, bi, addpath_type); | |
261 | } | |
262 | ||
263 | /* | |
264 | * Handle updates to a peer or group's addpath strategy. If after adjusting | |
265 | * counts a addpath strategy is in use for the first time, or no longer in use, | |
266 | * the IDs for that strategy will be populated or flushed. | |
267 | */ | |
268 | void bgp_addpath_type_changed(struct bgp *bgp) | |
269 | { | |
270 | afi_t afi; | |
271 | safi_t safi; | |
272 | struct listnode *node, *nnode; | |
273 | struct peer *peer; | |
274 | int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX]; | |
275 | enum bgp_addpath_strat type; | |
276 | ||
277 | FOREACH_AFI_SAFI(afi, safi) { | |
278 | for (type=0; type<BGP_ADDPATH_MAX; type++) { | |
279 | peer_count[afi][safi][type] = 0; | |
280 | } | |
281 | } | |
282 | ||
283 | for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { | |
284 | FOREACH_AFI_SAFI(afi, safi) { | |
285 | type = peer->addpath_type[afi][safi]; | |
286 | if (type != BGP_ADDPATH_NONE) { | |
287 | peer_count[afi][safi][type] += 1; | |
288 | } | |
289 | } | |
290 | } | |
291 | ||
292 | FOREACH_AFI_SAFI(afi, safi) { | |
293 | for (type=0; type<BGP_ADDPATH_MAX; type++) { | |
294 | int old = bgp->tx_addpath.peercount[afi][safi][type]; | |
295 | int new = peer_count[afi][safi][type]; | |
296 | ||
297 | bgp->tx_addpath.peercount[afi][safi][type] = new; | |
298 | ||
299 | if (old == 0 && new != 0) { | |
300 | bgp_addpath_populate_type(bgp, afi, safi, | |
301 | type); | |
302 | } else if (old != 0 && new == 0) { | |
303 | bgp_addpath_flush_type(bgp, afi, safi, type); | |
304 | } | |
305 | } | |
306 | } | |
307 | } | |
308 | ||
309 | /* | |
310 | * Change the addpath type assigned to a peer, or peer group. In addition to | |
311 | * adjusting the counts, peer sessions will be reset as needed to make the | |
312 | * change take effect. | |
313 | */ | |
314 | void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi, | |
315 | enum bgp_addpath_strat addpath_type) | |
316 | { | |
317 | struct bgp *bgp = peer->bgp; | |
318 | enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi]; | |
319 | struct listnode *node, *nnode; | |
320 | struct peer *tmp_peer; | |
321 | struct peer_group *group; | |
322 | ||
323 | if (addpath_type == old_type) | |
324 | return; | |
325 | ||
326 | if (addpath_type == BGP_ADDPATH_NONE && peer->group && | |
327 | !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { | |
328 | /* A "no" config on a group member inherits group */ | |
329 | addpath_type = peer->group->conf->addpath_type[afi][safi]; | |
330 | } | |
331 | ||
332 | peer->addpath_type[afi][safi] = addpath_type; | |
333 | ||
334 | bgp_addpath_type_changed(bgp); | |
335 | ||
336 | if (addpath_type != BGP_ADDPATH_NONE) { | |
337 | if (bgp_addpath_dmed_required(addpath_type)) { | |
338 | if (!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)) { | |
339 | zlog_warn( | |
340 | "%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS", | |
341 | peer->host); | |
342 | bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED); | |
343 | bgp_recalculate_all_bestpaths(bgp); | |
344 | } | |
345 | } | |
346 | } | |
347 | ||
348 | zlog_info("Resetting peer %s%s due to change in addpath config\n", | |
349 | CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "", | |
350 | peer->host); | |
351 | ||
352 | if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { | |
353 | group = peer->group; | |
354 | ||
355 | /* group will be null as peer_group_delete calls peer_delete on | |
356 | * group->conf. That peer_delete will eventuallly end up here | |
357 | * if the group was configured to tx addpaths. | |
358 | */ | |
359 | if (group != NULL) { | |
360 | for (ALL_LIST_ELEMENTS(group->peer, node, nnode, | |
361 | tmp_peer)) { | |
362 | if (tmp_peer->addpath_type[afi][safi] == | |
363 | old_type) { | |
364 | bgp_addpath_set_peer_type(tmp_peer, | |
365 | afi, | |
366 | safi, | |
367 | addpath_type); | |
368 | } | |
369 | } | |
370 | } | |
371 | } else { | |
372 | peer_change_action(peer, afi, safi, peer_change_reset); | |
373 | } | |
374 | ||
375 | } | |
376 | ||
377 | /* | |
378 | * Intended to run after bestpath. This function will take TX IDs from paths | |
379 | * that no longer need them, and give them to paths that do. This prevents | |
380 | * best-per-as updates from needing to do a separate withdraw and update just to | |
381 | * swap out which path is sent. | |
382 | */ | |
383 | void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi, | |
384 | safi_t safi) | |
385 | { | |
386 | int i; | |
387 | struct bgp_path_info *pi; | |
388 | struct id_alloc_pool **pool_ptr; | |
389 | ||
390 | for (i = 0; i < BGP_ADDPATH_MAX; i++) { | |
391 | struct id_alloc *alloc = | |
392 | bgp->tx_addpath.id_allocators[afi][safi][i]; | |
393 | pool_ptr = &(bn->tx_addpath.free_ids[i]); | |
394 | ||
395 | if (bgp->tx_addpath.peercount[afi][safi][i] == 0) | |
396 | continue; | |
397 | ||
398 | /* Free Unused IDs back to the pool.*/ | |
399 | for (pi = bn->info; pi; pi = pi->next) { | |
400 | if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID | |
401 | && !bgp_addpath_tx_path(i, pi)) { | |
402 | idalloc_free_to_pool(pool_ptr, | |
403 | pi->tx_addpath.addpath_tx_id[i]); | |
404 | pi->tx_addpath.addpath_tx_id[i] = | |
405 | IDALLOC_INVALID; | |
406 | } | |
407 | } | |
408 | ||
409 | /* Give IDs to paths that need them (pulling from the pool) */ | |
410 | for (pi = bn->info; pi; pi = pi->next) { | |
411 | if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID | |
412 | && bgp_addpath_tx_path(i, pi)) { | |
413 | pi->tx_addpath.addpath_tx_id[i] = | |
414 | idalloc_allocate_prefer_pool( | |
415 | alloc, pool_ptr); | |
416 | } | |
417 | } | |
418 | ||
419 | /* Free any IDs left in the pool to the main allocator */ | |
420 | idalloc_drain_pool(alloc, pool_ptr); | |
421 | } | |
422 | } |