1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**
3 * bgp_updgrp_adv.c: BGP update group advertisement and adjacency
4 * maintenance
5 *
6 *
7 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
8 *
9 * @author Avneesh Sachdev <avneesh@sproute.net>
10 * @author Rajesh Varadarajan <rajesh@sproute.net>
11 * @author Pradosh Mohapatra <pradosh@sproute.net>
12 */
13
14 #include <zebra.h>
15
16 #include "command.h"
17 #include "memory.h"
18 #include "prefix.h"
19 #include "hash.h"
20 #include "frrevent.h"
21 #include "queue.h"
22 #include "routemap.h"
23 #include "filter.h"
24
25 #include "bgpd/bgpd.h"
26 #include "bgpd/bgp_table.h"
27 #include "bgpd/bgp_debug.h"
28 #include "bgpd/bgp_route.h"
29 #include "bgpd/bgp_advertise.h"
30 #include "bgpd/bgp_attr.h"
31 #include "bgpd/bgp_aspath.h"
32 #include "bgpd/bgp_packet.h"
33 #include "bgpd/bgp_fsm.h"
34 #include "bgpd/bgp_mplsvpn.h"
35 #include "bgpd/bgp_updgrp.h"
37 #include "bgpd/bgp_addpath.h"
38
39
40 /********************
41 * PRIVATE FUNCTIONS
42 ********************/
43 static int bgp_adj_out_compare(const struct bgp_adj_out *o1,
44 const struct bgp_adj_out *o2)
45 {
46 if (o1->subgroup < o2->subgroup)
47 return -1;
48
49 if (o1->subgroup > o2->subgroup)
50 return 1;
51
52 if (o1->addpath_tx_id < o2->addpath_tx_id)
53 return -1;
54
55 if (o1->addpath_tx_id > o2->addpath_tx_id)
56 return 1;
57
58 return 0;
59 }
60 RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare);
61
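/*
 * Look up the adj-out entry on 'dest' that belongs to the given
 * subgroup and addpath_tx_id.  Returns NULL if no such entry exists.
 */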
62 static inline struct bgp_adj_out *adj_lookup(struct bgp_dest *dest,
63 struct update_subgroup *subgrp,
64 uint32_t addpath_tx_id)
65 {
66 struct bgp_adj_out lookup;
67
68 if (!dest || !subgrp)
69 return NULL;
70
71 /* update-groups that do not support addpath will pass 0 for
72 * addpath_tx_id. */
73 lookup.subgroup = subgrp;
74 lookup.addpath_tx_id = addpath_tx_id;
75
76 return RB_FIND(bgp_adj_out_rb, &dest->adj_out, &lookup);
77 }
78
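/*
 * Detach an adj-out entry from its subgroup's adjacency list and from
 * the destination's RB tree, drop the dest reference taken when the
 * entry was created, and free it.
 */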
79 static void adj_free(struct bgp_adj_out *adj)
80 {
81 TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
82 SUBGRP_DECR_STAT(adj->subgroup, adj_count);
83
84 RB_REMOVE(bgp_adj_out_rb, &adj->dest->adj_out, adj);
85 bgp_dest_unlock_node(adj->dest);
86
87 XFREE(MTYPE_BGP_ADJ_OUT, adj);
88 }
89
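/*
 * Withdraw any addpath advertisements on this destination whose
 * addpath_tx_id no longer corresponds to a path currently present
 * for the subgroup's peer.
 */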
90 static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
91 struct update_subgroup *subgrp)
92 {
93 struct bgp_adj_out *adj, *adj_next;
94 uint32_t id;
95 struct bgp_path_info *pi;
96 afi_t afi = SUBGRP_AFI(subgrp);
97 safi_t safi = SUBGRP_SAFI(subgrp);
98 struct peer *peer = SUBGRP_PEER(subgrp);
99
100 /* Look through all of the paths we have advertised for this rn and send
101 * a withdraw for the ones that are no longer present */
102 RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, adj_next) {
103 if (adj->subgroup != subgrp)
104 continue;
105
106 for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi;
107 pi = pi->next) {
108 id = bgp_addpath_id_for_peer(peer, afi, safi,
109 &pi->tx_addpath);
110
111 if (id == adj->addpath_tx_id) {
112 break;
113 }
114 }
115
116 if (!pi) {
117 subgroup_process_announce_selected(subgrp, NULL,
118 ctx->dest, afi, safi,
119 adj->addpath_tx_id);
120 }
121 }
122 }
123
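/*
 * Update-group walk callback used by group_announce_route(): announce
 * the selected path(s) for ctx->dest (or withdraw stale ones) for every
 * subgroup whose coalesce timer is not running, and notify the
 * conditional advertisement scanner for each subgroup.
 */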
124 static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
125 {
126 struct updwalk_context *ctx = arg;
127 struct update_subgroup *subgrp;
128 struct bgp_path_info *pi;
129 afi_t afi;
130 safi_t safi;
131 struct peer *peer;
132 struct bgp_adj_out *adj, *adj_next;
133 bool addpath_capable;
134
135 afi = UPDGRP_AFI(updgrp);
136 safi = UPDGRP_SAFI(updgrp);
137 peer = UPDGRP_PEER(updgrp);
138 addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);
139
140 if (BGP_DEBUG(update, UPDATE_OUT))
141 zlog_debug("%s: afi=%s, safi=%s, p=%pRN", __func__,
142 afi2str(afi), safi2str(safi),
143 bgp_dest_to_rnode(ctx->dest));
144
145 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
146
147 /*
148  * Skip the subgroups that have the coalesce timer running.
149  * We will walk the entire prefix table for those subgroups
150  * when the coalesce timer fires.
151  */
152 if (!subgrp->t_coalesce) {
153
154 /* An update-group that uses addpath */
155 if (addpath_capable) {
156 subgrp_withdraw_stale_addpath(ctx, subgrp);
157
158 for (pi = bgp_dest_get_bgp_path_info(ctx->dest);
159 pi; pi = pi->next) {
160 /* Skip the bestpath for now */
161 if (pi == ctx->pi)
162 continue;
163
164 subgroup_process_announce_selected(
165 subgrp, pi, ctx->dest, afi,
166 safi,
167 bgp_addpath_id_for_peer(
168 peer, afi, safi,
169 &pi->tx_addpath));
170 }
171
172 /* Process the bestpath last so the "show [ip] bgp
173  * neighbor x.x.x.x advertised-routes" output shows
174  * the attributes from the bestpath.
175  */
176 if (ctx->pi)
177 subgroup_process_announce_selected(
178 subgrp, ctx->pi, ctx->dest, afi,
179 safi,
180 bgp_addpath_id_for_peer(
181 peer, afi, safi,
182 &ctx->pi->tx_addpath));
183 }
184 /* An update-group that does not use addpath */
185 else {
186 if (ctx->pi) {
187 subgroup_process_announce_selected(
188 subgrp, ctx->pi, ctx->dest, afi,
189 safi,
190 bgp_addpath_id_for_peer(
191 peer, afi, safi,
192 &ctx->pi->tx_addpath));
193 } else {
194 /* Find the addpath_tx_id of the path we
195 * had advertised and
196 * send a withdraw */
197 RB_FOREACH_SAFE (adj, bgp_adj_out_rb,
198 &ctx->dest->adj_out,
199 adj_next) {
200 if (adj->subgroup == subgrp) {
201 subgroup_process_announce_selected(
202 subgrp, NULL,
203 ctx->dest, afi,
204 safi,
205 adj->addpath_tx_id);
206 }
207 }
208 }
209 }
210 }
211
212 /* Notify BGP Conditional advertisement */
213 bgp_notify_conditional_adv_scanner(subgrp);
214 }
215
216 return UPDWALK_CONTINUE;
217 }
218
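/*
 * Dump a subgroup's adj-out entries to the vty: entries queued for
 * advertisement (UPDWALK_FLAGS_ADVQUEUE) and/or already advertised
 * ones (UPDWALK_FLAGS_ADVERTISED), depending on 'flags'.
 */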
219 static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
220 struct vty *vty, uint8_t flags)
221 {
222 struct bgp_table *table;
223 struct bgp_adj_out *adj;
224 unsigned long output_count;
225 struct bgp_dest *dest;
226 int header1 = 1;
227 struct bgp *bgp;
228 int header2 = 1;
229
230 bgp = SUBGRP_INST(subgrp);
231 if (!bgp)
232 return;
233
234 table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
235
236 output_count = 0;
237
238 for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
239 const struct prefix *dest_p = bgp_dest_get_prefix(dest);
240
241 RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out) {
242 if (adj->subgroup != subgrp)
243 continue;
244
245 if (header1) {
246 vty_out(vty,
247 "BGP table version is %" PRIu64
248 ", local router ID is %pI4\n",
249 table->version, &bgp->router_id);
250 vty_out(vty, BGP_SHOW_SCODE_HEADER);
251 vty_out(vty, BGP_SHOW_OCODE_HEADER);
252 header1 = 0;
253 }
254 if (header2) {
255 vty_out(vty, BGP_SHOW_HEADER);
256 header2 = 0;
257 }
258 if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv &&
259 adj->adv->baa) {
260 route_vty_out_tmp(
261 vty, dest, dest_p, adj->adv->baa->attr,
262 SUBGRP_SAFI(subgrp), 0, NULL, false);
263 output_count++;
264 }
265 if ((flags & UPDWALK_FLAGS_ADVERTISED) && adj->attr) {
266 route_vty_out_tmp(vty, dest, dest_p, adj->attr,
267 SUBGRP_SAFI(subgrp), 0, NULL,
268 false);
269 output_count++;
270 }
271 }
272 }
273 if (output_count != 0)
274 vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
275 }
276
277 static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg)
278 {
279 struct updwalk_context *ctx = arg;
280 struct update_subgroup *subgrp;
281 struct vty *vty;
282
283 vty = ctx->vty;
284 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
285 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
286 continue;
287 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
288 updgrp->id, subgrp->id);
289 subgrp_show_adjq_vty(subgrp, vty, ctx->flags);
290 }
291 return UPDWALK_CONTINUE;
292 }
293
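/*
 * Walk the update-groups of the given afi/safi and show their adj-out
 * state; a non-zero 'id' restricts the output to that subgroup.
 */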
294 static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
295 struct vty *vty, uint64_t id, uint8_t flags)
296 {
297 struct updwalk_context ctx;
298 memset(&ctx, 0, sizeof(ctx));
299 ctx.vty = vty;
300 ctx.subgrp_id = id;
301 ctx.flags = flags;
302
303 update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
304 }
305
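/*
 * Handle expiry of a subgroup's coalesce timer: clear the timer state
 * and announce the subgroup's routes.  See the comment below for why
 * the member peers' routeadv timers are then fired immediately.
 */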
306 static void subgroup_coalesce_timer(struct event *thread)
307 {
308 struct update_subgroup *subgrp;
309 struct bgp *bgp;
310
311 subgrp = EVENT_ARG(thread);
312 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
313 zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes upon coalesce timer expiry(%u ms)",
314 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
315 subgrp->v_coalesce);
316 subgrp->t_coalesce = NULL;
317 subgrp->v_coalesce = 0;
318 bgp = SUBGRP_INST(subgrp);
319 subgroup_announce_route(subgrp);
320
321
322 /* While the announce_route() may kick off the route
323  * advertisement timer for the members of the subgroup,
324  * we'd like to send the initial updates much faster
325  * (i.e., without enforcing MRAI).
326  *
327  * Also, if there were no routes to announce, this is
328  * the method currently employed to trigger the EOR.
329  */
330 if (!bgp_update_delay_active(SUBGRP_INST(subgrp)) &&
331 !(BGP_SUPPRESS_FIB_ENABLED(bgp))) {
332 struct peer_af *paf;
333 struct peer *peer;
334
335 SUBGRP_FOREACH_PEER (subgrp, paf) {
336 peer = PAF_PEER(paf);
337 EVENT_OFF(peer->t_routeadv);
338 BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
339 }
340 }
341 }
342
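/*
 * Walk callback for update_group_announce(): force a full
 * re-announcement from every subgroup of the update-group.
 */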
343 static int update_group_announce_walkcb(struct update_group *updgrp, void *arg)
344 {
345 struct update_subgroup *subgrp;
346
347 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
348 /* Avoid suppressing duplicate routes later
349 * when processing in subgroup_announce_table().
350 */
351 SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES);
352
353 subgroup_announce_all(subgrp);
354 }
355
356 return UPDWALK_CONTINUE;
357 }
358
359 static int update_group_announce_rrc_walkcb(struct update_group *updgrp,
360 void *arg)
361 {
362 struct update_subgroup *subgrp;
363 afi_t afi;
364 safi_t safi;
365 struct peer *peer;
366
367 afi = UPDGRP_AFI(updgrp);
368 safi = UPDGRP_SAFI(updgrp);
369 peer = UPDGRP_PEER(updgrp);
370
371 /* Only announce if this is a group of route-reflector-clients */
372 if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) {
373 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
374 subgroup_announce_all(subgrp);
375 }
376 }
377
378 return UPDWALK_CONTINUE;
379 }
380
381 /********************
382 * PUBLIC FUNCTIONS
383 ********************/
384
385 /**
386 * Allocate an adj-out object. Do proper initialization of its fields,
387 * primarily its association with the subgroup and the prefix.
388 */
389 struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
390 struct bgp_dest *dest,
391 uint32_t addpath_tx_id)
392 {
393 struct bgp_adj_out *adj;
394
395 adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
396 adj->subgroup = subgrp;
397 adj->addpath_tx_id = addpath_tx_id;
398
399 RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj);
400 bgp_dest_lock_node(dest);
401 adj->dest = dest;
402
403 TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
404 SUBGRP_INCR_STAT(subgrp, adj_count);
405 return adj;
406 }
407
408
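/*
 * Remove the advertisement currently attached to 'adj' from the
 * subgroup's update (or withdraw) FIFO and free it.  Returns the next
 * advertisement sharing the same interned attribute, if any.
 */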
409 struct bgp_advertise *
410 bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
411 struct bgp_adj_out *adj)
412 {
413 struct bgp_advertise *adv;
414 struct bgp_advertise_attr *baa;
415 struct bgp_advertise *next;
416 struct bgp_adv_fifo_head *fhead;
417
418 adv = adj->adv;
419 baa = adv->baa;
420 next = NULL;
421
422 if (baa) {
423 fhead = &subgrp->sync->update;
424
425 /* Unlink myself from advertise attribute FIFO. */
426 bgp_advertise_delete(baa, adv);
427
428 /* Fetch next advertise candidate. */
429 next = baa->adv;
430
431 /* Unintern BGP advertise attribute. */
432 bgp_advertise_attr_unintern(subgrp->hash, baa);
433 } else
434 fhead = &subgrp->sync->withdraw;
435
436
437 /* Unlink myself from advertisement FIFO. */
438 bgp_adv_fifo_del(fhead, adv);
439
440 /* Free memory. */
441 bgp_advertise_free(adj->adv);
442 adj->adv = NULL;
443
444 return next;
445 }
446
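/*
 * Create or refresh the adj-out entry for (dest, subgrp) and queue an
 * UPDATE carrying 'attr' for 'path'.  When BGP_FLAG_SUPPRESS_DUPLICATES
 * is set and the attribute hash is unchanged, no UPDATE is queued but
 * the subgroup version is still advanced (see the comment below).
 */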
447 void bgp_adj_out_set_subgroup(struct bgp_dest *dest,
448 struct update_subgroup *subgrp, struct attr *attr,
449 struct bgp_path_info *path)
450 {
451 struct bgp_adj_out *adj = NULL;
452 struct bgp_advertise *adv;
453 struct peer *peer;
454 afi_t afi;
455 safi_t safi;
456 struct peer *adv_peer;
457 struct peer_af *paf;
458 struct bgp *bgp;
459 uint32_t attr_hash = attrhash_key_make(attr);
460
461 peer = SUBGRP_PEER(subgrp);
462 afi = SUBGRP_AFI(subgrp);
463 safi = SUBGRP_SAFI(subgrp);
464 bgp = SUBGRP_INST(subgrp);
465
466 if (DISABLE_BGP_ANNOUNCE)
467 return;
468
469 /* Look for adjacency information. */
470 adj = adj_lookup(
471 dest, subgrp,
472 bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));
473
474 if (adj) {
475 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING))
476 subgrp->pscount++;
477 } else {
478 adj = bgp_adj_out_alloc(
479 subgrp, dest,
480 bgp_addpath_id_for_peer(peer, afi, safi,
481 &path->tx_addpath));
482 if (!adj)
483 return;
484
485 subgrp->pscount++;
486 }
487
488 /* Check if we are sending the same route. This is needed to
489  * avoid duplicate UPDATES. For instance, when filtering
490  * communities at egress, neighbors would otherwise see
491  * duplicate UPDATES even though the route itself has not
492  * changed. Do not suppress BGP UPDATES for route-refresh.
493  */
494 if (CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES)
495 && !CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES)
496 && adj->attr_hash == attr_hash) {
497 if (BGP_DEBUG(update, UPDATE_OUT)) {
498 char attr_str[BUFSIZ] = {0};
499
500 bgp_dump_attr(attr, attr_str, sizeof(attr_str));
501
502 zlog_debug("%s suppress UPDATE w/ attr: %s", peer->host,
503 attr_str);
504 }
505
506 /*
507 * If BGP is skipping sending this value to its peers,
508 * the version number should be updated just as it would
509 * be if it had sent the data. Why? Because update
510 * groups will not be coalesced until such time that
511 * the version numbers are the same.
512 *
513 * Imagine a scenario with, say, 2 peers: they come up
514 * and are placed in the same update group. Then a new
515 * peer comes up a bit later. Then a prefix that maps
516 * to the first 2 peers flaps, and we decide not to send
517 * the update to them. Then, unless more network changes
518 * happen, we will never be able to coalesce the 3rd peer
519 * into the group.
520 */
521 subgrp->version = MAX(subgrp->version, dest->version);
522 return;
523 }
524
525 if (adj->adv)
526 bgp_advertise_clean_subgroup(subgrp, adj);
527 adj->adv = bgp_advertise_new();
528
529 adv = adj->adv;
530 adv->dest = dest;
531 assert(adv->pathi == NULL);
532 /* bgp_path_info adj_out reference */
533 adv->pathi = bgp_path_info_lock(path);
534
535 adv->baa = bgp_advertise_attr_intern(subgrp->hash, attr);
536 adv->adj = adj;
537 adj->attr_hash = attr_hash;
538
539 /* Add new advertisement to advertisement attribute list. */
540 bgp_advertise_add(adv->baa, adv);
541
542 /*
543 * If the update adv list is empty, trigger the member peers'
544 * mrai timers so the socket writes can happen.
545 */
546 if (!bgp_adv_fifo_count(&subgrp->sync->update)) {
547 SUBGRP_FOREACH_PEER (subgrp, paf) {
548 /* If there are no routes in the withdraw list, set
549 * the flag PEER_THREAD_SUBGRP_ADV_DELAY which will
550 * allow more routes to be sent in the update message
551 */
552 if (BGP_SUPPRESS_FIB_ENABLED(bgp)) {
553 adv_peer = PAF_PEER(paf);
554 if (!bgp_adv_fifo_count(
555 &subgrp->sync->withdraw))
556 SET_FLAG(adv_peer->thread_flags,
557 PEER_THREAD_SUBGRP_ADV_DELAY);
558 else
559 UNSET_FLAG(adv_peer->thread_flags,
560 PEER_THREAD_SUBGRP_ADV_DELAY);
561 }
562 bgp_adjust_routeadv(PAF_PEER(paf));
563 }
564 }
565
566 bgp_adv_fifo_add_tail(&subgrp->sync->update, adv);
567
568 subgrp->version = MAX(subgrp->version, dest->version);
569 }
570
571 /* The only time 'withdraw' will be false is if we are sending
572 * the "neighbor x.x.x.x default-originate" default and need to clear
573 * bgp_adj_out for the 0.0.0.0/0 route in the BGP table.
574 */
575 void bgp_adj_out_unset_subgroup(struct bgp_dest *dest,
576 struct update_subgroup *subgrp, char withdraw,
577 uint32_t addpath_tx_id)
578 {
579 struct bgp_adj_out *adj;
580 struct bgp_advertise *adv;
581 bool trigger_write;
582
583 if (DISABLE_BGP_ANNOUNCE)
584 return;
585
586 /* Lookup existing adjacency */
587 adj = adj_lookup(dest, subgrp, addpath_tx_id);
588 if (adj != NULL) {
589 /* Clean up previous advertisement. */
590 if (adj->adv)
591 bgp_advertise_clean_subgroup(subgrp, adj);
592
593 /* If default-originate is enabled and the route is the
594 * default route, do not send a withdraw. This prevents
595 * deletion of the default route at the peer.
596 */
597 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
598 && is_default_prefix(bgp_dest_get_prefix(dest)))
599 return;
600
601 if (adj->attr && withdraw) {
602 /* We need advertisement structure. */
603 adj->adv = bgp_advertise_new();
604 adv = adj->adv;
605 adv->dest = dest;
606 adv->adj = adj;
607
608 /* Note if we need to trigger a packet write */
609 trigger_write =
610 !bgp_adv_fifo_count(&subgrp->sync->withdraw);
611
612 /* Add to synchronization entry for withdraw
613 * announcement. */
614 bgp_adv_fifo_add_tail(&subgrp->sync->withdraw, adv);
615
616 if (trigger_write)
617 subgroup_trigger_write(subgrp);
618 } else {
619 /* Free allocated information. */
620 adj_free(adj);
621 }
622 if (!CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING))
623 subgrp->pscount--;
624 }
625
626 subgrp->version = MAX(subgrp->version, dest->version);
627 }
628
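/*
 * Tear down an adj-out entry completely: release its interned
 * attribute, clean up any pending advertisement and free the entry.
 */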
629 void bgp_adj_out_remove_subgroup(struct bgp_dest *dest, struct bgp_adj_out *adj,
630 struct update_subgroup *subgrp)
631 {
632 if (adj->attr)
633 bgp_attr_unintern(&adj->attr);
634
635 if (adj->adv)
636 bgp_advertise_clean_subgroup(subgrp, adj);
637
638 adj_free(adj);
639 }
640
641 /*
642 * Go through all the routes and clean up the adj/adv structures corresponding
643 * to the subgroup.
644 */
645 void subgroup_clear_table(struct update_subgroup *subgrp)
646 {
647 struct bgp_adj_out *aout, *taout;
648
649 SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout)
650 bgp_adj_out_remove_subgroup(aout->dest, aout, subgrp);
651 }
652
653 /*
654 * subgroup_announce_table
655 */
656 void subgroup_announce_table(struct update_subgroup *subgrp,
657 struct bgp_table *table)
658 {
659 struct bgp_dest *dest;
660 struct bgp_path_info *ri;
661 struct peer *peer;
662 afi_t afi;
663 safi_t safi;
664 safi_t safi_rib;
665 bool addpath_capable;
666
667 peer = SUBGRP_PEER(subgrp);
668 afi = SUBGRP_AFI(subgrp);
669 safi = SUBGRP_SAFI(subgrp);
670 addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);
671
672 if (safi == SAFI_LABELED_UNICAST)
673 safi_rib = SAFI_UNICAST;
674 else
675 safi_rib = safi;
676
677 if (!table)
678 table = peer->bgp->rib[afi][safi_rib];
679
680 if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN
681 && CHECK_FLAG(peer->af_flags[afi][safi],
682 PEER_FLAG_DEFAULT_ORIGINATE))
683 subgroup_default_originate(subgrp, 0);
684
685 subgrp->pscount = 0;
686 SET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
687
688 for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
689 for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next) {
690
691 if (!bgp_check_selected(ri, peer, addpath_capable, afi,
692 safi_rib))
693 continue;
694
695 /* If default-originate is enabled for
696 * the peer, do not send an explicit
697 * withdraw. This prevents deletion
698 * of the default route advertised
699 * through default-originate.
700 */
701 if (CHECK_FLAG(peer->af_flags[afi][safi],
702 PEER_FLAG_DEFAULT_ORIGINATE) &&
703 is_default_prefix(bgp_dest_get_prefix(dest)))
704 break;
705
706 subgroup_process_announce_selected(
707 subgrp, ri, dest, afi, safi_rib,
708 bgp_addpath_id_for_peer(peer, afi, safi_rib,
709 &ri->tx_addpath));
710 }
711 }
712 UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
713
714 /*
715 * We walked through the whole table -- make sure our version number
716 * is consistent with the one on the table. This should allow
717 * subgroups to merge sooner if a peer comes up when the route node
718 * with the largest version is no longer in the table. This also
719 * covers the pathological case where all routes in the table have
720 * now been deleted.
721 */
722 subgrp->version = MAX(subgrp->version, table->version);
723
724 /*
725 * Start a task to merge the subgroup if necessary.
726 */
727 update_subgroup_trigger_merge_check(subgrp, 0);
728 }
729
730 /*
731 * subgroup_announce_route
732 *
733 * Refresh all routes out to a subgroup.
734 */
735 void subgroup_announce_route(struct update_subgroup *subgrp)
736 {
737 struct bgp_dest *dest;
738 struct bgp_table *table;
739 struct peer *onlypeer;
740
741 if (update_subgroup_needs_refresh(subgrp)) {
742 update_subgroup_set_needs_refresh(subgrp, 0);
743 }
744
745 /*
746 * First update is deferred until ORF or ROUTE-REFRESH is received
747 */
748 onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
749 : NULL);
750 if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)]
751 [SUBGRP_SAFI(subgrp)],
752 PEER_STATUS_ORF_WAIT_REFRESH))
753 return;
754
755 if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN
756 && SUBGRP_SAFI(subgrp) != SAFI_ENCAP
757 && SUBGRP_SAFI(subgrp) != SAFI_EVPN)
758 subgroup_announce_table(subgrp, NULL);
759 else
760 for (dest = bgp_table_top(update_subgroup_rib(subgrp)); dest;
761 dest = bgp_route_next(dest)) {
762 table = bgp_dest_get_bgp_table_info(dest);
763 if (!table)
764 continue;
765 subgroup_announce_table(subgrp, table);
766 }
767 }
768
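/*
 * Announce or withdraw the default route toward a subgroup (the
 * 'neighbor X default-originate' machinery).  When a default-originate
 * route-map is configured, it is evaluated against the RIB and the
 * default is announced only on a permit match; a deny result turns the
 * request into a withdraw.
 */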
769 void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
770 {
771 struct bgp *bgp;
772 struct attr attr;
773 struct attr *new_attr = &attr;
774 struct prefix p;
775 struct peer *from;
776 struct bgp_dest *dest;
777 struct bgp_path_info *pi;
778 struct peer *peer;
779 struct bgp_adj_out *adj;
780 route_map_result_t ret = RMAP_DENYMATCH;
781 route_map_result_t new_ret = RMAP_DENYMATCH;
782 afi_t afi;
783 safi_t safi;
784 safi_t safi_rib;
785 int pref = 65536;
786 int new_pref = 0;
787
788 if (!subgrp)
789 return;
790
791 peer = SUBGRP_PEER(subgrp);
792 afi = SUBGRP_AFI(subgrp);
793 safi = SUBGRP_SAFI(subgrp);
794
795 if (!(afi == AFI_IP || afi == AFI_IP6))
796 return;
797
798 if (safi == SAFI_LABELED_UNICAST)
799 safi_rib = SAFI_UNICAST;
800 else
801 safi_rib = safi;
802
803 bgp = peer->bgp;
804 from = bgp->peer_self;
805
806 bgp_attr_default_set(&attr, bgp, BGP_ORIGIN_IGP);
807
808 /* make coverity happy */
809 assert(attr.aspath);
810
811 attr.med = 0;
812 attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC);
813
814 if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {
815 /* IPv6 global nexthop must be included. */
816 attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
817
818 /* If the peer is on a shared network and we have a link-local
819 nexthop, set it. */
820 if (peer->shared_network
821 && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local))
822 attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
823 }
824
825 if (peer->default_rmap[afi][safi].name) {
826 struct bgp_path_info tmp_pi = {0};
827
828 tmp_pi.peer = bgp->peer_self;
829
830 SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);
831
832 /* Iterate over the RIB to see if we can announce
833 * the default route. We announce the default
834 * route only if the route-map has a match.
835 */
836 for (dest = bgp_table_top(bgp->rib[afi][safi_rib]); dest;
837 dest = bgp_route_next(dest)) {
838 if (!bgp_dest_has_bgp_path_info_data(dest))
839 continue;
840
841 for (pi = bgp_dest_get_bgp_path_info(dest); pi;
842 pi = pi->next) {
843 struct attr tmp_attr = attr;
844
845 tmp_pi.attr = &tmp_attr;
846
847 new_ret = route_map_apply_ext(
848 peer->default_rmap[afi][safi].map,
849 bgp_dest_get_prefix(dest), pi, &tmp_pi,
850 &new_pref);
851
852 if (new_ret == RMAP_PERMITMATCH) {
853 if (new_pref < pref) {
854 pref = new_pref;
855 bgp_attr_flush(new_attr);
856 new_attr = bgp_attr_intern(
857 tmp_pi.attr);
858 bgp_attr_flush(tmp_pi.attr);
859 }
860 subgroup_announce_reset_nhop(
861 (peer_cap_enhe(peer, afi, safi)
862 ? AF_INET6
863 : AF_INET),
864 new_attr);
865 ret = new_ret;
866 } else
867 bgp_attr_flush(&tmp_attr);
868 }
869 }
870 bgp->peer_self->rmap_type = 0;
871
872 if (ret == RMAP_DENYMATCH) {
873 /*
874 * If it's an implicit withdraw due to a route-map
875 * deny operation, we need to set the flag back.
876 * This is a conversion of the update flow into a
877 * withdraw flow.
878 */
879 if (!withdraw &&
880 (!CHECK_FLAG(subgrp->sflags,
881 SUBGRP_STATUS_DEFAULT_ORIGINATE)))
882 SET_FLAG(subgrp->sflags,
883 SUBGRP_STATUS_DEFAULT_ORIGINATE);
884 withdraw = 1;
885 }
886 }
887
888 /* Check if the default route is in the local BGP RIB, i.e.,
889 * installed through redistribution or the network command.
890 */
891 memset(&p, 0, sizeof(p));
892 p.family = afi2family(afi);
893 p.prefixlen = 0;
894 dest = bgp_safi_node_lookup(bgp->rib[afi][safi_rib], safi_rib, &p,
895 NULL);
896
897 if (withdraw) {
898 /* Withdraw the default route advertised using default
899 * originate
900 */
901 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
902 subgroup_default_withdraw_packet(subgrp);
903 UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE);
904
905 /* If a default route is present in the local RIB, advertise
906 * that route instead.
907 */
908 if (dest) {
909 for (pi = bgp_dest_get_bgp_path_info(dest); pi;
910 pi = pi->next) {
911 if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
912 if (subgroup_announce_check(
913 dest, pi, subgrp,
914 bgp_dest_get_prefix(dest),
915 &attr, NULL)) {
916 struct attr *default_attr =
917 bgp_attr_intern(&attr);
918
919 bgp_adj_out_set_subgroup(
920 dest, subgrp,
921 default_attr, pi);
922 }
923 }
924 bgp_dest_unlock_node(dest);
925 }
926 } else {
927 if (!CHECK_FLAG(subgrp->sflags,
928 SUBGRP_STATUS_DEFAULT_ORIGINATE)) {
929
930 /* The 'neighbor x.x.x.x default-originate' default will act as
931 * an implicit withdraw for any previous UPDATEs sent for
932 * 0.0.0.0/0, so clear adj_out for the 0.0.0.0/0 prefix in the
933 * BGP table.
934 */
937 if (dest) {
938 /* Remove the adjacency for the previously
939 * advertised default route
940 */
941 adj = adj_lookup(
942 dest, subgrp,
943 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
944 if (adj != NULL) {
945 /* Clean up previous advertisement. */
946 if (adj->adv)
947 bgp_advertise_clean_subgroup(
948 subgrp, adj);
949
950 /* Free allocated information. */
951 adj_free(adj);
952 }
953 bgp_dest_unlock_node(dest);
954 }
955
956 /* Advertise the default route */
957 if (bgp_in_graceful_shutdown(bgp))
958 bgp_attr_add_gshut_community(new_attr);
959
960 SET_FLAG(subgrp->sflags,
961 SUBGRP_STATUS_DEFAULT_ORIGINATE);
962 subgroup_default_update_packet(subgrp, new_attr, from);
963 }
964 }
965
966 aspath_unintern(&attr.aspath);
967 }
968
969 /*
970 * Announce the BGP table to a subgroup.
971 *
972 * At startup, we try to optimize route announcement by coalescing the
973 * peer-up events. This is done only the first time - from then on,
974 * subgrp->v_coalesce will be set to zero and the normal logic
975 * prevails.
976 */
977 void subgroup_announce_all(struct update_subgroup *subgrp)
978 {
979 if (!subgrp)
980 return;
981
982 /*
983 * If coalesce timer value is not set, announce routes immediately.
984 */
985 if (!subgrp->v_coalesce) {
986 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
987 zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing all routes",
988 subgrp->update_group->id, subgrp->id);
989 subgroup_announce_route(subgrp);
990 return;
991 }
992
993 /*
994 * We should wait for the coalesce timer. Arm the timer if not done.
995 */
996 if (!subgrp->t_coalesce) {
997 event_add_timer_msec(bm->master, subgroup_coalesce_timer,
998 subgrp, subgrp->v_coalesce,
999 &subgrp->t_coalesce);
1000 }
1001 }
1002
1003 /*
1004 * Go through all update subgroups and set up the adv queue for the
1005 * input route.
1006 */
1007 void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
1008 struct bgp_dest *dest, struct bgp_path_info *pi)
1009 {
1010 struct updwalk_context ctx;
1011 ctx.pi = pi;
1012 ctx.dest = dest;
1013
1014 /* If suppress fib is enabled, the route will be advertised when
1015 * FIB status is received
1016 */
1017 if (!bgp_check_advertise(bgp, dest))
1018 return;
1019
1020 update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
1021 }
1022
1023 void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi,
1024 struct vty *vty, uint64_t id)
1025 {
1026 updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE);
1027 }
1028
1029 void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi,
1030 struct vty *vty, uint64_t id)
1031 {
1032 updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED);
1033 }
1034
1035 void update_group_announce(struct bgp *bgp)
1036 {
1037 update_group_walk(bgp, update_group_announce_walkcb, NULL);
1038 }
1039
1040 void update_group_announce_rrclients(struct bgp *bgp)
1041 {
1042 update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL);
1043 }