/**
 * bgp_updgrp_adv.c: BGP update group advertisement and adjacency
 *                   maintenance
 *
 *
 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
 *
 * @author Avneesh Sachdev <avneesh@sproute.net>
 * @author Rajesh Varadarajan <rajesh@sproute.net>
 * @author Pradosh Mohapatra <pradosh@sproute.net>
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
28
29 #include <zebra.h>
30
31 #include "command.h"
32 #include "memory.h"
33 #include "prefix.h"
34 #include "hash.h"
35 #include "thread.h"
36 #include "queue.h"
37 #include "routemap.h"
38 #include "filter.h"
39
40 #include "bgpd/bgpd.h"
41 #include "bgpd/bgp_table.h"
42 #include "bgpd/bgp_debug.h"
43 #include "bgpd/bgp_route.h"
44 #include "bgpd/bgp_advertise.h"
45 #include "bgpd/bgp_attr.h"
46 #include "bgpd/bgp_aspath.h"
47 #include "bgpd/bgp_packet.h"
48 #include "bgpd/bgp_fsm.h"
49 #include "bgpd/bgp_mplsvpn.h"
50 #include "bgpd/bgp_updgrp.h"
51 #include "bgpd/bgp_advertise.h"
52 #include "bgpd/bgp_addpath.h"
53
54
/********************
 * PRIVATE FUNCTIONS
 ********************/
static int bgp_adj_out_compare(const struct bgp_adj_out *o1,
			       const struct bgp_adj_out *o2)
{
	if (o1->subgroup < o2->subgroup)
		return -1;

	if (o1->subgroup > o2->subgroup)
		return 1;

	return 0;
}
RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare);

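/*
 * Locate the adj-out entry for the given subgroup on a route node. The
 * adj-out RB tree is keyed only by the subgroup pointer, so when the peer
 * is addpath-capable the addpath_tx_id is checked as well; otherwise the
 * id is ignored.
 */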
static inline struct bgp_adj_out *adj_lookup(struct bgp_node *rn,
					     struct update_subgroup *subgrp,
					     uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj, lookup;
	struct peer *peer;
	afi_t afi;
	safi_t safi;
	int addpath_capable;

	if (!rn || !subgrp)
		return NULL;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	/* update-groups that do not support addpath will pass 0 for
	 * addpath_tx_id, so do not bother matching against it */
	lookup.subgroup = subgrp;
	adj = RB_FIND(bgp_adj_out_rb, &rn->adj_out, &lookup);
	if (adj) {
		if (addpath_capable) {
			if (adj->addpath_tx_id == addpath_tx_id)
				return adj;
		} else
			return adj;
	}
	return NULL;
}

static void adj_free(struct bgp_adj_out *adj)
{
	TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
	SUBGRP_DECR_STAT(adj->subgroup, adj_count);
	XFREE(MTYPE_BGP_ADJ_OUT, adj);
}

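/*
 * For an addpath-capable subgroup, withdraw any adj-out entries on this
 * route node whose addpath_tx_id no longer corresponds to a path in the
 * node's path list. Passing NULL as the selected path to
 * subgroup_process_announce_selected() turns the advertisement into a
 * withdraw.
 */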
static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
					  struct update_subgroup *subgrp)
{
	struct bgp_adj_out *adj, *adj_next;
	uint32_t id;
	struct bgp_path_info *pi;
	afi_t afi = SUBGRP_AFI(subgrp);
	safi_t safi = SUBGRP_SAFI(subgrp);
	struct peer *peer = SUBGRP_PEER(subgrp);

	/* Look through all of the paths we have advertised for this rn and
	 * send a withdraw for the ones that are no longer present */
	RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->rn->adj_out, adj_next) {

		if (adj->subgroup == subgrp) {
			for (pi = ctx->rn->info; pi; pi = pi->next) {
				id = bgp_addpath_id_for_peer(peer, afi, safi,
							     &pi->tx_addpath);

				if (id == adj->addpath_tx_id) {
					break;
				}
			}

			if (!pi) {
				subgroup_process_announce_selected(
					subgrp, NULL, ctx->rn,
					adj->addpath_tx_id);
			}
		}
	}
}

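/*
 * Callback for update_group_af_walk(): enqueue an advertisement (or
 * withdraw) of the prefix in ctx->rn to every subgroup of the update
 * group. Subgroups whose coalesce timer is still running are skipped;
 * they pick the prefix up in the full table walk performed when that
 * timer fires.
 */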
static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	struct bgp_path_info *pi;
	afi_t afi;
	safi_t safi;
	struct peer *peer;
	struct bgp_adj_out *adj, *adj_next;
	int addpath_capable;

	afi = UPDGRP_AFI(updgrp);
	safi = UPDGRP_SAFI(updgrp);
	peer = UPDGRP_PEER(updgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	if (BGP_DEBUG(update, UPDATE_OUT)) {
		char buf_prefix[PREFIX_STRLEN];
		prefix2str(&ctx->rn->p, buf_prefix, sizeof(buf_prefix));
		zlog_debug("%s: afi=%s, safi=%s, p=%s", __func__, afi2str(afi),
			   safi2str(safi), buf_prefix);
	}


	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {

		/*
		 * Skip the subgroups that have coalesce timer running. We will
		 * walk the entire prefix table for those subgroups when the
		 * coalesce timer fires.
		 */
		if (!subgrp->t_coalesce) {
			/* An update-group that uses addpath */
			if (addpath_capable) {
				subgrp_withdraw_stale_addpath(ctx, subgrp);

				for (pi = ctx->rn->info; pi; pi = pi->next) {
					/* Skip the bestpath for now */
					if (pi == ctx->pi)
						continue;

					subgroup_process_announce_selected(
						subgrp, pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&pi->tx_addpath));
				}

				/* Process the bestpath last so the "show [ip]
				 * bgp neighbor x.x.x.x advertised" output
				 * shows the attributes from the bestpath.
				 */
				if (ctx->pi)
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
			}

			/* An update-group that does not use addpath */
			else {
				if (ctx->pi) {
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
				} else {
					/* Find the addpath_tx_id of the path
					 * we had advertised and send a
					 * withdraw */
					RB_FOREACH_SAFE (adj, bgp_adj_out_rb,
							 &ctx->rn->adj_out,
							 adj_next) {
						if (adj->subgroup == subgrp) {
							subgroup_process_announce_selected(
								subgrp, NULL,
								ctx->rn,
								adj->addpath_tx_id);
						}
					}
				}
			}
		}
	}

	return UPDWALK_CONTINUE;
}

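/*
 * Dump a subgroup's adj-out to the vty: either the attributes still
 * queued for advertisement (UPDWALK_FLAGS_ADVQUEUE) or the attributes
 * already advertised (UPDWALK_FLAGS_ADVERTISED), using the same
 * per-prefix output format as the regular "show bgp" commands.
 */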
static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
				 struct vty *vty, uint8_t flags)
{
	struct bgp_table *table;
	struct bgp_adj_out *adj;
	unsigned long output_count;
	struct bgp_node *rn;
	int header1 = 1;
	struct bgp *bgp;
	int header2 = 1;

	bgp = SUBGRP_INST(subgrp);
	if (!bgp)
		return;

	table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];

	output_count = 0;

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
		RB_FOREACH (adj, bgp_adj_out_rb, &rn->adj_out)
			if (adj->subgroup == subgrp) {
				if (header1) {
					vty_out(vty,
						"BGP table version is %" PRIu64
						", local router ID is %s\n",
						table->version,
						inet_ntoa(bgp->router_id));
					vty_out(vty, BGP_SHOW_SCODE_HEADER);
					vty_out(vty, BGP_SHOW_OCODE_HEADER);
					header1 = 0;
				}
				if (header2) {
					vty_out(vty, BGP_SHOW_HEADER);
					header2 = 0;
				}
				if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
				    && adj->adv->baa) {
					route_vty_out_tmp(vty, &rn->p,
							  adj->adv->baa->attr,
							  SUBGRP_SAFI(subgrp),
							  0, NULL);
					output_count++;
				}
				if ((flags & UPDWALK_FLAGS_ADVERTISED)
				    && adj->attr) {
					route_vty_out_tmp(
						vty, &rn->p, adj->attr,
						SUBGRP_SAFI(subgrp), 0, NULL);
					output_count++;
				}
			}
	if (output_count != 0)
		vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
}

static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	struct vty *vty;

	vty = ctx->vty;
	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
			continue;
		vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
			updgrp->id, subgrp->id);
		subgrp_show_adjq_vty(subgrp, vty, ctx->flags);
	}
	return UPDWALK_CONTINUE;
}

static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
			    struct vty *vty, uint64_t id, uint8_t flags)
{
	struct updwalk_context ctx;
	memset(&ctx, 0, sizeof(ctx));
	ctx.vty = vty;
	ctx.subgrp_id = id;
	ctx.flags = flags;

	update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
}

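/*
 * Coalesce timer expiry handler: announce the full table to the
 * subgroup, then (unless an update-delay is in progress) zero each
 * member peer's route advertisement timer so the initial UPDATEs go out
 * without waiting for MRAI. This immediate announcement is also what
 * triggers the End-of-RIB marker when there is nothing to advertise.
 */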
static int subgroup_coalesce_timer(struct thread *thread)
{
	struct update_subgroup *subgrp;

	subgrp = THREAD_ARG(thread);
	if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
		zlog_debug("u%" PRIu64 ":s%" PRIu64
			   " announcing routes upon coalesce timer expiry",
			   (SUBGRP_UPDGRP(subgrp))->id, subgrp->id);
	subgrp->t_coalesce = NULL;
	subgrp->v_coalesce = 0;
	subgroup_announce_route(subgrp);


	/* While the announce_route() may kick off the route advertisement
	 * timer for the members of the subgroup, we'd like to send the
	 * initial updates much faster (i.e., without enforcing MRAI). Also,
	 * if there were no routes to announce, this is the method currently
	 * employed to trigger the EOR.
	 */
	if (!bgp_update_delay_active(SUBGRP_INST(subgrp))) {
		struct peer_af *paf;
		struct peer *peer;

		SUBGRP_FOREACH_PEER (subgrp, paf) {
			peer = PAF_PEER(paf);
			BGP_TIMER_OFF(peer->t_routeadv);
			BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
		}
	}

	return 0;
}

static int update_group_announce_walkcb(struct update_group *updgrp, void *arg)
{
	struct update_subgroup *subgrp;

	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		subgroup_announce_all(subgrp);
	}

	return UPDWALK_CONTINUE;
}

static int update_group_announce_rrc_walkcb(struct update_group *updgrp,
					    void *arg)
{
	struct update_subgroup *subgrp;
	afi_t afi;
	safi_t safi;
	struct peer *peer;

	afi = UPDGRP_AFI(updgrp);
	safi = UPDGRP_SAFI(updgrp);
	peer = UPDGRP_PEER(updgrp);

	/* Only announce if this is a group of route-reflector-clients */
	if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) {
		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
			subgroup_announce_all(subgrp);
		}
	}

	return UPDWALK_CONTINUE;
}

/********************
 * PUBLIC FUNCTIONS
 ********************/

/**
 * Allocate an adj-out object. Do proper initialization of its fields,
 * primarily its association with the subgroup and the prefix.
 */
struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
				      struct bgp_node *rn,
				      uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj;

	adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
	adj->subgroup = subgrp;
	if (rn) {
		RB_INSERT(bgp_adj_out_rb, &rn->adj_out, adj);
		bgp_lock_node(rn);
		adj->rn = rn;
	}

	adj->addpath_tx_id = addpath_tx_id;
	TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
	SUBGRP_INCR_STAT(subgrp, adj_count);
	return adj;
}


struct bgp_advertise *
bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
			     struct bgp_adj_out *adj)
{
	struct bgp_advertise *adv;
	struct bgp_advertise_attr *baa;
	struct bgp_advertise *next;
	struct bgp_advertise_fifo *fhead;

	adv = adj->adv;
	baa = adv->baa;
	next = NULL;

	if (baa) {
		fhead = &subgrp->sync->update;

		/* Unlink myself from advertise attribute FIFO. */
		bgp_advertise_delete(baa, adv);

		/* Fetch next advertise candidate. */
		next = baa->adv;

		/* Unintern BGP advertise attribute. */
		bgp_advertise_unintern(subgrp->hash, baa);
	} else
		fhead = &subgrp->sync->withdraw;


	/* Unlink myself from advertisement FIFO. */
	BGP_ADV_FIFO_DEL(fhead, adv);

	/* Free memory. */
	bgp_advertise_free(adj->adv);
	adj->adv = NULL;

	return next;
}

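/*
 * Record that 'path' with attribute 'attr' is to be advertised to the
 * subgroup for route node 'rn': find or allocate the adj-out entry,
 * replace any advertisement still pending on it, intern the attribute
 * and queue the new advertisement on the subgroup's update FIFO. When
 * the FIFO was previously empty, the member peers' routeadv timers are
 * adjusted so a write actually gets scheduled.
 */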
void bgp_adj_out_set_subgroup(struct bgp_node *rn,
			      struct update_subgroup *subgrp, struct attr *attr,
			      struct bgp_path_info *path)
{
	struct bgp_adj_out *adj = NULL;
	struct bgp_advertise *adv;
	struct peer *peer;
	afi_t afi;
	safi_t safi;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Look for adjacency information. */
	adj = adj_lookup(
		rn, subgrp,
		bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));

	if (!adj) {
		adj = bgp_adj_out_alloc(
			subgrp, rn,
			bgp_addpath_id_for_peer(peer, afi, safi,
						&path->tx_addpath));
		if (!adj)
			return;
	}

	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);
	adj->adv = bgp_advertise_new();

	adv = adj->adv;
	adv->rn = rn;
	assert(adv->pathi == NULL);
	/* bgp_path_info adj_out reference */
	adv->pathi = bgp_path_info_lock(path);

	if (attr)
		adv->baa = bgp_advertise_intern(subgrp->hash, attr);
	else
		adv->baa = baa_new();
	adv->adj = adj;

	/* Add new advertisement to advertisement attribute list. */
	bgp_advertise_add(adv->baa, adv);

	/*
	 * If the update adv list is empty, trigger the member peers'
	 * mrai timers so the socket writes can happen.
	 */
	if (BGP_ADV_FIFO_EMPTY(&subgrp->sync->update)) {
		struct peer_af *paf;

		SUBGRP_FOREACH_PEER (subgrp, paf) {
			bgp_adjust_routeadv(PAF_PEER(paf));
		}
	}

	BGP_ADV_FIFO_ADD(&subgrp->sync->update, &adv->fifo);

	subgrp->version = max(subgrp->version, rn->version);
}

/* The only time 'withdraw' will be false is if we are sending
 * the "neighbor x.x.x.x default-originate" default and need to clear
 * bgp_adj_out for the 0.0.0.0/0 route in the BGP table.
 */
void bgp_adj_out_unset_subgroup(struct bgp_node *rn,
				struct update_subgroup *subgrp, char withdraw,
				uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj;
	struct bgp_advertise *adv;
	bool trigger_write;

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Lookup existing adjacency */
	if ((adj = adj_lookup(rn, subgrp, addpath_tx_id)) != NULL) {
		/* Clean up previous advertisement. */
		if (adj->adv)
			bgp_advertise_clean_subgroup(subgrp, adj);

		if (adj->attr && withdraw) {
			/* We need advertisement structure. */
			adj->adv = bgp_advertise_new();
			adv = adj->adv;
			adv->rn = rn;
			adv->adj = adj;

			/* Note if we need to trigger a packet write */
			trigger_write =
				BGP_ADV_FIFO_EMPTY(&subgrp->sync->withdraw);

			/* Add to synchronization entry for withdraw
			 * announcement. */
			BGP_ADV_FIFO_ADD(&subgrp->sync->withdraw, &adv->fifo);

			if (trigger_write)
				subgroup_trigger_write(subgrp);
		} else {
			/* Remove myself from adjacency. */
			RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);

			/* Free allocated information. */
			adj_free(adj);

			bgp_unlock_node(rn);
		}
	}

	subgrp->version = max(subgrp->version, rn->version);
}

void bgp_adj_out_remove_subgroup(struct bgp_node *rn, struct bgp_adj_out *adj,
				 struct update_subgroup *subgrp)
{
	if (adj->attr)
		bgp_attr_unintern(&adj->attr);

	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);

	RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);
	adj_free(adj);
}

/*
 * Go through all the routes and clean up the adj/adv structures corresponding
 * to the subgroup.
 */
void subgroup_clear_table(struct update_subgroup *subgrp)
{
	struct bgp_adj_out *aout, *taout;

	SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) {
		struct bgp_node *rn = aout->rn;
		bgp_adj_out_remove_subgroup(rn, aout, subgrp);
		bgp_unlock_node(rn);
	}
}

/*
 * subgroup_announce_table
 *
 * Walk the given table (or the subgroup's own RIB if 'table' is NULL)
 * and announce or withdraw each selected path to the subgroup, applying
 * outbound policy via subgroup_announce_check().
 */
void subgroup_announce_table(struct update_subgroup *subgrp,
			     struct bgp_table *table)
{
	struct bgp_node *rn;
	struct bgp_path_info *ri;
	struct attr attr;
	struct peer *peer;
	afi_t afi;
	safi_t safi;
	int addpath_capable;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	if (!table)
		table = peer->bgp->rib[afi][safi];

	if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN
	    && CHECK_FLAG(peer->af_flags[afi][safi],
			  PEER_FLAG_DEFAULT_ORIGINATE))
		subgroup_default_originate(subgrp, 0);

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
		for (ri = rn->info; ri; ri = ri->next)

			if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED)
			    || (addpath_capable
				&& bgp_addpath_tx_path(
					   peer->addpath_type[afi][safi],
					   ri))) {
				if (subgroup_announce_check(rn, ri, subgrp,
							    &rn->p, &attr))
					bgp_adj_out_set_subgroup(rn, subgrp,
								 &attr, ri);
				else
					bgp_adj_out_unset_subgroup(
						rn, subgrp, 1,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ri->tx_addpath));
			}

	/*
	 * We walked through the whole table -- make sure our version number
	 * is consistent with the one on the table. This should allow
	 * subgroups to merge sooner if a peer comes up when the route node
	 * with the largest version is no longer in the table. This also
	 * covers the pathological case where all routes in the table have
	 * now been deleted.
	 */
	subgrp->version = max(subgrp->version, table->version);

	/*
	 * Start a task to merge the subgroup if necessary.
	 */
	update_subgroup_trigger_merge_check(subgrp, 0);
}

/*
 * subgroup_announce_route
 *
 * Refresh all routes out to a subgroup.
 */
void subgroup_announce_route(struct update_subgroup *subgrp)
{
	struct bgp_node *rn;
	struct bgp_table *table;
	struct peer *onlypeer;

	if (update_subgroup_needs_refresh(subgrp)) {
		update_subgroup_set_needs_refresh(subgrp, 0);
	}

	/*
	 * First update is deferred until ORF or ROUTE-REFRESH is received
	 */
	onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
						 : NULL);
	if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)]
						      [SUBGRP_SAFI(subgrp)],
				   PEER_STATUS_ORF_WAIT_REFRESH))
		return;

	if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN
	    && SUBGRP_SAFI(subgrp) != SAFI_ENCAP
	    && SUBGRP_SAFI(subgrp) != SAFI_EVPN)
		subgroup_announce_table(subgrp, NULL);
	else
		for (rn = bgp_table_top(update_subgroup_rib(subgrp)); rn;
		     rn = bgp_route_next(rn))
			if ((table = (rn->info)) != NULL)
				subgroup_announce_table(subgrp, table);
}

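/*
 * Handle "neighbor x.x.x.x default-originate" for a subgroup: build a
 * default route (0.0.0.0/0 or ::/0) with self-originated attributes,
 * optionally evaluate the configured default-originate route-map
 * against the RIB, and then either send the default UPDATE or withdraw
 * it, tracking the state in SUBGRP_STATUS_DEFAULT_ORIGINATE.
 */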
void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
{
	struct bgp *bgp;
	struct attr attr;
	struct aspath *aspath;
	struct bgp_path_info tmp_info;
	struct prefix p;
	struct peer *from;
	struct bgp_node *rn;
	struct bgp_path_info *ri;
	struct peer *peer;
	int ret = RMAP_DENYMATCH;
	afi_t afi;
	safi_t safi;

	if (!subgrp)
		return;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	if (!(afi == AFI_IP || afi == AFI_IP6))
		return;

	bgp = peer->bgp;
	from = bgp->peer_self;

	bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
	aspath = attr.aspath;

	attr.local_pref = bgp->default_local_pref;

	memset(&p, 0, sizeof(p));
	p.family = afi2family(afi);
	p.prefixlen = 0;

	if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {
		/* IPv6 global nexthop must be included. */
		attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;

		/* If the peer is on a shared network and we have a link-local
		 * nexthop, set it. */
		if (peer->shared_network
		    && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local))
			attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
	}

	if (peer->default_rmap[afi][safi].name) {
		SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);
		for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
		     rn = bgp_route_next(rn)) {
			for (ri = rn->info; ri; ri = ri->next) {
				struct attr dummy_attr;

				/* Provide a duplicate so the route-map can't
				 * modify the real attributes */
				bgp_attr_dup(&dummy_attr, ri->attr);
				tmp_info.peer = ri->peer;
				tmp_info.attr = &dummy_attr;

				ret = route_map_apply(
					peer->default_rmap[afi][safi].map,
					&rn->p, RMAP_BGP, &tmp_info);

				/* The route map might have set attributes. If
				 * we don't flush them here, they will be
				 * leaked. */
				bgp_attr_flush(&dummy_attr);
				if (ret != RMAP_DENYMATCH)
					break;
			}
			if (ret != RMAP_DENYMATCH)
				break;
		}
		bgp->peer_self->rmap_type = 0;

		if (ret == RMAP_DENYMATCH)
			withdraw = 1;
	}

	if (withdraw) {
		if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
			subgroup_default_withdraw_packet(subgrp);
		UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE);
	} else {
		if (!CHECK_FLAG(subgrp->sflags,
				SUBGRP_STATUS_DEFAULT_ORIGINATE)) {

			if (bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN)) {
				bgp_attr_add_gshut_community(&attr);
			}

			SET_FLAG(subgrp->sflags,
				 SUBGRP_STATUS_DEFAULT_ORIGINATE);
			subgroup_default_update_packet(subgrp, &attr, from);

			/* The 'neighbor x.x.x.x default-originate' default
			 * will act as an implicit withdraw for any previous
			 * UPDATEs sent for 0.0.0.0/0, so clear adj_out for
			 * the 0.0.0.0/0 prefix in the BGP table.
			 */
			memset(&p, 0, sizeof(p));
			p.family = afi2family(afi);
			p.prefixlen = 0;

			rn = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
					      &p, NULL);
			bgp_adj_out_unset_subgroup(
				rn, subgrp, 0,
				BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
		}
	}

	aspath_unintern(&aspath);
}

/*
 * Announce the BGP table to a subgroup.
 *
 * At startup, we try to optimize route announcement by coalescing the
 * peer-up events. This is done only the first time - from then on,
 * subgrp->v_coalesce will be set to zero and the normal logic
 * prevails.
 */
void subgroup_announce_all(struct update_subgroup *subgrp)
{
	if (!subgrp)
		return;

	/*
	 * If coalesce timer value is not set, announce routes immediately.
	 */
	if (!subgrp->v_coalesce) {
		if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " announcing all routes",
				   subgrp->update_group->id, subgrp->id);
		subgroup_announce_route(subgrp);
		return;
	}

	/*
	 * We should wait for the coalesce timer. Arm the timer if not done.
	 */
	if (!subgrp->t_coalesce) {
		thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
				      subgrp, subgrp->v_coalesce,
				      &subgrp->t_coalesce);
	}
}

/*
 * Go through all update subgroups and set up the adv queue for the
 * input route.
 */
void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
			  struct bgp_node *rn, struct bgp_path_info *pi)
{
	struct updwalk_context ctx;
	ctx.pi = pi;
	ctx.rn = rn;
	update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
}

void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi,
				 struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE);
}

void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi,
				  struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED);
}

void update_group_announce(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_walkcb, NULL);
}

void update_group_announce_rrclients(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL);
}