/**
 * bgp_updgrp_adv.c: BGP update group advertisement and adjacency
 * maintenance
 *
 *
 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
 *
 * @author Avneesh Sachdev <avneesh@sproute.net>
 * @author Rajesh Varadarajan <rajesh@sproute.net>
 * @author Pradosh Mohapatra <pradosh@sproute.net>
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "command.h"
#include "memory.h"
#include "prefix.h"
#include "hash.h"
#include "thread.h"
#include "queue.h"
#include "routemap.h"
#include "filter.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_advertise.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_packet.h"
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_updgrp.h"
#include "bgpd/bgp_addpath.h"


/********************
 * PRIVATE FUNCTIONS
 ********************/
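/*
 * Comparison function for the per-route adj-out RB tree. Entries are
 * ordered by owning subgroup first and by addpath_tx_id second, so a
 * (subgroup, addpath_tx_id) pair uniquely identifies an advertisement
 * on a route node.
 */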
static int bgp_adj_out_compare(const struct bgp_adj_out *o1,
			       const struct bgp_adj_out *o2)
{
	if (o1->subgroup < o2->subgroup)
		return -1;

	if (o1->subgroup > o2->subgroup)
		return 1;

	if (o1->addpath_tx_id < o2->addpath_tx_id)
		return -1;

	if (o1->addpath_tx_id > o2->addpath_tx_id)
		return 1;

	return 0;
}
RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare);

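/*
 * Find the adj-out entry that this subgroup owns for the given route node
 * and addpath transmit id, or NULL if nothing has been advertised.
 */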
static inline struct bgp_adj_out *adj_lookup(struct bgp_node *rn,
					     struct update_subgroup *subgrp,
					     uint32_t addpath_tx_id)
{
	struct bgp_adj_out lookup;

	if (!rn || !subgrp)
		return NULL;

	/* update-groups that do not support addpath will pass 0 for
	 * addpath_tx_id. */
	lookup.subgroup = subgrp;
	lookup.addpath_tx_id = addpath_tx_id;

	return RB_FIND(bgp_adj_out_rb, &rn->adj_out, &lookup);
}

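/*
 * Detach an adj-out entry from its subgroup's adjacency list, update the
 * subgroup statistics and free the memory. The caller is responsible for
 * removing the entry from the route node's RB tree first.
 */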
static void adj_free(struct bgp_adj_out *adj)
{
	TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
	SUBGRP_DECR_STAT(adj->subgroup, adj_count);
	XFREE(MTYPE_BGP_ADJ_OUT, adj);
}

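/*
 * For an addpath-enabled subgroup, walk the advertisements on the route
 * node in the walk context and withdraw any whose addpath_tx_id no longer
 * matches a path in the node's path list.
 */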
static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
					  struct update_subgroup *subgrp)
{
	struct bgp_adj_out *adj, *adj_next;
	uint32_t id;
	struct bgp_path_info *pi;
	afi_t afi = SUBGRP_AFI(subgrp);
	safi_t safi = SUBGRP_SAFI(subgrp);
	struct peer *peer = SUBGRP_PEER(subgrp);

	/* Look through all of the paths we have advertised for this rn and
	 * send a withdraw for the ones that are no longer present */
	RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->rn->adj_out, adj_next) {

		if (adj->subgroup == subgrp) {
			for (pi = bgp_node_get_bgp_path_info(ctx->rn);
			     pi; pi = pi->next) {
				id = bgp_addpath_id_for_peer(peer, afi, safi,
							     &pi->tx_addpath);

				if (id == adj->addpath_tx_id) {
					break;
				}
			}

			if (!pi) {
				subgroup_process_announce_selected(
					subgrp, NULL, ctx->rn,
					adj->addpath_tx_id);
			}
		}
	}
}

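/*
 * Update-group walk callback invoked when a single route changes. For
 * every subgroup whose coalesce timer is not running, (re)announce the
 * selected path(s) for the route node in the walk context, or withdraw
 * previously advertised paths that are gone.
 */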
static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	struct bgp_path_info *pi;
	afi_t afi;
	safi_t safi;
	struct peer *peer;
	struct bgp_adj_out *adj, *adj_next;
	int addpath_capable;

	afi = UPDGRP_AFI(updgrp);
	safi = UPDGRP_SAFI(updgrp);
	peer = UPDGRP_PEER(updgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	if (BGP_DEBUG(update, UPDATE_OUT))
		zlog_debug("%s: afi=%s, safi=%s, p=%pRN", __func__,
			   afi2str(afi), safi2str(safi), ctx->rn);

	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {

		/*
		 * Skip the subgroups that have coalesce timer running. We will
		 * walk the entire prefix table for those subgroups when the
		 * coalesce timer fires.
		 */
		if (!subgrp->t_coalesce) {
			/* An update-group that uses addpath */
			if (addpath_capable) {
				subgrp_withdraw_stale_addpath(ctx, subgrp);

				for (pi = bgp_node_get_bgp_path_info(ctx->rn);
				     pi; pi = pi->next) {
					/* Skip the bestpath for now */
					if (pi == ctx->pi)
						continue;

					subgroup_process_announce_selected(
						subgrp, pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&pi->tx_addpath));
				}

				/* Process the bestpath last so the "show [ip]
				 * bgp neighbor x.x.x.x advertised-routes"
				 * output shows the attributes from the
				 * bestpath.
				 */
				if (ctx->pi)
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
			}

			/* An update-group that does not use addpath */
			else {
				if (ctx->pi) {
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
				} else {
					/* Find the addpath_tx_id of the path
					 * we had advertised and send a
					 * withdraw. */
					RB_FOREACH_SAFE (adj, bgp_adj_out_rb,
							 &ctx->rn->adj_out,
							 adj_next) {
						if (adj->subgroup == subgrp) {
							subgroup_process_announce_selected(
								subgrp, NULL,
								ctx->rn,
								adj->addpath_tx_id);
						}
					}
				}
			}
		}
	}

	return UPDWALK_CONTINUE;
}

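/*
 * Dump a subgroup's adj-out (either the queued advertisements or the
 * already advertised attributes, depending on flags) to the given vty,
 * in the standard "show bgp" route format.
 */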
static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
				 struct vty *vty, uint8_t flags)
{
	struct bgp_table *table;
	struct bgp_adj_out *adj;
	unsigned long output_count;
	struct bgp_node *rn;
	int header1 = 1;
	struct bgp *bgp;
	int header2 = 1;

	bgp = SUBGRP_INST(subgrp);
	if (!bgp)
		return;

	table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];

	output_count = 0;

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) {
		const struct prefix *rn_p = bgp_node_get_prefix(rn);

		RB_FOREACH (adj, bgp_adj_out_rb, &rn->adj_out)
			if (adj->subgroup == subgrp) {
				if (header1) {
					vty_out(vty,
						"BGP table version is %" PRIu64
						", local router ID is %s\n",
						table->version,
						inet_ntoa(bgp->router_id));
					vty_out(vty, BGP_SHOW_SCODE_HEADER);
					vty_out(vty, BGP_SHOW_OCODE_HEADER);
					header1 = 0;
				}
				if (header2) {
					vty_out(vty, BGP_SHOW_HEADER);
					header2 = 0;
				}
				if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
				    && adj->adv->baa) {
					route_vty_out_tmp(
						vty, rn_p, adj->adv->baa->attr,
						SUBGRP_SAFI(subgrp), 0, NULL);
					output_count++;
				}
				if ((flags & UPDWALK_FLAGS_ADVERTISED)
				    && adj->attr) {
					route_vty_out_tmp(vty, rn_p, adj->attr,
							  SUBGRP_SAFI(subgrp),
							  0, NULL);
					output_count++;
				}
			}
	}
	if (output_count != 0)
		vty_out(vty, "\nTotal number of prefixes %lu\n", output_count);
}

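/*
 * Walk callback for updgrp_show_adj(): print the adj-out of every
 * subgroup in the update-group (or only the requested subgroup id).
 */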
static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	struct vty *vty;

	vty = ctx->vty;
	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
			continue;
		vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
			updgrp->id, subgrp->id);
		subgrp_show_adjq_vty(subgrp, vty, ctx->flags);
	}
	return UPDWALK_CONTINUE;
}

static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
			    struct vty *vty, uint64_t id, uint8_t flags)
{
	struct updwalk_context ctx;
	memset(&ctx, 0, sizeof(ctx));
	ctx.vty = vty;
	ctx.subgrp_id = id;
	ctx.flags = flags;

	update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
}

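/*
 * Coalesce timer expiry handler: announce the full table to the subgroup
 * and, unless update-delay is active, reschedule each member peer's route
 * advertisement timer to fire immediately so the initial updates (or the
 * End-of-RIB marker) go out without waiting for MRAI.
 */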
static int subgroup_coalesce_timer(struct thread *thread)
{
	struct update_subgroup *subgrp;

	subgrp = THREAD_ARG(thread);
	if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
		zlog_debug("u%" PRIu64 ":s%" PRIu64
			   " announcing routes upon coalesce timer expiry(%u ms)",
			   (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
			   subgrp->v_coalesce);
	subgrp->t_coalesce = NULL;
	subgrp->v_coalesce = 0;
	subgroup_announce_route(subgrp);

	/* While the announce_route() may kick off the route advertisement
	 * timer for the members of the subgroup, we'd like to send the
	 * initial updates much faster (i.e., without enforcing MRAI). Also,
	 * if there were no routes to announce, this is the method currently
	 * employed to trigger the EOR.
	 */
	if (!bgp_update_delay_active(SUBGRP_INST(subgrp))) {
		struct peer_af *paf;
		struct peer *peer;

		SUBGRP_FOREACH_PEER (subgrp, paf) {
			peer = PAF_PEER(paf);
			BGP_TIMER_OFF(peer->t_routeadv);
			BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
		}
	}

	return 0;
}

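/*
 * Walk callback for update_group_announce(): announce all routes to every
 * subgroup of the update-group.
 */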
static int update_group_announce_walkcb(struct update_group *updgrp, void *arg)
{
	struct update_subgroup *subgrp;

	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		subgroup_announce_all(subgrp);
	}

	return UPDWALK_CONTINUE;
}

static int update_group_announce_rrc_walkcb(struct update_group *updgrp,
					    void *arg)
{
	struct update_subgroup *subgrp;
	afi_t afi;
	safi_t safi;
	struct peer *peer;

	afi = UPDGRP_AFI(updgrp);
	safi = UPDGRP_SAFI(updgrp);
	peer = UPDGRP_PEER(updgrp);

	/* Only announce if this is a group of route-reflector-clients */
	if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) {
		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
			subgroup_announce_all(subgrp);
		}
	}

	return UPDWALK_CONTINUE;
}

/********************
 * PUBLIC FUNCTIONS
 ********************/

/**
 * Allocate an adj-out object. Do proper initialization of its fields,
 * primarily its association with the subgroup and the prefix.
 */
struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
				      struct bgp_node *rn,
				      uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj;

	adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
	adj->subgroup = subgrp;
	adj->addpath_tx_id = addpath_tx_id;

	if (rn) {
		RB_INSERT(bgp_adj_out_rb, &rn->adj_out, adj);
		bgp_lock_node(rn);
		adj->rn = rn;
	}

	TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
	SUBGRP_INCR_STAT(subgrp, adj_count);
	return adj;
}

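/*
 * Detach the pending advertisement hanging off an adj-out entry from the
 * subgroup's update (or withdraw) FIFO and free it. Returns the next
 * advertisement sharing the same interned attribute, if any, so the
 * caller can keep packing updates for that attribute.
 */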
struct bgp_advertise *
bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
			     struct bgp_adj_out *adj)
{
	struct bgp_advertise *adv;
	struct bgp_advertise_attr *baa;
	struct bgp_advertise *next;
	struct bgp_adv_fifo_head *fhead;

	adv = adj->adv;
	baa = adv->baa;
	next = NULL;

	if (baa) {
		fhead = &subgrp->sync->update;

		/* Unlink myself from advertise attribute FIFO. */
		bgp_advertise_delete(baa, adv);

		/* Fetch next advertise candidate. */
		next = baa->adv;

		/* Unintern BGP advertise attribute. */
		bgp_advertise_unintern(subgrp->hash, baa);
	} else
		fhead = &subgrp->sync->withdraw;


	/* Unlink myself from advertisement FIFO. */
	bgp_adv_fifo_del(fhead, adv);

	/* Free memory. */
	bgp_advertise_free(adj->adv);
	adj->adv = NULL;

	return next;
}

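/*
 * Enqueue an advertisement of the given path with the given attributes to
 * the subgroup: create (or reuse) the adj-out entry for the route node and
 * place a new bgp_advertise on the subgroup's update FIFO. If the FIFO was
 * empty, poke the member peers' MRAI timers so the update gets written out.
 */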
void bgp_adj_out_set_subgroup(struct bgp_node *rn,
			      struct update_subgroup *subgrp, struct attr *attr,
			      struct bgp_path_info *path)
{
	struct bgp_adj_out *adj = NULL;
	struct bgp_advertise *adv;
	struct peer *peer;
	afi_t afi;
	safi_t safi;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Look for adjacency information. */
	adj = adj_lookup(
		rn, subgrp,
		bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));

	if (!adj) {
		adj = bgp_adj_out_alloc(
			subgrp, rn,
			bgp_addpath_id_for_peer(peer, afi, safi,
						&path->tx_addpath));
		if (!adj)
			return;
	}

	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);
	adj->adv = bgp_advertise_new();

	adv = adj->adv;
	adv->rn = rn;
	assert(adv->pathi == NULL);
	/* bgp_path_info adj_out reference */
	adv->pathi = bgp_path_info_lock(path);

	if (attr)
		adv->baa = bgp_advertise_intern(subgrp->hash, attr);
	else
		adv->baa = baa_new();
	adv->adj = adj;

	/* Add new advertisement to advertisement attribute list. */
	bgp_advertise_add(adv->baa, adv);

	/*
	 * If the update adv list is empty, trigger the member peers'
	 * mrai timers so the socket writes can happen.
	 */
	if (!bgp_adv_fifo_count(&subgrp->sync->update)) {
		struct peer_af *paf;

		SUBGRP_FOREACH_PEER (subgrp, paf) {
			bgp_adjust_routeadv(PAF_PEER(paf));
		}
	}

	bgp_adv_fifo_add_tail(&subgrp->sync->update, adv);

	subgrp->version = max(subgrp->version, rn->version);
}

/* The only time 'withdraw' will be false is if we are sending
 * the "neighbor x.x.x.x default-originate" default and need to clear
 * bgp_adj_out for the 0.0.0.0/0 route in the BGP table.
 */
void bgp_adj_out_unset_subgroup(struct bgp_node *rn,
				struct update_subgroup *subgrp, char withdraw,
				uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj;
	struct bgp_advertise *adv;
	bool trigger_write;

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Lookup existing adjacency */
	if ((adj = adj_lookup(rn, subgrp, addpath_tx_id)) != NULL) {
		/* Clean up previous advertisement. */
		if (adj->adv)
			bgp_advertise_clean_subgroup(subgrp, adj);

		if (adj->attr && withdraw) {
			/* We need advertisement structure. */
			adj->adv = bgp_advertise_new();
			adv = adj->adv;
			adv->rn = rn;
			adv->adj = adj;

			/* Note if we need to trigger a packet write */
			trigger_write =
				!bgp_adv_fifo_count(&subgrp->sync->withdraw);

			/* Add to synchronization entry for withdraw
			 * announcement. */
			bgp_adv_fifo_add_tail(&subgrp->sync->withdraw, adv);

			if (trigger_write)
				subgroup_trigger_write(subgrp);
		} else {
			/* Remove myself from adjacency. */
			RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);

			/* Free allocated information. */
			adj_free(adj);

			bgp_unlock_node(rn);
		}
	}

	subgrp->version = max(subgrp->version, rn->version);
}

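/*
 * Remove an adj-out entry from a route node and free it, cleaning up any
 * interned attribute and pending advertisement first. Unlike
 * bgp_adj_out_unset_subgroup(), this does not drop the route node lock;
 * that is left to the caller.
 */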
void bgp_adj_out_remove_subgroup(struct bgp_node *rn, struct bgp_adj_out *adj,
				 struct update_subgroup *subgrp)
{
	if (adj->attr)
		bgp_attr_unintern(&adj->attr);

	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);

	RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);
	adj_free(adj);
}

/*
 * Go through all the routes and clean up the adj/adv structures corresponding
 * to the subgroup.
 */
void subgroup_clear_table(struct update_subgroup *subgrp)
{
	struct bgp_adj_out *aout, *taout;

	SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) {
		struct bgp_node *rn = aout->rn;
		bgp_adj_out_remove_subgroup(rn, aout, subgrp);
		bgp_unlock_node(rn);
	}
}

/*
 * subgroup_announce_table
 *
 * Announce routes from the given table (or the subgroup's main RIB if no
 * table is passed) to the subgroup, applying outbound policy per route.
 */
void subgroup_announce_table(struct update_subgroup *subgrp,
			     struct bgp_table *table)
{
	struct bgp_node *rn;
	struct bgp_path_info *ri;
	struct attr attr;
	struct peer *peer;
	afi_t afi;
	safi_t safi;
	int addpath_capable;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	if (!table)
		table = peer->bgp->rib[afi][safi];

	if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN
	    && CHECK_FLAG(peer->af_flags[afi][safi],
			  PEER_FLAG_DEFAULT_ORIGINATE))
		subgroup_default_originate(subgrp, 0);

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) {
		const struct prefix *rn_p = bgp_node_get_prefix(rn);

		for (ri = bgp_node_get_bgp_path_info(rn); ri; ri = ri->next)

			if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED)
			    || (addpath_capable
				&& bgp_addpath_tx_path(
					   peer->addpath_type[afi][safi],
					   ri))) {
				if (subgroup_announce_check(rn, ri, subgrp,
							    rn_p, &attr))
					bgp_adj_out_set_subgroup(rn, subgrp,
								 &attr, ri);
				else
					bgp_adj_out_unset_subgroup(
						rn, subgrp, 1,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ri->tx_addpath));
			}
	}

	/*
	 * We walked through the whole table -- make sure our version number
	 * is consistent with the one on the table. This should allow
	 * subgroups to merge sooner if a peer comes up when the route node
	 * with the largest version is no longer in the table. This also
	 * covers the pathological case where all routes in the table have
	 * now been deleted.
	 */
	subgrp->version = max(subgrp->version, table->version);

	/*
	 * Start a task to merge the subgroup if necessary.
	 */
	update_subgroup_trigger_merge_check(subgrp, 0);
}

/*
 * subgroup_announce_route
 *
 * Refresh all routes out to a subgroup.
 */
void subgroup_announce_route(struct update_subgroup *subgrp)
{
	struct bgp_node *rn;
	struct bgp_table *table;
	struct peer *onlypeer;

	if (update_subgroup_needs_refresh(subgrp)) {
		update_subgroup_set_needs_refresh(subgrp, 0);
	}

	/*
	 * First update is deferred until ORF or ROUTE-REFRESH is received
	 */
	onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
						 : NULL);
	if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)]
						      [SUBGRP_SAFI(subgrp)],
				   PEER_STATUS_ORF_WAIT_REFRESH))
		return;

	if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN
	    && SUBGRP_SAFI(subgrp) != SAFI_ENCAP
	    && SUBGRP_SAFI(subgrp) != SAFI_EVPN)
		subgroup_announce_table(subgrp, NULL);
	else
		for (rn = bgp_table_top(update_subgroup_rib(subgrp)); rn;
		     rn = bgp_route_next(rn)) {
			table = bgp_node_get_bgp_table_info(rn);
			if (!table)
				continue;
			subgroup_announce_table(subgrp, table);
		}
}

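/*
 * Originate (or withdraw) the default route toward the subgroup's peers.
 * If a default-originate route-map is configured, the default is only
 * announced when some route in the RIB matches the route-map; otherwise
 * the deny result forces a withdraw.
 */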
void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
{
	struct bgp *bgp;
	struct attr attr;
	struct attr *new_attr = &attr;
	struct aspath *aspath;
	struct prefix p;
	struct peer *from;
	struct bgp_node *rn;
	struct peer *peer;
	route_map_result_t ret = RMAP_DENYMATCH;
	afi_t afi;
	safi_t safi;

	if (!subgrp)
		return;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	if (!(afi == AFI_IP || afi == AFI_IP6))
		return;

	bgp = peer->bgp;
	from = bgp->peer_self;

	bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
	aspath = attr.aspath;

	attr.local_pref = bgp->default_local_pref;

	memset(&p, 0, sizeof(p));
	p.family = afi2family(afi);
	p.prefixlen = 0;

	if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {
		/* IPv6 global nexthop must be included. */
		attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;

		/* If the peer is on a shared network and we have a link-local
		 * nexthop, set it. */
		if (peer->shared_network
		    && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local))
			attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
	}

	if (peer->default_rmap[afi][safi].name) {
		struct attr attr_tmp = attr;
		struct bgp_path_info bpi_rmap = {0};

		bpi_rmap.peer = bgp->peer_self;
		bpi_rmap.attr = &attr_tmp;

		SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);

		/* Iterate over the RIB to see if we can announce
		 * the default route. We announce the default
		 * route only if route-map has a match.
		 */
		for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
		     rn = bgp_route_next(rn)) {
			ret = route_map_apply(peer->default_rmap[afi][safi].map,
					      bgp_node_get_prefix(rn), RMAP_BGP,
					      &bpi_rmap);

			if (ret != RMAP_DENYMATCH)
				break;
		}
		bgp->peer_self->rmap_type = 0;
		new_attr = bgp_attr_intern(&attr_tmp);

		if (ret == RMAP_DENYMATCH) {
			bgp_attr_flush(&attr_tmp);
			withdraw = 1;
		}
	}

	if (withdraw) {
		if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
			subgroup_default_withdraw_packet(subgrp);
		UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE);
	} else {
		if (!CHECK_FLAG(subgrp->sflags,
				SUBGRP_STATUS_DEFAULT_ORIGINATE)) {

			if (CHECK_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_SHUTDOWN))
				bgp_attr_add_gshut_community(new_attr);

			SET_FLAG(subgrp->sflags,
				 SUBGRP_STATUS_DEFAULT_ORIGINATE);
			subgroup_default_update_packet(subgrp, new_attr, from);

			/* The 'neighbor x.x.x.x default-originate' default
			 * will act as an implicit withdraw for any previous
			 * UPDATEs sent for 0.0.0.0/0, so clear adj_out for
			 * the 0.0.0.0/0 prefix in the BGP table.
			 */
			memset(&p, 0, sizeof(p));
			p.family = afi2family(afi);
			p.prefixlen = 0;

			rn = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
					      &p, NULL);
			bgp_adj_out_unset_subgroup(
				rn, subgrp, 0,
				BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
		}
	}

	aspath_unintern(&aspath);
}

/*
 * Announce the BGP table to a subgroup.
 *
 * At startup, we try to optimize route announcement by coalescing the
 * peer-up events. This is done only the first time - from then on,
 * subgrp->v_coalesce will be set to zero and the normal logic
 * prevails.
 */
void subgroup_announce_all(struct update_subgroup *subgrp)
{
	if (!subgrp)
		return;

	/*
	 * If coalesce timer value is not set, announce routes immediately.
	 */
	if (!subgrp->v_coalesce) {
		if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " announcing all routes",
				   subgrp->update_group->id, subgrp->id);
		subgroup_announce_route(subgrp);
		return;
	}

	/*
	 * We should wait for the coalesce timer. Arm the timer if not done.
	 */
	if (!subgrp->t_coalesce) {
		thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
				      subgrp, subgrp->v_coalesce,
				      &subgrp->t_coalesce);
	}
}

/*
 * Go through all update subgroups and set up the adv queue for the
 * input route.
 */
void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
			  struct bgp_node *rn, struct bgp_path_info *pi)
{
	struct updwalk_context ctx;
	ctx.pi = pi;
	ctx.rn = rn;
	update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
}

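/*
 * "show" helpers: dump either the queued (not yet sent) advertisements or
 * the already advertised routes for the update-groups of an AFI/SAFI.
 */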
void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi,
				 struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE);
}

void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi,
				  struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED);
}

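/*
 * Trigger a full route announcement to every update-group, or only to
 * those whose peers are route-reflector clients.
 */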
void update_group_announce(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_walkcb, NULL);
}

void update_group_announce_rrclients(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL);
}