]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_updgrp_adv.c
Merge pull request #5805 from donaldsharp/babel_int_return
[mirror_frr.git] / bgpd / bgp_updgrp_adv.c
1 /**
2 * bgp_updgrp_adv.c: BGP update group advertisement and adjacency
3 * maintenance
4 *
5 *
6 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
7 *
8 * @author Avneesh Sachdev <avneesh@sproute.net>
9 * @author Rajesh Varadarajan <rajesh@sproute.net>
10 * @author Pradosh Mohapatra <pradosh@sproute.net>
11 *
12 * This file is part of GNU Zebra.
13 *
14 * GNU Zebra is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2, or (at your option) any
17 * later version.
18 *
19 * GNU Zebra is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; see the file COPYING; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <zebra.h>
30
31 #include "command.h"
32 #include "memory.h"
33 #include "prefix.h"
34 #include "hash.h"
35 #include "thread.h"
36 #include "queue.h"
37 #include "routemap.h"
38 #include "filter.h"
39
40 #include "bgpd/bgpd.h"
41 #include "bgpd/bgp_table.h"
42 #include "bgpd/bgp_debug.h"
43 #include "bgpd/bgp_route.h"
44 #include "bgpd/bgp_advertise.h"
45 #include "bgpd/bgp_attr.h"
46 #include "bgpd/bgp_aspath.h"
47 #include "bgpd/bgp_packet.h"
48 #include "bgpd/bgp_fsm.h"
49 #include "bgpd/bgp_mplsvpn.h"
50 #include "bgpd/bgp_updgrp.h"
51 #include "bgpd/bgp_advertise.h"
52 #include "bgpd/bgp_addpath.h"
53
54
55 /********************
56 * PRIVATE FUNCTIONS
57 ********************/
58 static int bgp_adj_out_compare(const struct bgp_adj_out *o1,
59 const struct bgp_adj_out *o2)
60 {
61 if (o1->subgroup < o2->subgroup)
62 return -1;
63
64 if (o1->subgroup > o2->subgroup)
65 return 1;
66
67 if (o1->addpath_tx_id < o2->addpath_tx_id)
68 return -1;
69
70 if (o1->addpath_tx_id > o2->addpath_tx_id)
71 return 1;
72
73 return 0;
74 }
75 RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare);
76
77 static inline struct bgp_adj_out *adj_lookup(struct bgp_node *rn,
78 struct update_subgroup *subgrp,
79 uint32_t addpath_tx_id)
80 {
81 struct bgp_adj_out lookup;
82
83 if (!rn || !subgrp)
84 return NULL;
85
86 /* update-groups that do not support addpath will pass 0 for
87 * addpath_tx_id. */
88 lookup.subgroup = subgrp;
89 lookup.addpath_tx_id = addpath_tx_id;
90
91 return RB_FIND(bgp_adj_out_rb, &rn->adj_out, &lookup);
92 }
93
/*
 * Free an adj-out object: unlink it from its subgroup's adjacency
 * train, decrement the subgroup's adjacency count and release the
 * memory.  Callers are responsible for having removed the adj from
 * the route node's RB tree first (and for dropping any node lock
 * taken on its behalf).
 */
static void adj_free(struct bgp_adj_out *adj)
{
	TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
	SUBGRP_DECR_STAT(adj->subgroup, adj_count);
	XFREE(MTYPE_BGP_ADJ_OUT, adj);
}
100
/*
 * Withdraw stale addpath advertisements on 'ctx->rn' for 'subgrp'.
 *
 * For each adj-out on the node owned by this subgroup, scan the node's
 * path list for a path whose addpath tx id matches; if none is found,
 * the path we previously advertised under that id is gone, so announce
 * NULL (a withdraw) for it.
 */
static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
					  struct update_subgroup *subgrp)
{
	struct bgp_adj_out *adj, *adj_next;
	uint32_t id;
	struct bgp_path_info *pi;
	afi_t afi = SUBGRP_AFI(subgrp);
	safi_t safi = SUBGRP_SAFI(subgrp);
	struct peer *peer = SUBGRP_PEER(subgrp);

	/* Look through all of the paths we have advertised for this rn and send
	 * a withdraw for the ones that are no longer present */
	RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->rn->adj_out, adj_next) {

		if (adj->subgroup == subgrp) {
			for (pi = bgp_node_get_bgp_path_info(ctx->rn);
			     pi; pi = pi->next) {
				id = bgp_addpath_id_for_peer(peer, afi, safi,
					&pi->tx_addpath);

				/* A path still carries this tx id; the
				 * adj is not stale. */
				if (id == adj->addpath_tx_id) {
					break;
				}
			}

			/* Loop ran off the end: no matching path left. */
			if (!pi) {
				subgroup_process_announce_selected(
					subgrp, NULL, ctx->rn,
					adj->addpath_tx_id);
			}
		}
	}
}
134
/*
 * update_group_af_walk() callback used when announcing a single route
 * (see group_announce_route()).
 *
 * For every subgroup that is not waiting on its coalesce timer:
 *  - addpath-capable groups: withdraw stale addpath advertisements,
 *    announce every non-bestpath path, then the bestpath last;
 *  - non-addpath groups: announce the bestpath (ctx->pi), or withdraw
 *    whatever was previously advertised when no bestpath exists.
 */
static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	struct bgp_path_info *pi;
	afi_t afi;
	safi_t safi;
	struct peer *peer;
	struct bgp_adj_out *adj, *adj_next;
	int addpath_capable;

	afi = UPDGRP_AFI(updgrp);
	safi = UPDGRP_SAFI(updgrp);
	peer = UPDGRP_PEER(updgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	if (BGP_DEBUG(update, UPDATE_OUT)) {
		char buf_prefix[PREFIX_STRLEN];
		prefix2str(&ctx->rn->p, buf_prefix, sizeof(buf_prefix));
		zlog_debug("%s: afi=%s, safi=%s, p=%s", __func__, afi2str(afi),
			   safi2str(safi), buf_prefix);
	}


	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {

		/*
		 * Skip the subgroups that have coalesce timer running. We will
		 * walk the entire prefix table for those subgroups when the
		 * coalesce timer fires.
		 */
		if (!subgrp->t_coalesce) {
			/* An update-group that uses addpath */
			if (addpath_capable) {
				subgrp_withdraw_stale_addpath(ctx, subgrp);

				for (pi = bgp_node_get_bgp_path_info(ctx->rn);
				     pi; pi = pi->next) {
					/* Skip the bestpath for now */
					if (pi == ctx->pi)
						continue;

					subgroup_process_announce_selected(
						subgrp, pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&pi->tx_addpath));
				}

				/* Process the bestpath last so the "show [ip]
				 * bgp neighbor x.x.x.x advertised"
				 * output shows the attributes from the bestpath
				 */
				if (ctx->pi)
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
			}

			/* An update-group that does not use addpath */
			else {
				if (ctx->pi) {
					subgroup_process_announce_selected(
						subgrp, ctx->pi, ctx->rn,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ctx->pi->tx_addpath));
				} else {
					/* Find the addpath_tx_id of the path we
					 * had advertised and
					 * send a withdraw */
					RB_FOREACH_SAFE (adj, bgp_adj_out_rb,
							 &ctx->rn->adj_out,
							 adj_next) {
						if (adj->subgroup == subgrp) {
							subgroup_process_announce_selected(
								subgrp, NULL,
								ctx->rn,
								adj->addpath_tx_id);
						}
					}
				}
			}
		}
	}

	return UPDWALK_CONTINUE;
}
225
/*
 * Dump a subgroup's adj-out entries to the vty.
 *
 * 'flags' selects what is shown per adjacency:
 *   UPDWALK_FLAGS_ADVQUEUE   - attributes still queued for advertisement
 *   UPDWALK_FLAGS_ADVERTISED - attributes already advertised (adj->attr)
 */
static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
				 struct vty *vty, uint8_t flags)
{
	struct bgp_table *table;
	struct bgp_adj_out *adj;
	unsigned long output_count;
	struct bgp_node *rn;
	int header1 = 1;
	struct bgp *bgp;
	int header2 = 1;

	bgp = SUBGRP_INST(subgrp);
	if (!bgp)
		return;

	table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];

	output_count = 0;

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
		RB_FOREACH (adj, bgp_adj_out_rb, &rn->adj_out)
			if (adj->subgroup == subgrp) {
				/* Print the banners lazily, only once a
				 * matching entry is actually found. */
				if (header1) {
					vty_out(vty,
						"BGP table version is %" PRIu64
						", local router ID is %s\n",
						table->version,
						inet_ntoa(bgp->router_id));
					vty_out(vty, BGP_SHOW_SCODE_HEADER);
					vty_out(vty, BGP_SHOW_OCODE_HEADER);
					header1 = 0;
				}
				if (header2) {
					vty_out(vty, BGP_SHOW_HEADER);
					header2 = 0;
				}
				if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
				    && adj->adv->baa) {
					route_vty_out_tmp(vty, &rn->p,
							  adj->adv->baa->attr,
							  SUBGRP_SAFI(subgrp),
							  0, NULL);
					output_count++;
				}
				if ((flags & UPDWALK_FLAGS_ADVERTISED)
				    && adj->attr) {
					route_vty_out_tmp(
						vty, &rn->p, adj->attr,
						SUBGRP_SAFI(subgrp), 0, NULL);
					output_count++;
				}
			}
	if (output_count != 0)
		vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
}
281
282 static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg)
283 {
284 struct updwalk_context *ctx = arg;
285 struct update_subgroup *subgrp;
286 struct vty *vty;
287
288 vty = ctx->vty;
289 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
290 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
291 continue;
292 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
293 updgrp->id, subgrp->id);
294 subgrp_show_adjq_vty(subgrp, vty, ctx->flags);
295 }
296 return UPDWALK_CONTINUE;
297 }
298
299 static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
300 struct vty *vty, uint64_t id, uint8_t flags)
301 {
302 struct updwalk_context ctx;
303 memset(&ctx, 0, sizeof(ctx));
304 ctx.vty = vty;
305 ctx.subgrp_id = id;
306 ctx.flags = flags;
307
308 update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
309 }
310
311 static int subgroup_coalesce_timer(struct thread *thread)
312 {
313 struct update_subgroup *subgrp;
314
315 subgrp = THREAD_ARG(thread);
316 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
317 zlog_debug("u%" PRIu64 ":s%" PRIu64
318 " announcing routes upon coalesce timer expiry(%u ms)",
319 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
320 subgrp->v_coalesce),
321 subgrp->t_coalesce = NULL;
322 subgrp->v_coalesce = 0;
323 subgroup_announce_route(subgrp);
324
325
326 /* While the announce_route() may kick off the route advertisement timer
327 * for
328 * the members of the subgroup, we'd like to send the initial updates
329 * much
330 * faster (i.e., without enforcing MRAI). Also, if there were no routes
331 * to
332 * announce, this is the method currently employed to trigger the EOR.
333 */
334 if (!bgp_update_delay_active(SUBGRP_INST(subgrp))) {
335 struct peer_af *paf;
336 struct peer *peer;
337
338 SUBGRP_FOREACH_PEER (subgrp, paf) {
339 peer = PAF_PEER(paf);
340 BGP_TIMER_OFF(peer->t_routeadv);
341 BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
342 }
343 }
344
345 return 0;
346 }
347
348 static int update_group_announce_walkcb(struct update_group *updgrp, void *arg)
349 {
350 struct update_subgroup *subgrp;
351
352 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
353 subgroup_announce_all(subgrp);
354 }
355
356 return UPDWALK_CONTINUE;
357 }
358
359 static int update_group_announce_rrc_walkcb(struct update_group *updgrp,
360 void *arg)
361 {
362 struct update_subgroup *subgrp;
363 afi_t afi;
364 safi_t safi;
365 struct peer *peer;
366
367 afi = UPDGRP_AFI(updgrp);
368 safi = UPDGRP_SAFI(updgrp);
369 peer = UPDGRP_PEER(updgrp);
370
371 /* Only announce if this is a group of route-reflector-clients */
372 if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) {
373 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
374 subgroup_announce_all(subgrp);
375 }
376 }
377
378 return UPDWALK_CONTINUE;
379 }
380
381 /********************
382 * PUBLIC FUNCTIONS
383 ********************/
384
385 /**
386 * Allocate an adj-out object. Do proper initialization of its fields,
387 * primarily its association with the subgroup and the prefix.
388 */
389 struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
390 struct bgp_node *rn,
391 uint32_t addpath_tx_id)
392 {
393 struct bgp_adj_out *adj;
394
395 adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
396 adj->subgroup = subgrp;
397 adj->addpath_tx_id = addpath_tx_id;
398
399 if (rn) {
400 RB_INSERT(bgp_adj_out_rb, &rn->adj_out, adj);
401 bgp_lock_node(rn);
402 adj->rn = rn;
403 }
404
405 TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
406 SUBGRP_INCR_STAT(subgrp, adj_count);
407 return adj;
408 }
409
410
/*
 * Unlink the advertisement attached to 'adj' from the subgroup's
 * synchronization FIFOs and free it.
 *
 * Returns the next advertisement that shares the same interned
 * attribute (so a caller draining the update FIFO can continue with
 * it), or NULL when there is none / this was a withdraw.
 */
struct bgp_advertise *
bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
			     struct bgp_adj_out *adj)
{
	struct bgp_advertise *adv;
	struct bgp_advertise_attr *baa;
	struct bgp_advertise *next;
	struct bgp_adv_fifo_head *fhead;

	adv = adj->adv;
	baa = adv->baa;
	next = NULL;

	/* A non-NULL baa means the adv sits on the update FIFO;
	 * otherwise it is on the withdraw FIFO. */
	if (baa) {
		fhead = &subgrp->sync->update;

		/* Unlink myself from advertise attribute FIFO. */
		bgp_advertise_delete(baa, adv);

		/* Fetch next advertise candidate. */
		next = baa->adv;

		/* Unintern BGP advertise attribute. */
		bgp_advertise_unintern(subgrp->hash, baa);
	} else
		fhead = &subgrp->sync->withdraw;


	/* Unlink myself from advertisement FIFO. */
	bgp_adv_fifo_del(fhead, adv);

	/* Free memory. */
	bgp_advertise_free(adj->adv);
	adj->adv = NULL;

	return next;
}
448
/*
 * Schedule 'path' (with outgoing attribute 'attr') for advertisement
 * to 'subgrp' for prefix node 'rn': find or create the matching
 * adj-out entry, supersede any advertisement already pending on it,
 * and enqueue the new advertisement on the subgroup's update FIFO.
 */
void bgp_adj_out_set_subgroup(struct bgp_node *rn,
			      struct update_subgroup *subgrp, struct attr *attr,
			      struct bgp_path_info *path)
{
	struct bgp_adj_out *adj = NULL;
	struct bgp_advertise *adv;
	struct peer *peer;
	afi_t afi;
	safi_t safi;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Look for adjacency information. */
	adj = adj_lookup(
		rn, subgrp,
		bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));

	/* No adjacency yet for this (subgroup, addpath id): create one. */
	if (!adj) {
		adj = bgp_adj_out_alloc(
			subgrp, rn,
			bgp_addpath_id_for_peer(peer, afi, safi,
						&path->tx_addpath));
		if (!adj)
			return;
	}

	/* Any still-pending advertisement is superseded by this one. */
	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);
	adj->adv = bgp_advertise_new();

	adv = adj->adv;
	adv->rn = rn;
	assert(adv->pathi == NULL);
	/* bgp_path_info adj_out reference */
	adv->pathi = bgp_path_info_lock(path);

	/* Intern the outgoing attribute; with no attribute, hang an
	 * empty attribute container off the advertisement. */
	if (attr)
		adv->baa = bgp_advertise_intern(subgrp->hash, attr);
	else
		adv->baa = baa_new();
	adv->adj = adj;

	/* Add new advertisement to advertisement attribute list. */
	bgp_advertise_add(adv->baa, adv);

	/*
	 * If the update adv list is empty, trigger the member peers'
	 * mrai timers so the socket writes can happen.
	 */
	if (!bgp_adv_fifo_count(&subgrp->sync->update)) {
		struct peer_af *paf;

		SUBGRP_FOREACH_PEER (subgrp, paf) {
			bgp_adjust_routeadv(PAF_PEER(paf));
		}
	}

	bgp_adv_fifo_add_tail(&subgrp->sync->update, adv);

	subgrp->version = max(subgrp->version, rn->version);
}
515
516 /* The only time 'withdraw' will be false is if we are sending
517 * the "neighbor x.x.x.x default-originate" default and need to clear
518 * bgp_adj_out for the 0.0.0.0/0 route in the BGP table.
519 */
void bgp_adj_out_unset_subgroup(struct bgp_node *rn,
				struct update_subgroup *subgrp, char withdraw,
				uint32_t addpath_tx_id)
{
	struct bgp_adj_out *adj;
	struct bgp_advertise *adv;
	bool trigger_write;

	if (DISABLE_BGP_ANNOUNCE)
		return;

	/* Lookup existing adjacency */
	if ((adj = adj_lookup(rn, subgrp, addpath_tx_id)) != NULL) {
		/* Clean up previous advertisement. */
		if (adj->adv)
			bgp_advertise_clean_subgroup(subgrp, adj);

		/* The prefix was actually advertised (adj->attr set) and a
		 * withdraw was requested: queue a withdraw. */
		if (adj->attr && withdraw) {
			/* We need advertisement structure. */
			adj->adv = bgp_advertise_new();
			adv = adj->adv;
			adv->rn = rn;
			adv->adj = adj;

			/* Note if we need to trigger a packet write */
			trigger_write =
				!bgp_adv_fifo_count(&subgrp->sync->withdraw);

			/* Add to synchronization entry for withdraw
			 * announcement. */
			bgp_adv_fifo_add_tail(&subgrp->sync->withdraw, adv);

			if (trigger_write)
				subgroup_trigger_write(subgrp);
		} else {
			/* Remove myself from adjacency. */
			RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);

			/* Free allocated information. */
			adj_free(adj);

			/* Drop the node lock taken by bgp_adj_out_alloc(). */
			bgp_unlock_node(rn);
		}
	}

	subgrp->version = max(subgrp->version, rn->version);
}
567
/*
 * Completely remove an adj-out entry: release its interned attribute,
 * clean any pending advertisement off the subgroup's FIFOs, unlink the
 * entry from the route node's RB tree and free it.  Note the node lock
 * taken by bgp_adj_out_alloc() is NOT released here; callers (e.g.
 * subgroup_clear_table()) drop it themselves.
 */
void bgp_adj_out_remove_subgroup(struct bgp_node *rn, struct bgp_adj_out *adj,
				 struct update_subgroup *subgrp)
{
	if (adj->attr)
		bgp_attr_unintern(&adj->attr);

	if (adj->adv)
		bgp_advertise_clean_subgroup(subgrp, adj);

	RB_REMOVE(bgp_adj_out_rb, &rn->adj_out, adj);
	adj_free(adj);
}
580
581 /*
582 * Go through all the routes and clean up the adj/adv structures corresponding
583 * to the subgroup.
584 */
585 void subgroup_clear_table(struct update_subgroup *subgrp)
586 {
587 struct bgp_adj_out *aout, *taout;
588
589 SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) {
590 struct bgp_node *rn = aout->rn;
591 bgp_adj_out_remove_subgroup(rn, aout, subgrp);
592 bgp_unlock_node(rn);
593 }
594 }
595
596 /*
597 * subgroup_announce_table
598 */
/*
 * Walk 'table' (or the peer's own RIB when NULL) and advertise to the
 * subgroup every selected path -- plus all addpath-eligible paths when
 * the peer is addpath capable.  Paths failing the outbound policy
 * check are withdrawn.  The default route is originated first when
 * default-originate is configured.
 */
void subgroup_announce_table(struct update_subgroup *subgrp,
			     struct bgp_table *table)
{
	struct bgp_node *rn;
	struct bgp_path_info *ri;
	struct attr attr;
	struct peer *peer;
	afi_t afi;
	safi_t safi;
	int addpath_capable;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

	/* Labeled-unicast announcements come out of the unicast table. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	if (!table)
		table = peer->bgp->rib[afi][safi];

	/* Default-originate does not apply to VPN/ENCAP/EVPN families. */
	if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN
	    && CHECK_FLAG(peer->af_flags[afi][safi],
			  PEER_FLAG_DEFAULT_ORIGINATE))
		subgroup_default_originate(subgrp, 0);

	for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
		for (ri = bgp_node_get_bgp_path_info(rn); ri; ri = ri->next)

			if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED)
			    || (addpath_capable
				&& bgp_addpath_tx_path(
					   peer->addpath_type[afi][safi],
					   ri))) {
				/* 'attr' is filled in by the policy check. */
				if (subgroup_announce_check(rn, ri, subgrp,
							    &rn->p, &attr))
					bgp_adj_out_set_subgroup(rn, subgrp,
								 &attr, ri);
				else
					bgp_adj_out_unset_subgroup(
						rn, subgrp, 1,
						bgp_addpath_id_for_peer(
							peer, afi, safi,
							&ri->tx_addpath));
			}

	/*
	 * We walked through the whole table -- make sure our version number
	 * is consistent with the one on the table. This should allow
	 * subgroups to merge sooner if a peer comes up when the route node
	 * with the largest version is no longer in the table. This also
	 * covers the pathological case where all routes in the table have
	 * now been deleted.
	 */
	subgrp->version = max(subgrp->version, table->version);

	/*
	 * Start a task to merge the subgroup if necessary.
	 */
	update_subgroup_trigger_merge_check(subgrp, 0);
}
661
662 /*
663 * subgroup_announce_route
664 *
665 * Refresh all routes out to a subgroup.
666 */
void subgroup_announce_route(struct update_subgroup *subgrp)
{
	struct bgp_node *rn;
	struct bgp_table *table;
	struct peer *onlypeer;

	/* We are about to walk the whole table; any pending refresh
	 * request is satisfied by that. */
	if (update_subgroup_needs_refresh(subgrp)) {
		update_subgroup_set_needs_refresh(subgrp, 0);
	}

	/*
	 * First update is deferred until ORF or ROUTE-REFRESH is received
	 */
	onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
						 : NULL);
	if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)]
						      [SUBGRP_SAFI(subgrp)],
				   PEER_STATUS_ORF_WAIT_REFRESH))
		return;

	/* VPN/ENCAP/EVPN RIBs are two-level tables: walk each per-RD
	 * sub-table; all other families announce from the main table. */
	if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN
	    && SUBGRP_SAFI(subgrp) != SAFI_ENCAP
	    && SUBGRP_SAFI(subgrp) != SAFI_EVPN)
		subgroup_announce_table(subgrp, NULL);
	else
		for (rn = bgp_table_top(update_subgroup_rib(subgrp)); rn;
		     rn = bgp_route_next(rn)) {
			table = bgp_node_get_bgp_table_info(rn);
			if (!table)
				continue;
			subgroup_announce_table(subgrp, table);
		}
}
700
/*
 * Originate (or withdraw) the default route towards a subgroup.
 *
 * If a default-originate route-map is configured, the default is only
 * announced when some route in the RIB matches the route-map; a deny
 * result everywhere converts the request into a withdraw.  When
 * 'withdraw' is set (by the caller or forced by the route-map), a
 * previously originated default is withdrawn.
 */
void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
{
	struct bgp *bgp;
	struct attr attr;
	struct attr *new_attr = &attr;
	struct aspath *aspath;
	struct prefix p;
	struct peer *from;
	struct bgp_node *rn;
	struct peer *peer;
	route_map_result_t ret = RMAP_DENYMATCH;
	afi_t afi;
	safi_t safi;

	if (!subgrp)
		return;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);

	/* Only IPv4/IPv6 families carry a default route. */
	if (!(afi == AFI_IP || afi == AFI_IP6))
		return;

	bgp = peer->bgp;
	from = bgp->peer_self;

	/* Start from a self-originated IGP attribute set. */
	bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
	aspath = attr.aspath;

	attr.local_pref = bgp->default_local_pref;

	/* Build the all-zeroes default prefix for this family. */
	memset(&p, 0, sizeof(p));
	p.family = afi2family(afi);
	p.prefixlen = 0;

	if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {
		/* IPv6 global nexthop must be included. */
		attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;

		/* If the peer is on a shared network and we have a
		   link-local nexthop, set it. */
		if (peer->shared_network
		    && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local))
			attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
	}

	if (peer->default_rmap[afi][safi].name) {
		struct attr attr_tmp = attr;
		struct bgp_path_info bpi_rmap = {0};

		bpi_rmap.peer = bgp->peer_self;
		bpi_rmap.attr = &attr_tmp;

		SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);

		/* Iterate over the RIB to see if we can announce
		 * the default route. We announce the default
		 * route only if route-map has a match.
		 */
		for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
		     rn = bgp_route_next(rn)) {
			ret = route_map_apply(peer->default_rmap[afi][safi].map,
					      &rn->p, RMAP_BGP, &bpi_rmap);

			if (ret != RMAP_DENYMATCH)
				break;
		}
		bgp->peer_self->rmap_type = 0;
		new_attr = bgp_attr_intern(&attr_tmp);

		/* Route-map denied everywhere: force a withdraw. */
		if (ret == RMAP_DENYMATCH) {
			bgp_attr_flush(&attr_tmp);
			withdraw = 1;
		}
	}

	if (withdraw) {
		/* Only send a withdraw if we had originated before. */
		if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
			subgroup_default_withdraw_packet(subgrp);
		UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE);
	} else {
		if (!CHECK_FLAG(subgrp->sflags,
				SUBGRP_STATUS_DEFAULT_ORIGINATE)) {

			if (CHECK_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_SHUTDOWN))
				bgp_attr_add_gshut_community(new_attr);

			SET_FLAG(subgrp->sflags,
				 SUBGRP_STATUS_DEFAULT_ORIGINATE);
			subgroup_default_update_packet(subgrp, new_attr, from);

			/* The 'neighbor x.x.x.x default-originate' default will
			 * act as an
			 * implicit withdraw for any previous UPDATEs sent for
			 * 0.0.0.0/0 so
			 * clear adj_out for the 0.0.0.0/0 prefix in the BGP
			 * table.
			 */
			memset(&p, 0, sizeof(p));
			p.family = afi2family(afi);
			p.prefixlen = 0;

			rn = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
					      &p, NULL);
			bgp_adj_out_unset_subgroup(
				rn, subgrp, 0,
				BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
		}
	}

	aspath_unintern(&aspath);
}
814
815 /*
816 * Announce the BGP table to a subgroup.
817 *
818 * At startup, we try to optimize route announcement by coalescing the
819 * peer-up events. This is done only the first time - from then on,
820 * subgrp->v_coalesce will be set to zero and the normal logic
821 * prevails.
822 */
823 void subgroup_announce_all(struct update_subgroup *subgrp)
824 {
825 if (!subgrp)
826 return;
827
828 /*
829 * If coalesce timer value is not set, announce routes immediately.
830 */
831 if (!subgrp->v_coalesce) {
832 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
833 zlog_debug("u%" PRIu64 ":s%" PRIu64
834 " announcing all routes",
835 subgrp->update_group->id, subgrp->id);
836 subgroup_announce_route(subgrp);
837 return;
838 }
839
840 /*
841 * We should wait for the coalesce timer. Arm the timer if not done.
842 */
843 if (!subgrp->t_coalesce) {
844 thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
845 subgrp, subgrp->v_coalesce,
846 &subgrp->t_coalesce);
847 }
848 }
849
850 /*
851 * Go through all update subgroups and set up the adv queue for the
852 * input route.
853 */
854 void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
855 struct bgp_node *rn, struct bgp_path_info *pi)
856 {
857 struct updwalk_context ctx;
858 ctx.pi = pi;
859 ctx.rn = rn;
860 update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
861 }
862
/* Show advertisements still queued (not yet sent) for the given
 * update-group/subgroup id. */
void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi,
				 struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE);
}
868
/* Show routes already advertised for the given update-group/subgroup
 * id. */
void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi,
				  struct vty *vty, uint64_t id)
{
	updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED);
}
874
/* Announce all routes to every update group (all subgroups) of 'bgp'. */
void update_group_announce(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_walkcb, NULL);
}
879
/* Announce all routes, but only to update groups whose peers are
 * route-reflector clients. */
void update_group_announce_rrclients(struct bgp *bgp)
{
	update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL);
}