/**
 * bgp_updgrp_adv.c: BGP update group advertisement and adjacency
 * maintenance
 *
 *
 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
 *
 * @author Avneesh Sachdev <avneesh@sproute.net>
 * @author Rajesh Varadarajan <rajesh@sproute.net>
 * @author Pradosh Mohapatra <pradosh@sproute.net>
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "command.h"
#include "memory.h"
#include "prefix.h"
#include "hash.h"
#include "thread.h"
#include "queue.h"
#include "routemap.h"
#include "filter.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_advertise.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_packet.h"
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_updgrp.h"
#include "bgpd/bgp_addpath.h"


/********************
 * PRIVATE FUNCTIONS
 ********************/

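/*
 * Look up the adj-out entry that a subgroup holds for a route node. For
 * addpath-capable peers the addpath_tx_id must also match; otherwise it is
 * ignored. Returns NULL if no matching entry exists.
 */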
static inline struct bgp_adj_out *adj_lookup(struct bgp_node *rn,
                                             struct update_subgroup *subgrp,
                                             uint32_t addpath_tx_id)
{
        struct bgp_adj_out *adj;
        struct peer *peer;
        afi_t afi;
        safi_t safi;
        int addpath_capable;

        if (!rn || !subgrp)
                return NULL;

        peer = SUBGRP_PEER(subgrp);
        afi = SUBGRP_AFI(subgrp);
        safi = SUBGRP_SAFI(subgrp);
        addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

        /* update-groups that do not support addpath will pass 0 for
         * addpath_tx_id, so do not bother matching against it. */
        for (adj = rn->adj_out; adj; adj = adj->next) {
                if (adj->subgroup == subgrp) {
                        if (addpath_capable) {
                                if (adj->addpath_tx_id == addpath_tx_id) {
                                        break;
                                }
                        } else {
                                break;
                        }
                }
        }

        return adj;
}

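/*
 * Unlink an adj-out entry from its subgroup's adjacency list, update the
 * subgroup's adj_count statistic and free the structure.
 */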
static void adj_free(struct bgp_adj_out *adj)
{
        TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
        SUBGRP_DECR_STAT(adj->subgroup, adj_count);
        XFREE(MTYPE_BGP_ADJ_OUT, adj);
}

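/*
 * Walk the adj-out entries the subgroup holds for the route node in the walk
 * context and send a withdraw for every addpath advertisement whose path is
 * no longer present on that node.
 */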
static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
                                          struct update_subgroup *subgrp)
{
        struct bgp_adj_out *adj, *adj_next;
        uint32_t id;
        struct bgp_path_info *pi;
        afi_t afi = SUBGRP_AFI(subgrp);
        safi_t safi = SUBGRP_SAFI(subgrp);
        struct peer *peer = SUBGRP_PEER(subgrp);

        /* Look through all of the paths we have advertised for this rn and
         * send a withdraw for the ones that are no longer present */
        for (adj = ctx->rn->adj_out; adj; adj = adj_next) {
                adj_next = adj->next;

                if (adj->subgroup == subgrp) {
                        for (pi = bgp_node_get_bgp_path_info(ctx->rn); pi;
                             pi = pi->next) {
                                id = bgp_addpath_id_for_peer(peer, afi, safi,
                                                             &pi->tx_addpath);

                                if (id == adj->addpath_tx_id) {
                                        break;
                                }
                        }

                        if (!pi) {
                                subgroup_process_announce_selected(
                                        subgrp, NULL, ctx->rn,
                                        adj->addpath_tx_id);
                        }
                }
        }
}

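/*
 * Update-group walk callback for group_announce_route(): (re)announce the
 * selected path(s) for the route node in the walk context to every subgroup
 * whose coalesce timer is not running, and withdraw anything that is no
 * longer advertised.
 */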
static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
{
        struct updwalk_context *ctx = arg;
        struct update_subgroup *subgrp;
        struct bgp_path_info *pi;
        afi_t afi;
        safi_t safi;
        struct peer *peer;
        struct bgp_adj_out *adj, *adj_next;
        int addpath_capable;

        afi = UPDGRP_AFI(updgrp);
        safi = UPDGRP_SAFI(updgrp);
        peer = UPDGRP_PEER(updgrp);
        addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

        if (BGP_DEBUG(update, UPDATE_OUT)) {
                char buf_prefix[PREFIX_STRLEN];
                prefix2str(&ctx->rn->p, buf_prefix, sizeof(buf_prefix));
                zlog_debug("%s: afi=%s, safi=%s, p=%s", __func__, afi2str(afi),
                           safi2str(safi), buf_prefix);
        }


        UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {

                /*
                 * Skip the subgroups that have coalesce timer running. We will
                 * walk the entire prefix table for those subgroups when the
                 * coalesce timer fires.
                 */
                if (!subgrp->t_coalesce) {
                        /* An update-group that uses addpath */
                        if (addpath_capable) {
                                subgrp_withdraw_stale_addpath(ctx, subgrp);

                                for (pi = bgp_node_get_bgp_path_info(ctx->rn);
                                     pi; pi = pi->next) {
                                        /* Skip the bestpath for now */
                                        if (pi == ctx->pi)
                                                continue;

                                        subgroup_process_announce_selected(
                                                subgrp, pi, ctx->rn,
                                                bgp_addpath_id_for_peer(
                                                        peer, afi, safi,
                                                        &pi->tx_addpath));
                                }

                                /* Process the bestpath last so the
                                 * "show [ip] bgp neighbor x.x.x.x advertised"
                                 * output shows the attributes from the
                                 * bestpath.
                                 */
                                if (ctx->pi)
                                        subgroup_process_announce_selected(
                                                subgrp, ctx->pi, ctx->rn,
                                                bgp_addpath_id_for_peer(
                                                        peer, afi, safi,
                                                        &ctx->pi->tx_addpath));
                        }

                        /* An update-group that does not use addpath */
                        else {
                                if (ctx->pi) {
                                        subgroup_process_announce_selected(
                                                subgrp, ctx->pi, ctx->rn,
                                                bgp_addpath_id_for_peer(
                                                        peer, afi, safi,
                                                        &ctx->pi->tx_addpath));
                                } else {
                                        /* Find the addpath_tx_id of the path
                                         * we had advertised and send a
                                         * withdraw */
                                        for (adj = ctx->rn->adj_out; adj;
                                             adj = adj_next) {
                                                adj_next = adj->next;

                                                if (adj->subgroup == subgrp) {
                                                        subgroup_process_announce_selected(
                                                                subgrp, NULL,
                                                                ctx->rn,
                                                                adj->addpath_tx_id);
                                                }
                                        }
                                }
                        }
                }
        }

        return UPDWALK_CONTINUE;
}

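/*
 * Dump a subgroup's adj-out to the vty: the prefixes sitting on the
 * advertisement queue, the prefixes already advertised, or both, depending
 * on the UPDWALK_FLAGS_* bits in 'flags'.
 */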
static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
                                 struct vty *vty, uint8_t flags)
{
        struct bgp_table *table;
        struct bgp_adj_out *adj;
        unsigned long output_count;
        struct bgp_node *rn;
        int header1 = 1;
        struct bgp *bgp;
        int header2 = 1;

        bgp = SUBGRP_INST(subgrp);
        if (!bgp)
                return;

        table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];

        output_count = 0;

        for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
                for (adj = rn->adj_out; adj; adj = adj->next)
                        if (adj->subgroup == subgrp) {
                                if (header1) {
                                        vty_out(vty,
                                                "BGP table version is %" PRIu64
                                                ", local router ID is %s\n",
                                                table->version,
                                                inet_ntoa(bgp->router_id));
                                        vty_out(vty, BGP_SHOW_SCODE_HEADER);
                                        vty_out(vty, BGP_SHOW_OCODE_HEADER);
                                        header1 = 0;
                                }
                                if (header2) {
                                        vty_out(vty, BGP_SHOW_HEADER);
                                        header2 = 0;
                                }
                                if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
                                    && adj->adv->baa) {
                                        route_vty_out_tmp(vty, &rn->p,
                                                          adj->adv->baa->attr,
                                                          SUBGRP_SAFI(subgrp),
                                                          0, NULL);
                                        output_count++;
                                }
                                if ((flags & UPDWALK_FLAGS_ADVERTISED)
                                    && adj->attr) {
                                        route_vty_out_tmp(
                                                vty, &rn->p, adj->attr,
                                                SUBGRP_SAFI(subgrp), 0, NULL);
                                        output_count++;
                                }
                        }
        if (output_count != 0)
                vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
}

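/*
 * Update-group walk callback for updgrp_show_adj(): print the group and
 * subgroup ids and dump each subgroup's adj-out, optionally restricted to a
 * single subgroup id.
 */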
static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg)
{
        struct updwalk_context *ctx = arg;
        struct update_subgroup *subgrp;
        struct vty *vty;

        vty = ctx->vty;
        UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
                if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
                        continue;
                vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
                        updgrp->id, subgrp->id);
                subgrp_show_adjq_vty(subgrp, vty, ctx->flags);
        }
        return UPDWALK_CONTINUE;
}

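/*
 * Walk all update groups of the given AFI/SAFI and dump their adjacency
 * information to the vty.
 */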
static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
                            struct vty *vty, uint64_t id, uint8_t flags)
{
        struct updwalk_context ctx;
        memset(&ctx, 0, sizeof(ctx));
        ctx.vty = vty;
        ctx.subgrp_id = id;
        ctx.flags = flags;

        update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
}

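/*
 * Timer callback: a subgroup's coalesce timer has expired. Announce the
 * table to the subgroup and, unless update-delay is in effect, fire the
 * members' route advertisement timers right away so the initial updates
 * (and EOR) are not held back by MRAI.
 */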
static int subgroup_coalesce_timer(struct thread *thread)
{
        struct update_subgroup *subgrp;

        subgrp = THREAD_ARG(thread);
        if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
                zlog_debug("u%" PRIu64 ":s%" PRIu64
                           " announcing routes upon coalesce timer expiry",
                           (SUBGRP_UPDGRP(subgrp))->id, subgrp->id);
        subgrp->t_coalesce = NULL;
        subgrp->v_coalesce = 0;
        subgroup_announce_route(subgrp);


        /* While the announce_route() may kick off the route advertisement
         * timer for the members of the subgroup, we'd like to send the
         * initial updates much faster (i.e., without enforcing MRAI). Also,
         * if there were no routes to announce, this is the method currently
         * employed to trigger the EOR.
         */
        if (!bgp_update_delay_active(SUBGRP_INST(subgrp))) {
                struct peer_af *paf;
                struct peer *peer;

                SUBGRP_FOREACH_PEER (subgrp, paf) {
                        peer = PAF_PEER(paf);
                        BGP_TIMER_OFF(peer->t_routeadv);
                        BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
                }
        }

        return 0;
}

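/*
 * Update-group walk callback for update_group_announce(): announce all
 * routes to each subgroup of the group (possibly deferred by the coalesce
 * timer).
 */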
static int update_group_announce_walkcb(struct update_group *updgrp, void *arg)
{
        struct update_subgroup *subgrp;

        UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
                subgroup_announce_all(subgrp);
        }

        return UPDWALK_CONTINUE;
}

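/*
 * Update-group walk callback for update_group_announce_rrclients(): as
 * above, but only for groups whose peers are route-reflector clients.
 */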
static int update_group_announce_rrc_walkcb(struct update_group *updgrp,
                                            void *arg)
{
        struct update_subgroup *subgrp;
        afi_t afi;
        safi_t safi;
        struct peer *peer;

        afi = UPDGRP_AFI(updgrp);
        safi = UPDGRP_SAFI(updgrp);
        peer = UPDGRP_PEER(updgrp);

        /* Only announce if this is a group of route-reflector-clients */
        if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) {
                UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
                        subgroup_announce_all(subgrp);
                }
        }

        return UPDWALK_CONTINUE;
}

/********************
 * PUBLIC FUNCTIONS
 ********************/

/**
 * Allocate an adj-out object. Do proper initialization of its fields,
 * primarily its association with the subgroup and the prefix.
 */
struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
                                      struct bgp_node *rn,
                                      uint32_t addpath_tx_id)
{
        struct bgp_adj_out *adj;

        adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
        adj->subgroup = subgrp;
        if (rn) {
                BGP_ADJ_OUT_ADD(rn, adj);
                bgp_lock_node(rn);
                adj->rn = rn;
        }

        adj->addpath_tx_id = addpath_tx_id;
        TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
        SUBGRP_INCR_STAT(subgrp, adj_count);
        return adj;
}

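/*
 * Unlink and free the pending advertisement attached to an adj-out entry,
 * removing it from the subgroup's update or withdraw FIFO. Returns the next
 * advertisement (if any) that carries the same interned attribute.
 */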
struct bgp_advertise *
bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
                             struct bgp_adj_out *adj)
{
        struct bgp_advertise *adv;
        struct bgp_advertise_attr *baa;
        struct bgp_advertise *next;
        struct bgp_advertise_fifo *fhead;

        adv = adj->adv;
        baa = adv->baa;
        next = NULL;

        if (baa) {
                fhead = &subgrp->sync->update;

                /* Unlink myself from advertise attribute FIFO. */
                bgp_advertise_delete(baa, adv);

                /* Fetch next advertise candidate. */
                next = baa->adv;

                /* Unintern BGP advertise attribute. */
                bgp_advertise_unintern(subgrp->hash, baa);
        } else
                fhead = &subgrp->sync->withdraw;


        /* Unlink myself from advertisement FIFO. */
        BGP_ADV_FIFO_DEL(fhead, adv);

        /* Free memory. */
        bgp_advertise_free(adj->adv);
        adj->adv = NULL;

        return next;
}

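/*
 * Install or refresh the adj-out entry for 'path' in the given subgroup:
 * find or allocate the adj-out for the path's addpath id, replace any
 * pending advertisement, intern the attributes and enqueue the new
 * advertisement on the subgroup's update FIFO.
 */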
void bgp_adj_out_set_subgroup(struct bgp_node *rn,
                              struct update_subgroup *subgrp, struct attr *attr,
                              struct bgp_path_info *path)
{
        struct bgp_adj_out *adj = NULL;
        struct bgp_advertise *adv;
        struct peer *peer;
        afi_t afi;
        safi_t safi;

        peer = SUBGRP_PEER(subgrp);
        afi = SUBGRP_AFI(subgrp);
        safi = SUBGRP_SAFI(subgrp);

        if (DISABLE_BGP_ANNOUNCE)
                return;

        /* Look for adjacency information. */
        adj = adj_lookup(
                rn, subgrp,
                bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));

        if (!adj) {
                adj = bgp_adj_out_alloc(
                        subgrp, rn,
                        bgp_addpath_id_for_peer(peer, afi, safi,
                                                &path->tx_addpath));
                if (!adj)
                        return;
        }

        if (adj->adv)
                bgp_advertise_clean_subgroup(subgrp, adj);
        adj->adv = bgp_advertise_new();

        adv = adj->adv;
        adv->rn = rn;
        assert(adv->pathi == NULL);
        /* bgp_path_info adj_out reference */
        adv->pathi = bgp_path_info_lock(path);

        if (attr)
                adv->baa = bgp_advertise_intern(subgrp->hash, attr);
        else
                adv->baa = baa_new();
        adv->adj = adj;

        /* Add new advertisement to advertisement attribute list. */
        bgp_advertise_add(adv->baa, adv);

        /*
         * If the update adv list is empty, trigger the member peers'
         * mrai timers so the socket writes can happen.
         */
        if (BGP_ADV_FIFO_EMPTY(&subgrp->sync->update)) {
                struct peer_af *paf;

                SUBGRP_FOREACH_PEER (subgrp, paf) {
                        bgp_adjust_routeadv(PAF_PEER(paf));
                }
        }

        BGP_ADV_FIFO_ADD(&subgrp->sync->update, &adv->fifo);

        subgrp->version = max(subgrp->version, rn->version);
}

/* The only time 'withdraw' will be false is if we are sending
 * the "neighbor x.x.x.x default-originate" default and need to clear
 * bgp_adj_out for the 0.0.0.0/0 route in the BGP table.
 */
void bgp_adj_out_unset_subgroup(struct bgp_node *rn,
                                struct update_subgroup *subgrp, char withdraw,
                                uint32_t addpath_tx_id)
{
        struct bgp_adj_out *adj;
        struct bgp_advertise *adv;
        bool trigger_write;

        if (DISABLE_BGP_ANNOUNCE)
                return;

        /* Lookup existing adjacency */
        if ((adj = adj_lookup(rn, subgrp, addpath_tx_id)) != NULL) {
                /* Clean up previous advertisement. */
                if (adj->adv)
                        bgp_advertise_clean_subgroup(subgrp, adj);

                if (adj->attr && withdraw) {
                        /* We need advertisement structure. */
                        adj->adv = bgp_advertise_new();
                        adv = adj->adv;
                        adv->rn = rn;
                        adv->adj = adj;

                        /* Note if we need to trigger a packet write */
                        trigger_write =
                                BGP_ADV_FIFO_EMPTY(&subgrp->sync->withdraw);

                        /* Add to synchronization entry for withdraw
                         * announcement. */
                        BGP_ADV_FIFO_ADD(&subgrp->sync->withdraw, &adv->fifo);

                        if (trigger_write)
                                subgroup_trigger_write(subgrp);
                } else {
                        /* Remove myself from adjacency. */
                        BGP_ADJ_OUT_DEL(rn, adj);

                        /* Free allocated information. */
                        adj_free(adj);

                        bgp_unlock_node(rn);
                }
        }

        subgrp->version = max(subgrp->version, rn->version);
}

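/*
 * Completely remove an adj-out entry from a route node: unintern its
 * attributes, discard any pending advertisement, unlink it from the node
 * and free it.
 */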
void bgp_adj_out_remove_subgroup(struct bgp_node *rn, struct bgp_adj_out *adj,
                                 struct update_subgroup *subgrp)
{
        if (adj->attr)
                bgp_attr_unintern(&adj->attr);

        if (adj->adv)
                bgp_advertise_clean_subgroup(subgrp, adj);

        BGP_ADJ_OUT_DEL(rn, adj);
        adj_free(adj);
}

/*
 * Go through all the routes and clean up the adj/adv structures corresponding
 * to the subgroup.
 */
void subgroup_clear_table(struct update_subgroup *subgrp)
{
        struct bgp_adj_out *aout, *taout;

        SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) {
                struct bgp_node *rn = aout->rn;
                bgp_adj_out_remove_subgroup(rn, aout, subgrp);
                bgp_unlock_node(rn);
        }
}

/*
 * subgroup_announce_table
 *
 * Announce the routes of the given table (or, if NULL, the instance RIB for
 * the subgroup's AFI/SAFI) to the subgroup.
 */
void subgroup_announce_table(struct update_subgroup *subgrp,
                             struct bgp_table *table)
{
        struct bgp_node *rn;
        struct bgp_path_info *ri;
        struct attr attr;
        struct peer *peer;
        afi_t afi;
        safi_t safi;
        int addpath_capable;

        peer = SUBGRP_PEER(subgrp);
        afi = SUBGRP_AFI(subgrp);
        safi = SUBGRP_SAFI(subgrp);
        addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

        if (safi == SAFI_LABELED_UNICAST)
                safi = SAFI_UNICAST;

        if (!table)
                table = peer->bgp->rib[afi][safi];

        if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN
            && CHECK_FLAG(peer->af_flags[afi][safi],
                          PEER_FLAG_DEFAULT_ORIGINATE))
                subgroup_default_originate(subgrp, 0);

        for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
                for (ri = bgp_node_get_bgp_path_info(rn); ri; ri = ri->next)

                        if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED)
                            || (addpath_capable
                                && bgp_addpath_tx_path(
                                           peer->addpath_type[afi][safi],
                                           ri))) {
                                if (subgroup_announce_check(rn, ri, subgrp,
                                                            &rn->p, &attr))
                                        bgp_adj_out_set_subgroup(rn, subgrp,
                                                                 &attr, ri);
                                else
                                        bgp_adj_out_unset_subgroup(
                                                rn, subgrp, 1,
                                                bgp_addpath_id_for_peer(
                                                        peer, afi, safi,
                                                        &ri->tx_addpath));
                        }

        /*
         * We walked through the whole table -- make sure our version number
         * is consistent with the one on the table. This should allow
         * subgroups to merge sooner if a peer comes up when the route node
         * with the largest version is no longer in the table. This also
         * covers the pathological case where all routes in the table have
         * now been deleted.
         */
        subgrp->version = max(subgrp->version, table->version);

        /*
         * Start a task to merge the subgroup if necessary.
         */
        update_subgroup_trigger_merge_check(subgrp, 0);
}

/*
 * subgroup_announce_route
 *
 * Refresh all routes out to a subgroup.
 */
void subgroup_announce_route(struct update_subgroup *subgrp)
{
        struct bgp_node *rn;
        struct bgp_table *table;
        struct peer *onlypeer;

        if (update_subgroup_needs_refresh(subgrp)) {
                update_subgroup_set_needs_refresh(subgrp, 0);
        }

        /*
         * First update is deferred until ORF or ROUTE-REFRESH is received
         */
        onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
                                                 : NULL);
        if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)]
                                                      [SUBGRP_SAFI(subgrp)],
                                   PEER_STATUS_ORF_WAIT_REFRESH))
                return;

        if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN
            && SUBGRP_SAFI(subgrp) != SAFI_ENCAP
            && SUBGRP_SAFI(subgrp) != SAFI_EVPN)
                subgroup_announce_table(subgrp, NULL);
        else
                for (rn = bgp_table_top(update_subgroup_rib(subgrp)); rn;
                     rn = bgp_route_next(rn)) {
                        table = bgp_node_get_bgp_table_info(rn);
                        if (!table)
                                continue;
                        subgroup_announce_table(subgrp, table);
                }
}

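/*
 * Originate or withdraw the 0.0.0.0/0 (or ::/0) default route toward a
 * subgroup. If a default-originate route-map is configured, the default is
 * only originated when the route-map permits at least one route in the RIB;
 * otherwise it is withdrawn.
 */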
void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
{
        struct bgp *bgp;
        struct attr attr;
        struct aspath *aspath;
        struct bgp_path_info tmp_info;
        struct prefix p;
        struct peer *from;
        struct bgp_node *rn;
        struct bgp_path_info *ri;
        struct peer *peer;
        int ret = RMAP_DENYMATCH;
        afi_t afi;
        safi_t safi;

        if (!subgrp)
                return;

        peer = SUBGRP_PEER(subgrp);
        afi = SUBGRP_AFI(subgrp);
        safi = SUBGRP_SAFI(subgrp);

        if (!(afi == AFI_IP || afi == AFI_IP6))
                return;

        bgp = peer->bgp;
        from = bgp->peer_self;

        bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
        aspath = attr.aspath;

        attr.local_pref = bgp->default_local_pref;

        memset(&p, 0, sizeof(p));
        p.family = afi2family(afi);
        p.prefixlen = 0;

        if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {
                /* IPv6 global nexthop must be included. */
                attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;

                /* If the peer is on a shared network and we have a link-local
                 * nexthop, set it. */
                if (peer->shared_network
                    && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local))
                        attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
        }

        if (peer->default_rmap[afi][safi].name) {
                SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);
                for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
                     rn = bgp_route_next(rn)) {
                        for (ri = bgp_node_get_bgp_path_info(rn); ri;
                             ri = ri->next) {
                                struct attr dummy_attr;

                                /* Provide dummy so the route-map can't modify
                                 * the attributes */
                                bgp_attr_dup(&dummy_attr, ri->attr);
                                tmp_info.peer = ri->peer;
                                tmp_info.attr = &dummy_attr;

                                ret = route_map_apply(
                                        peer->default_rmap[afi][safi].map,
                                        &rn->p, RMAP_BGP, &tmp_info);

                                /* The route map might have set attributes. If
                                 * we don't flush them here, they will be
                                 * leaked. */
                                bgp_attr_flush(&dummy_attr);
                                if (ret != RMAP_DENYMATCH)
                                        break;
                        }
                        if (ret != RMAP_DENYMATCH)
                                break;
                }
                bgp->peer_self->rmap_type = 0;

                if (ret == RMAP_DENYMATCH)
                        withdraw = 1;
        }

        if (withdraw) {
                if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
                        subgroup_default_withdraw_packet(subgrp);
                UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE);
        } else {
                if (!CHECK_FLAG(subgrp->sflags,
                                SUBGRP_STATUS_DEFAULT_ORIGINATE)) {

                        if (bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN)) {
                                bgp_attr_add_gshut_community(&attr);
                        }

                        SET_FLAG(subgrp->sflags,
                                 SUBGRP_STATUS_DEFAULT_ORIGINATE);
                        subgroup_default_update_packet(subgrp, &attr, from);

                        /* The 'neighbor x.x.x.x default-originate' default
                         * will act as an implicit withdraw for any previous
                         * UPDATEs sent for 0.0.0.0/0 so clear adj_out for the
                         * 0.0.0.0/0 prefix in the BGP table.
                         */
                        memset(&p, 0, sizeof(p));
                        p.family = afi2family(afi);
                        p.prefixlen = 0;

                        rn = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
                                              &p, NULL);
                        bgp_adj_out_unset_subgroup(
                                rn, subgrp, 0,
                                BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
                }
        }

        aspath_unintern(&aspath);
}

/*
 * Announce the BGP table to a subgroup.
 *
 * At startup, we try to optimize route announcement by coalescing the
 * peer-up events. This is done only the first time - from then on,
 * subgrp->v_coalesce will be set to zero and the normal logic
 * prevails.
 */
void subgroup_announce_all(struct update_subgroup *subgrp)
{
        if (!subgrp)
                return;

        /*
         * If coalesce timer value is not set, announce routes immediately.
         */
        if (!subgrp->v_coalesce) {
                if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
                        zlog_debug("u%" PRIu64 ":s%" PRIu64
                                   " announcing all routes",
                                   subgrp->update_group->id, subgrp->id);
                subgroup_announce_route(subgrp);
                return;
        }

        /*
         * We should wait for the coalesce timer. Arm the timer if not done.
         */
        if (!subgrp->t_coalesce) {
                thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
                                      subgrp, subgrp->v_coalesce,
                                      &subgrp->t_coalesce);
        }
}

/*
 * Go through all update subgroups and set up the adv queue for the
 * input route.
 */
void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
                          struct bgp_node *rn, struct bgp_path_info *pi)
{
        struct updwalk_context ctx;
        ctx.pi = pi;
        ctx.rn = rn;
        update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
}

void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi,
                                 struct vty *vty, uint64_t id)
{
        updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE);
}

void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi,
                                  struct vty *vty, uint64_t id)
{
        updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED);
}

void update_group_announce(struct bgp *bgp)
{
        update_group_walk(bgp, update_group_announce_walkcb, NULL);
}

void update_group_announce_rrclients(struct bgp *bgp)
{
        update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL);
}