1/**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with GNU Zebra; see the file COPYING. If not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
25 * 02111-1307, USA.
26 */
27
28#include <zebra.h>
29
30#include "prefix.h"
31#include "thread.h"
32#include "buffer.h"
33#include "stream.h"
34#include "command.h"
35#include "sockunion.h"
36#include "network.h"
37#include "memory.h"
38#include "filter.h"
39#include "routemap.h"
40#include "log.h"
41#include "plist.h"
42#include "linklist.h"
43#include "workqueue.h"
44#include "hash.h"
45#include "jhash.h"
46#include "queue.h"
47
48#include "bgpd/bgpd.h"
49#include "bgpd/bgp_table.h"
50#include "bgpd/bgp_debug.h"
51#include "bgpd/bgp_fsm.h"
52#include "bgpd/bgp_advertise.h"
53#include "bgpd/bgp_packet.h"
54#include "bgpd/bgp_updgrp.h"
55#include "bgpd/bgp_route.h"
56#include "bgpd/bgp_filter.h"
57
58/********************
59 * PRIVATE FUNCTIONS
60 ********************/
61
62/**
63 * Assign a unique ID to an update group or subgroup. Mostly for display/
64 * debugging purposes. It's a 64-bit space - used freely without
65 * worrying about wrap-around or about filling gaps. While at it, timestamp
66 * the creation.
67 */
68static void
69update_group_checkin (struct update_group *updgrp)
70{
71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock ();
73}
74
75static void
76update_subgroup_checkin (struct update_subgroup *subgrp,
77 struct update_group *updgrp)
78{
79 subgrp->id = ++bm->subgrp_idspace;
80 subgrp->uptime = bgp_clock ();
81}
82
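/*
 * sync_init
 *
 * Allocate the per-subgroup synchronization state: the update/withdraw
 * advertisement FIFOs, the advertised-attribute hash, and the work and
 * scratch streams used while building UPDATE packets.
 */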
83static void
84sync_init (struct update_subgroup *subgrp)
85{
86 subgrp->sync = XCALLOC (MTYPE_BGP_SYNCHRONISE,
87 sizeof (struct bgp_synchronize));
88 BGP_ADV_FIFO_INIT (&subgrp->sync->update);
89 BGP_ADV_FIFO_INIT (&subgrp->sync->withdraw);
90 BGP_ADV_FIFO_INIT (&subgrp->sync->withdraw_low);
91 subgrp->hash = hash_create (baa_hash_key, baa_hash_cmp);
92
93 /* We use a larger buffer for subgrp->work in the event that:
94 * - We RX a BGP_UPDATE where the attributes alone are just
95 * under BGP_MAX_PACKET_SIZE
96 * - The user configures an outbound route-map that does many as-path
97 * prepends or adds many communities. At most they can have CMD_ARGC_MAX
98 * args in a route-map so there is a finite limit on how large they can
99 * make the attributes.
100 *
101 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid bounds
102 * checking for every single attribute as we construct an UPDATE.
103 */
104 subgrp->work = stream_new (BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
105 subgrp->scratch = stream_new (BGP_MAX_PACKET_SIZE);
106}
107
108static void
109sync_delete (struct update_subgroup *subgrp)
110{
111 if (subgrp->sync)
112 XFREE (MTYPE_BGP_SYNCHRONISE, subgrp->sync);
113 subgrp->sync = NULL;
114 if (subgrp->hash)
115 hash_free (subgrp->hash);
116 subgrp->hash = NULL;
117 if (subgrp->work)
118 stream_free (subgrp->work);
119 subgrp->work = NULL;
120 if (subgrp->scratch)
121 stream_free (subgrp->scratch);
122 subgrp->scratch = NULL;
123}
124
125/**
126 * conf_copy
127 *
128 * copy only those fields that are relevant to update group match
129 */
130static void
131conf_copy (struct peer *dst, struct peer *src, afi_t afi, safi_t safi)
132{
133 struct bgp_filter *srcfilter;
134 struct bgp_filter *dstfilter;
135
136 srcfilter = &src->filter[afi][safi];
137 dstfilter = &dst->filter[afi][safi];
138
139 dst->bgp = src->bgp;
140 dst->sort = src->sort;
141 dst->as = src->as;
142 dst->v_routeadv = src->v_routeadv;
143 dst->flags = src->flags;
144 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
145 if (dst->host)
146 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
147
148 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
149 dst->cap = src->cap;
150 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
151 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
152 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
153 dst->local_as = src->local_as;
154 dst->change_local_as = src->change_local_as;
155 dst->shared_network = src->shared_network;
156 memcpy (&(dst->nexthop), &(src->nexthop), sizeof (struct bgp_nexthop));
157
158 dst->group = src->group;
159
160 if (src->default_rmap[afi][safi].name)
161 {
162 dst->default_rmap[afi][safi].name =
163 XSTRDUP(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
164 dst->default_rmap[afi][safi].map = src->default_rmap[afi][safi].map;
165 }
166
167 if (DISTRIBUTE_OUT_NAME(srcfilter))
168 {
169 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
170 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
171 }
172
173 if (PREFIX_LIST_OUT_NAME(srcfilter))
174 {
175 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
176 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
177 }
178
179 if (FILTER_LIST_OUT_NAME(srcfilter))
180 {
181 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
182 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
183 }
184
185 if (ROUTE_MAP_OUT_NAME(srcfilter))
186 {
187 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
188 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
189 }
190
191 if (UNSUPPRESS_MAP_NAME(srcfilter))
192 {
193 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
194 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
195 }
196}
197
198/**
199 * since we did a bunch of XSTRDUP's in conf_copy, time to free them up
200 */
201static void
202conf_release (struct peer *src, afi_t afi, safi_t safi)
203{
204 struct bgp_filter *srcfilter;
205
206 srcfilter = &src->filter[afi][safi];
207
208 if (src->default_rmap[afi][safi].name)
209 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
210
211 if (srcfilter->dlist[FILTER_OUT].name)
212 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
213
214 if (srcfilter->plist[FILTER_OUT].name)
215 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
216
217 if (srcfilter->aslist[FILTER_OUT].name)
218 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
219
220 if (srcfilter->map[RMAP_OUT].name)
221 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
222
223 if (srcfilter->usmap.name)
224 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
225
226 if (src->host)
227 XFREE(MTYPE_BGP_PEER_HOST, src->host);
228 src->host = NULL;
229}
230
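/*
 * peer2_updgrp_copy
 *
 * Stamp an update group with the AFI/SAFI identity of the given peer_af
 * and copy the match-relevant configuration of its peer into the group's
 * template peer (updgrp->conf) via conf_copy().
 */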
231static void
232peer2_updgrp_copy (struct update_group *updgrp, struct peer_af *paf)
233{
234 struct peer *src;
235 struct peer *dst;
236
237 if (!updgrp || !paf)
238 return;
239
240 src = paf->peer;
241 dst = updgrp->conf;
242 if (!src || !dst)
243 return;
244
245 updgrp->afi = paf->afi;
246 updgrp->safi = paf->safi;
247 updgrp->afid = paf->afid;
248 updgrp->bgp = src->bgp;
249
250 conf_copy (dst, src, paf->afi, paf->safi);
251}
252
253/**
254 * auxiliary functions to maintain the hash table.
255 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
256 * - updgrp_hash_key_make - makes the key for update group search
257 * - updgrp_hash_cmp - compare two update groups.
258 */
259static void *
260updgrp_hash_alloc (void *p)
261{
262 struct update_group *updgrp;
263 const struct update_group *in;
264
265 in = (const struct update_group *)p;
266 updgrp = XCALLOC (MTYPE_BGP_UPDGRP, sizeof (struct update_group));
267 memcpy (updgrp, in, sizeof (struct update_group));
268 updgrp->conf = XCALLOC (MTYPE_BGP_PEER, sizeof (struct peer));
269 conf_copy (updgrp->conf, in->conf, in->afi, in->safi);
270 return updgrp;
271}
272
273/**
274 * The hash value for a peer is computed from the following variables:
275 * v = f(
276 * 1. IBGP (1) or EBGP (2)
277 * 2. FLAGS based on configuration:
278 * LOCAL_AS_NO_PREPEND
279 * LOCAL_AS_REPLACE_AS
280 * 3. AF_FLAGS based on configuration:
281 * Refer to definition in bgp_updgrp.h
282 * 4. (AF-independent) Capability flags:
283 * AS4_RCV capability
284 * 5. (AF-dependent) Capability flags:
285 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
286 * 6. MRAI
287 * 7. peer-group name
288 * 8. Outbound route-map name (neighbor route-map <> out)
289 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
290 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
291 * 11. Outbound as-list name (neighbor filter-list <> out)
292 * 12. Unsuppress map name (neighbor unsuppress-map <>)
293 * 13. default rmap name (neighbor default-originate route-map <>)
294 * 14. encoding both global and link-local nexthop?
295 * 15. If peer is configured to be a lonesoul, peer ip address
296 * 16. Local-as should match, if configured.
297 * )
298 */
299static unsigned int
300updgrp_hash_key_make (void *p)
301{
302 const struct update_group *updgrp;
303 const struct peer *peer;
304 const struct bgp_filter *filter;
305 uint32_t flags;
306 uint32_t key;
307 afi_t afi;
308 safi_t safi;
309
310#define SEED1 999331
311#define SEED2 2147483647
312
313 updgrp = p;
314 peer = updgrp->conf;
315 afi = updgrp->afi;
316 safi = updgrp->safi;
317 flags = peer->af_flags[afi][safi];
318 filter = &peer->filter[afi][safi];
319
320 key = 0;
321
322 key = jhash_1word (peer->sort, key); /* EBGP or IBGP */
323 key = jhash_1word ((peer->flags & PEER_UPDGRP_FLAGS), key);
324 key = jhash_1word ((flags & PEER_UPDGRP_AF_FLAGS), key);
325 key = jhash_1word ((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
326 key = jhash_1word ((peer->af_cap[afi][safi] &
327 PEER_UPDGRP_AF_CAP_FLAGS), key);
328 key = jhash_1word (peer->v_routeadv, key);
329 key = jhash_1word (peer->change_local_as, key);
330
331 if (peer->group)
332 key = jhash_1word (jhash (peer->group->name,
333 strlen (peer->group->name), SEED1), key);
334
335 if (filter->map[RMAP_OUT].name)
336 key = jhash_1word (jhash (filter->map[RMAP_OUT].name,
337 strlen (filter->map[RMAP_OUT].name), SEED1),
338 key);
339
340 if (filter->dlist[FILTER_OUT].name)
341 key = jhash_1word (jhash (filter->dlist[FILTER_OUT].name,
342 strlen (filter->dlist[FILTER_OUT].name), SEED1),
343 key);
344
345 if (filter->plist[FILTER_OUT].name)
346 key = jhash_1word (jhash (filter->plist[FILTER_OUT].name,
347 strlen (filter->plist[FILTER_OUT].name), SEED1),
348 key);
349
350 if (filter->aslist[FILTER_OUT].name)
351 key = jhash_1word (jhash (filter->aslist[FILTER_OUT].name,
352 strlen (filter->aslist[FILTER_OUT].name),
353 SEED1), key);
354
355 if (filter->usmap.name)
356 key = jhash_1word (jhash (filter->usmap.name,
357 strlen (filter->usmap.name), SEED1), key);
358
359 if (peer->default_rmap[afi][safi].name)
360 key = jhash_1word (jhash (peer->default_rmap[afi][safi].name,
361 strlen (peer->default_rmap[afi][safi].name),
362 SEED1), key);
363
364 /* If peer is on a shared network and is exchanging IPv6 prefixes,
365 * it needs to include the link-local address. That's different from
366 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
367 * bytes). We create different update groups to take care of that.
368 */
369 key = jhash_1word ((peer->shared_network &&
370 peer_afi_active_nego (peer, AFI_IP6)),
371 key);
372
373 /*
374 * There are certain peers that must get their own update-group:
375 * - lonesoul peers
376 * - peers that negotiated ORF
377 */
378 if (CHECK_FLAG (peer->flags, PEER_FLAG_LONESOUL) ||
379 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) ||
380 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
381 key = jhash_1word (jhash (peer->host, strlen (peer->host), SEED2), key);
382
383 return key;
384}
385
386static int
387updgrp_hash_cmp (const void *p1, const void *p2)
388{
389 const struct update_group *grp1;
390 const struct update_group *grp2;
391 const struct peer *pe1;
392 const struct peer *pe2;
393 uint32_t flags1;
394 uint32_t flags2;
395 const struct bgp_filter *fl1;
396 const struct bgp_filter *fl2;
397 afi_t afi;
398 safi_t safi;
399
400 if (!p1 || !p2)
401 return 0;
402
403 grp1 = p1;
404 grp2 = p2;
405 pe1 = grp1->conf;
406 pe2 = grp2->conf;
407 afi = grp1->afi;
408 safi = grp1->safi;
409 flags1 = pe1->af_flags[afi][safi];
410 flags2 = pe2->af_flags[afi][safi];
411 fl1 = &pe1->filter[afi][safi];
412 fl2 = &pe2->filter[afi][safi];
413
414 /* put EBGP and IBGP peers in different update groups */
415 if (pe1->sort != pe2->sort)
416 return 0;
417
418 /* check peer flags */
419 if ((pe1->flags & PEER_UPDGRP_FLAGS) !=
420 (pe2->flags & PEER_UPDGRP_FLAGS))
421 return 0;
422
423 /* If there is 'local-as' configured, it should match. */
424 if (pe1->change_local_as != pe2->change_local_as)
425 return 0;
426
427 /* flags like route reflector client */
428 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
429 return 0;
430
431 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS) !=
432 (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
433 return 0;
434
435 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS) !=
436 (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
437 return 0;
438
439 if (pe1->v_routeadv != pe2->v_routeadv)
440 return 0;
441
442 if (pe1->group != pe2->group)
443 return 0;
444
445 /* route-map names should be the same */
446 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name) ||
447 (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name) ||
448 (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name &&
449 strcmp (fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
450 return 0;
451
452 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name) ||
453 (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name) ||
454 (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name &&
455 strcmp (fl1->dlist[FILTER_OUT].name, fl2->dlist[FILTER_OUT].name)))
456 return 0;
457
458 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name) ||
459 (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name) ||
460 (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name &&
461 strcmp (fl1->plist[FILTER_OUT].name, fl2->plist[FILTER_OUT].name)))
462 return 0;
463
464 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name) ||
465 (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name) ||
466 (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name &&
467 strcmp (fl1->aslist[FILTER_OUT].name, fl2->aslist[FILTER_OUT].name)))
468 return 0;
469
470 if ((fl1->usmap.name && !fl2->usmap.name) ||
471 (!fl1->usmap.name && fl2->usmap.name) ||
472 (fl1->usmap.name && fl2->usmap.name &&
473 strcmp (fl1->usmap.name, fl2->usmap.name)))
474 return 0;
475
476 if ((pe1->default_rmap[afi][safi].name &&
477 !pe2->default_rmap[afi][safi].name) ||
478 (!pe1->default_rmap[afi][safi].name &&
479 pe2->default_rmap[afi][safi].name) ||
480 (pe1->default_rmap[afi][safi].name &&
481 pe2->default_rmap[afi][safi].name &&
482 strcmp (pe1->default_rmap[afi][safi].name,
483 pe2->default_rmap[afi][safi].name)))
484 return 0;
485
486 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
487 return 0;
488
489 if ((CHECK_FLAG (pe1->flags, PEER_FLAG_LONESOUL) ||
490 CHECK_FLAG (pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) ||
491 CHECK_FLAG (pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_OLD_RCV)) &&
492 !sockunion_same (&pe1->su, &pe2->su))
493 return 0;
494
495 return 1;
496}
497
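/*
 * peer_lonesoul_or_not
 *
 * Set or clear PEER_FLAG_LONESOUL on the peer and, if the flag actually
 * changed, re-evaluate the peer's update-group membership for all of its
 * address families.
 */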
498static void
499peer_lonesoul_or_not (struct peer *peer, int set)
500{
501 /* no change in status? */
502 if (set == (CHECK_FLAG (peer->flags, PEER_FLAG_LONESOUL) > 0))
503 return;
504
505 if (set)
506 SET_FLAG (peer->flags, PEER_FLAG_LONESOUL);
507 else
508 UNSET_FLAG (peer->flags, PEER_FLAG_LONESOUL);
509
510 update_group_adjust_peer_afs (peer);
511}
512
513/*
514 * subgroup_total_packets_enqueued
515 *
516 * Returns the total number of packets enqueued to a subgroup.
517 */
518static unsigned int
519subgroup_total_packets_enqueued (struct update_subgroup *subgrp)
520{
521 struct bpacket *pkt;
522
523 pkt = bpacket_queue_last (SUBGRP_PKTQ (subgrp));
524
525 return pkt->ver - 1;
526}
527
528static int
529update_group_show_walkcb (struct update_group *updgrp, void *arg)
530{
531 struct updwalk_context *ctx = arg;
532 struct vty *vty;
533 struct update_subgroup *subgrp;
534 struct peer_af *paf;
535 struct bgp_filter *filter;
536 int match = 0;
537
538 if (!ctx)
539 return CMD_SUCCESS;
540
541 if (ctx->subgrp_id)
542 {
543 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
544 {
545 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
546 continue;
547 else
548 {
549 match = 1;
550 break;
551 }
552 }
553 }
554 else
555 {
556 match = 1;
557 }
558
559 if (!match)
560 {
561 /* Since this routine is invoked from a walk, we cannot signal any */
562 /* error here; we can only return. */
563 return CMD_SUCCESS;
564 }
565
566 vty = ctx->vty;
567
568 vty_out (vty, "Update-group %" PRIu64 ":%s", updgrp->id, VTY_NEWLINE);
569 vty_out (vty, " Created: %s", timestamp_string (updgrp->uptime));
570 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
571 if (filter->map[RMAP_OUT].name)
572 vty_out (vty, " Outgoing route map: %s%s%s",
573 filter->map[RMAP_OUT].map ? "X" : "",
574 filter->map[RMAP_OUT].name, VTY_NEWLINE);
575 vty_out (vty, " MRAI value (seconds): %d%s",
576 updgrp->conf->v_routeadv, VTY_NEWLINE);
577 if (updgrp->conf->change_local_as)
578 vty_out (vty, " Local AS %u%s%s%s",
579 updgrp->conf->change_local_as,
580 CHECK_FLAG (updgrp->conf->flags,
581 PEER_FLAG_LOCAL_AS_NO_PREPEND) ? " no-prepend" : "",
582 CHECK_FLAG (updgrp->conf->flags,
583 PEER_FLAG_LOCAL_AS_REPLACE_AS) ? " replace-as" : "",
584 VTY_NEWLINE);
585
586 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
587 {
588 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
589 continue;
590 vty_out (vty, "%s", VTY_NEWLINE);
591 vty_out (vty, " Update-subgroup %" PRIu64 ":%s", subgrp->id, VTY_NEWLINE);
592 vty_out (vty, " Created: %s", timestamp_string (subgrp->uptime));
593
594 if (subgrp->split_from.update_group_id || subgrp->split_from.subgroup_id)
595 {
596 vty_out (vty, " Split from group id: %" PRIu64 "%s",
597 subgrp->split_from.update_group_id, VTY_NEWLINE);
598 vty_out (vty, " Split from subgroup id: %" PRIu64 "%s",
599 subgrp->split_from.subgroup_id, VTY_NEWLINE);
600 }
601
602 vty_out (vty, " Join events: %u%s", subgrp->join_events, VTY_NEWLINE);
603 vty_out (vty, " Prune events: %u%s",
604 subgrp->prune_events, VTY_NEWLINE);
605 vty_out (vty, " Merge events: %u%s",
606 subgrp->merge_events, VTY_NEWLINE);
607 vty_out (vty, " Split events: %u%s",
608 subgrp->split_events, VTY_NEWLINE);
609 vty_out (vty, " Update group switch events: %u%s",
610 subgrp->updgrp_switch_events, VTY_NEWLINE);
611 vty_out (vty, " Peer refreshes combined: %u%s",
612 subgrp->peer_refreshes_combined, VTY_NEWLINE);
613 vty_out (vty, " Merge checks triggered: %u%s",
614 subgrp->merge_checks_triggered, VTY_NEWLINE);
615 vty_out (vty, " Version: %" PRIu64 "%s", subgrp->version, VTY_NEWLINE);
616 vty_out (vty, " Packet queue length: %d%s",
617 bpacket_queue_length (SUBGRP_PKTQ (subgrp)), VTY_NEWLINE);
618 vty_out (vty, " Total packets enqueued: %u%s",
619 subgroup_total_packets_enqueued (subgrp), VTY_NEWLINE);
620 vty_out (vty, " Packet queue high watermark: %d%s",
621 bpacket_queue_hwm_length (SUBGRP_PKTQ (subgrp)), VTY_NEWLINE);
622 vty_out (vty, " Adj-out list count: %u%s",
623 subgrp->adj_count, VTY_NEWLINE);
624 vty_out (vty, " Advertise list: %s%s",
625 advertise_list_is_empty (subgrp) ? "empty" : "not empty",
626 VTY_NEWLINE);
627 vty_out (vty, " Flags: %s%s",
628 CHECK_FLAG (subgrp->flags,
629 SUBGRP_FLAG_NEEDS_REFRESH) ? "R" : "", VTY_NEWLINE);
630 if (subgrp->peer_count > 0)
631 {
632 vty_out (vty, " Peers:%s", VTY_NEWLINE);
633 SUBGRP_FOREACH_PEER (subgrp, paf)
634 vty_out (vty, " - %s%s", paf->peer->host, VTY_NEWLINE);
635 }
636 }
637 return UPDWALK_CONTINUE;
638}
639
640/*
641 * Helper function to show the packet queue for each subgroup of an update group.
642 * Will be constrained to a particular subgroup id if id != 0.
643 */
644static int
645updgrp_show_packet_queue_walkcb (struct update_group *updgrp, void *arg)
646{
647 struct updwalk_context *ctx = arg;
648 struct update_subgroup *subgrp;
649 struct vty *vty;
650
651 vty = ctx->vty;
652 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
653 {
654 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
655 continue;
656 vty_out (vty, "update group %" PRIu64 ", subgroup %" PRIu64 "%s", updgrp->id,
657 subgrp->id, VTY_NEWLINE);
658 bpacket_queue_show_vty (SUBGRP_PKTQ (subgrp), vty);
659 }
660 return UPDWALK_CONTINUE;
661}
662
663/*
664 * Show the packet queue for each subgroup of an update group. Will be
665 * constrained to a particular subgroup id if id != 0.
666 */
667void
668update_group_show_packet_queue (struct bgp *bgp, afi_t afi, safi_t safi,
669 struct vty *vty, uint64_t id)
670{
671 struct updwalk_context ctx;
672
673 memset (&ctx, 0, sizeof (ctx));
674 ctx.vty = vty;
675 ctx.subgrp_id = id;
676 ctx.flags = 0;
677 update_group_af_walk (bgp, afi, safi, updgrp_show_packet_queue_walkcb,
678 &ctx);
679}
680
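/*
 * update_group_find
 *
 * Look up an existing update group whose template configuration matches
 * the given peer_af, using a throwaway template that is released before
 * returning. Returns NULL if the peer is not established or no group
 * matches.
 */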
681static struct update_group *
682update_group_find (struct peer_af *paf)
683{
684 struct update_group *updgrp;
685 struct update_group tmp;
686 struct peer tmp_conf;
687
688 if (!peer_established (PAF_PEER (paf)))
689 return NULL;
690
691 memset (&tmp, 0, sizeof (tmp));
692 memset (&tmp_conf, 0, sizeof (tmp_conf));
693 tmp.conf = &tmp_conf;
694 peer2_updgrp_copy (&tmp, paf);
695
696 updgrp = hash_lookup (paf->peer->bgp->update_groups[paf->afid], &tmp);
697 conf_release (&tmp_conf, paf->afi, paf->safi);
698 return updgrp;
699}
700
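/*
 * update_group_create
 *
 * Get the update group for the given peer_af from the hash table,
 * creating it via updgrp_hash_alloc if it does not exist yet, and
 * assign it an id and creation time via update_group_checkin().
 */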
701static struct update_group *
702update_group_create (struct peer_af *paf)
703{
704 struct update_group *updgrp;
705 struct update_group tmp;
706 struct peer tmp_conf;
707
708 memset (&tmp, 0, sizeof (tmp));
709 memset (&tmp_conf, 0, sizeof (tmp_conf));
710 tmp.conf = &tmp_conf;
711 peer2_updgrp_copy (&tmp, paf);
712
713 updgrp = hash_get (paf->peer->bgp->update_groups[paf->afid], &tmp,
714 updgrp_hash_alloc);
715 if (!updgrp)
716 return NULL;
717 update_group_checkin (updgrp);
718
719 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
720 zlog_debug ("create update group %" PRIu64, updgrp->id);
721
722 UPDGRP_GLOBAL_STAT (updgrp, updgrps_created) += 1;
723
724 conf_release(&tmp_conf, paf->afi, paf->safi);
725 return updgrp;
726}
727
728static void
729update_group_delete (struct update_group *updgrp)
730{
731 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
732 zlog_debug ("delete update group %" PRIu64, updgrp->id);
733
734 UPDGRP_GLOBAL_STAT (updgrp, updgrps_deleted) += 1;
735
736 hash_release (updgrp->bgp->update_groups[updgrp->afid], updgrp);
737 conf_release (updgrp->conf, updgrp->afi, updgrp->safi);
738
739 if (updgrp->conf->host)
740 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
741 updgrp->conf->host = NULL;
742
743 if (updgrp->conf->ifname)
744 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
745
746 XFREE (MTYPE_BGP_PEER, updgrp->conf);
747 XFREE (MTYPE_BGP_UPDGRP, updgrp);
748}
749
750static void
751update_group_add_subgroup (struct update_group *updgrp,
752 struct update_subgroup *subgrp)
753{
754 if (!updgrp || !subgrp)
755 return;
756
757 LIST_INSERT_HEAD (&(updgrp->subgrps), subgrp, updgrp_train);
758 subgrp->update_group = updgrp;
759}
760
761static void
762update_group_remove_subgroup (struct update_group *updgrp,
763 struct update_subgroup *subgrp)
764{
765 if (!updgrp || !subgrp)
766 return;
767
768 LIST_REMOVE (subgrp, updgrp_train);
769 subgrp->update_group = NULL;
770 if (LIST_EMPTY (&(updgrp->subgrps)))
771 update_group_delete (updgrp);
772}
773
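/*
 * update_subgroup_create
 *
 * Allocate a new subgroup under the given update group: initialize its
 * sync state, packet queue (seeded with an empty placeholder packet) and
 * adj-out queue, and link it onto the update group's subgroup list.
 */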
774static struct update_subgroup *
775update_subgroup_create (struct update_group *updgrp)
776{
777 struct update_subgroup *subgrp;
778
779 subgrp = XCALLOC (MTYPE_BGP_UPD_SUBGRP, sizeof (struct update_subgroup));
780 update_subgroup_checkin (subgrp, updgrp);
781 subgrp->v_coalesce = (UPDGRP_INST (updgrp))->coalesce_time;
782 sync_init (subgrp);
783 bpacket_queue_init (SUBGRP_PKTQ (subgrp));
784 bpacket_queue_add (SUBGRP_PKTQ (subgrp), NULL, NULL);
785 TAILQ_INIT (&(subgrp->adjq));
786 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
787 zlog_debug ("create subgroup u%" PRIu64 ":s%" PRIu64,
788 updgrp->id, subgrp->id);
789
790 update_group_add_subgroup (updgrp, subgrp);
791
792 UPDGRP_INCR_STAT (updgrp, subgrps_created);
793
794 return subgrp;
795}
796
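/*
 * update_subgroup_delete
 *
 * Tear down a subgroup: stop its merge-check and coalesce timers, clean
 * up its packet queue, adj-out and sync state, and unlink it from its
 * update group (the group itself is deleted if this was its last
 * subgroup).
 */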
797static void
798update_subgroup_delete (struct update_subgroup *subgrp)
799{
800 if (!subgrp)
801 return;
802
803 if (subgrp->update_group)
804 UPDGRP_INCR_STAT (subgrp->update_group, subgrps_deleted);
805
806 if (subgrp->t_merge_check)
807 THREAD_OFF (subgrp->t_merge_check);
808
809 if (subgrp->t_coalesce)
810 THREAD_TIMER_OFF (subgrp->t_coalesce);
811
812 bpacket_queue_cleanup (SUBGRP_PKTQ (subgrp));
813 subgroup_clear_table (subgrp);
814
815 if (subgrp->t_coalesce)
816 THREAD_TIMER_OFF (subgrp->t_coalesce);
817 sync_delete (subgrp);
818
819 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
820 zlog_debug ("delete subgroup u%" PRIu64 ":s%" PRIu64,
821 subgrp->update_group->id, subgrp->id);
822
823 update_group_remove_subgroup (subgrp->update_group, subgrp);
824
825 XFREE (MTYPE_BGP_UPD_SUBGRP, subgrp);
826}
827
828void
829update_subgroup_inherit_info (struct update_subgroup *to,
830 struct update_subgroup *from)
831{
832 if (!to || !from)
833 return;
834
835 to->sflags = from->sflags;
836}
837
838/*
839 * update_subgroup_check_delete
840 *
841 * Delete a subgroup if it is ready to be deleted.
842 *
843 * Returns TRUE if the subgroup was deleted.
844 */
845static int
846update_subgroup_check_delete (struct update_subgroup *subgrp)
847{
848 if (!subgrp)
849 return 0;
850
851 if (!LIST_EMPTY (&(subgrp->peers)))
852 return 0;
853
854 update_subgroup_delete (subgrp);
855
856 return 1;
857}
858
859/*
860 * update_subgroup_add_peer
861 *
862 * @param send_enqueued_pkts If true, all currently enqueued packets will
863 * also be sent to the peer.
864 */
865static void
866update_subgroup_add_peer (struct update_subgroup *subgrp, struct peer_af *paf,
867 int send_enqueued_pkts)
868{
869 struct bpacket *pkt;
870
871 if (!subgrp || !paf)
872 return;
873
874 LIST_INSERT_HEAD (&(subgrp->peers), paf, subgrp_train);
875 paf->subgroup = subgrp;
876 subgrp->peer_count++;
877
878 if (bgp_debug_peer_updout_enabled(paf->peer->host))
879 {
880 UPDGRP_PEER_DBG_EN(subgrp->update_group);
881 }
882
883 SUBGRP_INCR_STAT (subgrp, join_events);
884
885 if (send_enqueued_pkts)
886 {
887 pkt = bpacket_queue_first (SUBGRP_PKTQ (subgrp));
888 }
889 else
890 {
891
892 /*
893 * Hang the peer off of the last, placeholder, packet in the
894 * queue. This means it won't see any of the packets that are
895 * currently in the queue.
896 */
897 pkt = bpacket_queue_last (SUBGRP_PKTQ (subgrp));
898 assert (pkt->buffer == NULL);
899 }
900
901 bpacket_add_peer (pkt, paf);
902
903 bpacket_queue_sanity_check (SUBGRP_PKTQ (subgrp));
904}
905
906/*
907 * update_subgroup_remove_peer_internal
908 *
909 * Internal function that removes a peer from a subgroup, but does not
910 * delete the subgroup. A call to this function must almost always be
911 * followed by a call to update_subgroup_check_delete().
912 *
913 * @see update_subgroup_remove_peer
914 */
915static void
916update_subgroup_remove_peer_internal (struct update_subgroup *subgrp,
917 struct peer_af *paf)
918{
919 assert (subgrp && paf);
920
921 if (bgp_debug_peer_updout_enabled(paf->peer->host))
922 {
923 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
924 }
925
926 bpacket_queue_remove_peer (paf);
927 LIST_REMOVE (paf, subgrp_train);
928 paf->subgroup = NULL;
929 subgrp->peer_count--;
930
931 SUBGRP_INCR_STAT (subgrp, prune_events);
932}
933
934/*
935 * update_subgroup_remove_peer
936 */
937void
938update_subgroup_remove_peer (struct update_subgroup *subgrp,
939 struct peer_af *paf)
940{
941 if (!subgrp || !paf)
942 return;
943
944 update_subgroup_remove_peer_internal (subgrp, paf);
945
946 if (update_subgroup_check_delete (subgrp))
947 return;
948
949 /*
950 * The deletion of the peer may have caused some packets to be
951 * deleted from the subgroup packet queue. Check if the subgroup can
952 * be merged now.
953 */
954 update_subgroup_check_merge (subgrp, "removed peer from subgroup");
955}
956
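/*
 * update_subgroup_find
 *
 * Find a subgroup in the update group that a newly established peer can
 * join directly: one at version 0 that is not originating a default
 * route and does not need a refresh. Returns NULL if there is none.
 */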
957static struct update_subgroup *
958update_subgroup_find (struct update_group *updgrp, struct peer_af *paf)
959{
960 struct update_subgroup *subgrp = NULL;
961 uint64_t version;
962
963 if (paf->subgroup)
964 {
965 assert (0);
966 return NULL;
967 }
968 else
969 version = 0;
970
971 if (!peer_established (PAF_PEER (paf)))
972 return NULL;
973
974 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
975 {
976 if (subgrp->version != version ||
977 CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
978 continue;
979
980 /*
981 * The version number is not meaningful on a subgroup that needs
982 * a refresh.
983 */
984 if (update_subgroup_needs_refresh (subgrp))
985 continue;
986
987 break;
988 }
989
990 return subgrp;
991}
992
993/*
994 * update_subgroup_ready_for_merge
995 *
996 * Returns TRUE if this subgroup is in a state that allows it to be
997 * merged into another subgroup.
998 */
999 static int
1000update_subgroup_ready_for_merge (struct update_subgroup *subgrp)
1001{
1002
1003 /*
1004 * Not ready if there are any encoded packets waiting to be written
1005 * out to peers.
1006 */
1007 if (!bpacket_queue_is_empty (SUBGRP_PKTQ (subgrp)))
1008 return 0;
1009
1010 /*
1011 * Not ready if there are enqueued updates waiting to be encoded.
1012 */
1013 if (!advertise_list_is_empty (subgrp))
1014 return 0;
1015
1016 /*
1017 * Don't attempt to merge a subgroup that needs a refresh. For one,
1018 * we can't determine if the adj_out of such a group matches that of
1019 * another group.
1020 */
1021 if (update_subgroup_needs_refresh (subgrp))
1022 return 0;
1023
1024 return 1;
1025}
1026
1027/*
1028 * update_subgrp_can_merge_into
1029 *
1030 * Returns TRUE if the first subgroup can merge into the second
1031 * subgroup.
1032 */
1033 static int
1034update_subgroup_can_merge_into (struct update_subgroup *subgrp,
1035 struct update_subgroup *target)
1036{
1037
1038 if (subgrp == target)
1039 return 0;
1040
1041 /*
1042 * Both must have processed the BRIB to the same point in order to
1043 * be merged.
1044 */
1045 if (subgrp->version != target->version)
1046 return 0;
1047
1048 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) !=
1049 CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1050 return 0;
1051
1052 if (subgrp->adj_count != target->adj_count)
1053 return 0;
1054
1055 return update_subgroup_ready_for_merge (target);
1056}
1057
1058/*
1059 * update_subgroup_merge
1060 *
1061 * Merge the first subgroup into the second one.
1062 */
1063static void
1064update_subgroup_merge (struct update_subgroup *subgrp,
1065 struct update_subgroup *target, const char *reason)
1066{
1067 struct peer_af *paf;
1068 int result;
1069 int peer_count;
1070
1071 assert (subgrp->adj_count == target->adj_count);
1072
1073 peer_count = subgrp->peer_count;
1074
1075 while (1)
1076 {
1077 paf = LIST_FIRST (&subgrp->peers);
1078 if (!paf)
1079 break;
1080
1081 update_subgroup_remove_peer_internal (subgrp, paf);
1082
1083 /*
1084 * Add the peer to the target subgroup, while making sure that
1085 * any currently enqueued packets won't be sent to it. Enqueued
1086 * packets could, for example, result in an unnecessary withdraw
1087 * followed by an advertise.
1088 */
1089 update_subgroup_add_peer (target, paf, 0);
1090 }
1091
1092 SUBGRP_INCR_STAT (target, merge_events);
1093
1094 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
1095 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64 ", "
1096 "trigger: %s", subgrp->update_group->id, subgrp->id, peer_count,
1097 target->update_group->id, target->id, reason ? reason : "unknown");
1098
1099 result = update_subgroup_check_delete (subgrp);
1100 assert (result);
1101}
1102
1103/*
1104 * update_subgroup_check_merge
1105 *
1106 * Merge this subgroup into another subgroup if possible.
1107 *
1108 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1109 * should not be accessed in this case.
1110 */
1111int
1112update_subgroup_check_merge (struct update_subgroup *subgrp,
1113 const char *reason)
1114{
1115 struct update_subgroup *target;
1116
1117 if (!update_subgroup_ready_for_merge (subgrp))
1118 return 0;
1119
1120 /*
1121 * Look for a subgroup to merge into.
1122 */
1123 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target)
1124 {
1125 if (update_subgroup_can_merge_into (subgrp, target))
1126 break;
1127 }
1128
1129 if (!target)
1130 return 0;
1131
1132 update_subgroup_merge (subgrp, target, reason);
1133 return 1;
1134}
1135
1136 /*
1137 * update_subgroup_merge_check_thread_cb
1138 */
1139static int
1140update_subgroup_merge_check_thread_cb (struct thread *thread)
1141{
1142 struct update_subgroup *subgrp;
1143
1144 subgrp = THREAD_ARG (thread);
1145
1146 subgrp->t_merge_check = NULL;
1147
1148 update_subgroup_check_merge (subgrp, "triggered merge check");
1149 return 0;
1150}
1151
1152/*
1153 * update_subgroup_trigger_merge_check
1154 *
1155 * Triggers a call to update_subgroup_check_merge() on a clean context.
1156 *
1157 * @param force If true, the merge check will be triggered even if the
1158 * subgroup doesn't currently look ready for a merge.
1159 *
1160 * Returns TRUE if a merge check will be performed shortly.
1161 */
1162int
1163update_subgroup_trigger_merge_check (struct update_subgroup *subgrp,
1164 int force)
1165{
1166 if (subgrp->t_merge_check)
1167 return 1;
1168
1169 if (!force && !update_subgroup_ready_for_merge (subgrp))
1170 return 0;
1171
1172 subgrp->t_merge_check = NULL;
1173 thread_add_background(bm->master, update_subgroup_merge_check_thread_cb, subgrp, 0,
1174 &subgrp->t_merge_check);
1175
1176 SUBGRP_INCR_STAT (subgrp, merge_checks_triggered);
1177
1178 return 1;
1179}
1180
1181/*
1182 * update_subgroup_copy_adj_out
1183 *
1184 * Helper function that clones the adj out (state about advertised
1185 * routes) from one subgroup to another. It assumes that the adj out
1186 * of the target subgroup is empty.
1187 */
1188static void
1189update_subgroup_copy_adj_out (struct update_subgroup *source,
1190 struct update_subgroup *dest)
1191{
1192 struct bgp_adj_out *aout, *aout_copy;
1193
1194 SUBGRP_FOREACH_ADJ (source, aout)
1195 {
1196 /*
1197 * Copy the adj out.
1198 */
1199 aout_copy = bgp_adj_out_alloc (dest, aout->rn, aout->addpath_tx_id);
1200 aout_copy->attr = aout->attr ? bgp_attr_refcount (aout->attr) : NULL;
1201 }
1202}
1203
1204/*
1205 * update_subgroup_copy_packets
1206 *
1207 * Copy packets after and including the given packet to the subgroup
1208 * 'dest'.
1209 *
1210 * Returns the number of packets copied.
1211 */
1212static int
1213update_subgroup_copy_packets (struct update_subgroup *dest,
1214 struct bpacket *pkt)
1215{
1216 int count;
1217
1218 count = 0;
1219 while (pkt && pkt->buffer)
1220 {
1221 bpacket_queue_add (SUBGRP_PKTQ (dest), stream_dup (pkt->buffer),
1222 &pkt->arr);
1223 count++;
1224 pkt = bpacket_next (pkt);
1225 }
1226
1227 bpacket_queue_sanity_check (SUBGRP_PKTQ (dest));
1228
1229 return count;
1230}
1231
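/*
 * updgrp_prefix_list_update
 *
 * If the named prefix-list is the one this update group uses outbound,
 * re-resolve the cached prefix-list pointer and return 1; otherwise
 * return 0. The filter-list and distribute-list handlers below follow
 * the same pattern.
 */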
1232static int
1233 updgrp_prefix_list_update (struct update_group *updgrp, const char *name)
1234{
1235 struct peer *peer;
1236 struct bgp_filter *filter;
1237
1238 peer = UPDGRP_PEER (updgrp);
1239 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1240
1241 if (PREFIX_LIST_OUT_NAME(filter) &&
1242 (strcmp (name, PREFIX_LIST_OUT_NAME(filter)) == 0))
1243 {
1244 PREFIX_LIST_OUT(filter) =
1245 prefix_list_lookup (UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1246 return 1;
1247 }
1248 return 0;
1249}
1250
1251static int
1252 updgrp_filter_list_update (struct update_group *updgrp, const char *name)
1253{
1254 struct peer *peer;
1255 struct bgp_filter *filter;
1256
1257 peer = UPDGRP_PEER (updgrp);
1258 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1259
1260 if (FILTER_LIST_OUT_NAME(filter) &&
1261 (strcmp (name, FILTER_LIST_OUT_NAME(filter)) == 0))
1262 {
1263 FILTER_LIST_OUT(filter) = as_list_lookup (FILTER_LIST_OUT_NAME(filter));
1264 return 1;
1265 }
1266 return 0;
1267}
1268
1269static int
1270 updgrp_distribute_list_update (struct update_group *updgrp, const char *name)
1271{
1272 struct peer *peer;
1273 struct bgp_filter *filter;
1274
1275 peer = UPDGRP_PEER(updgrp);
1276 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1277
1278 if (DISTRIBUTE_OUT_NAME(filter) &&
1279 (strcmp (name, DISTRIBUTE_OUT_NAME(filter)) == 0))
1280 {
1281 DISTRIBUTE_OUT(filter) = access_list_lookup(UPDGRP_AFI(updgrp),
1282 DISTRIBUTE_OUT_NAME(filter));
1283 return 1;
1284 }
1285 return 0;
1286}
1287
1288static int
1289 updgrp_route_map_update (struct update_group *updgrp, const char *name,
1290 int *def_rmap_changed)
1291{
1292 struct peer *peer;
1293 struct bgp_filter *filter;
1294 int changed = 0;
1295 afi_t afi;
1296 safi_t safi;
1297
1298 peer = UPDGRP_PEER (updgrp);
1299 afi = UPDGRP_AFI (updgrp);
1300 safi = UPDGRP_SAFI (updgrp);
1301 filter = &peer->filter[afi][safi];
1302
1303 if (ROUTE_MAP_OUT_NAME(filter) &&
1304 (strcmp (name, ROUTE_MAP_OUT_NAME(filter)) == 0))
1305 {
1306 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name (name);
1307
1308 changed = 1;
1309 }
1310
1311 if (UNSUPPRESS_MAP_NAME(filter) &&
1312 (strcmp (name, UNSUPPRESS_MAP_NAME(filter)) == 0))
1313 {
1314 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name (name);
1315 changed = 1;
1316 }
1317
1318 /* process default-originate route-map */
1319 if (peer->default_rmap[afi][safi].name &&
1320 (strcmp (name, peer->default_rmap[afi][safi].name) == 0))
1321 {
1322 peer->default_rmap[afi][safi].map = route_map_lookup_by_name (name);
1323 if (def_rmap_changed)
1324 *def_rmap_changed = 1;
1325 }
1326 return changed;
1327}
1328
1329/*
1330 * hash iteration callback function to process a policy change for an
1331 * update group. Check if the changed policy matches the updgrp's
1332 * outbound route-map or unsuppress-map or default-originate map or
1333 * filter-list or prefix-list or distribute-list.
1334 * Trigger update generation accordingly.
1335 */
1336static int
1337updgrp_policy_update_walkcb (struct update_group *updgrp, void *arg)
1338{
1339 struct updwalk_context *ctx = arg;
1340 struct update_subgroup *subgrp;
1341 int changed = 0;
1342 int def_changed = 0;
1343
1344 if (!updgrp || !ctx || !ctx->policy_name)
1345 return UPDWALK_CONTINUE;
1346
1347 switch (ctx->policy_type) {
1348 case BGP_POLICY_ROUTE_MAP:
1349 changed = updgrp_route_map_update(updgrp, ctx->policy_name, &def_changed);
1350 break;
1351 case BGP_POLICY_FILTER_LIST:
1352 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1353 break;
1354 case BGP_POLICY_PREFIX_LIST:
1355 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1356 break;
1357 case BGP_POLICY_DISTRIBUTE_LIST:
1358 changed = updgrp_distribute_list_update(updgrp, ctx->policy_name);
1359 break;
1360 default:
1361 break;
1362 }
1363
1364 /* If not doing route update, return after updating "config" */
1365 if (!ctx->policy_route_update)
1366 return UPDWALK_CONTINUE;
1367
1368 /* If nothing has changed, return after updating "config" */
1369 if (!changed && !def_changed)
1370 return UPDWALK_CONTINUE;
1371
1372 /*
1373 * If something has changed, at the beginning of a route-map modification
1374 * event, mark each subgroup's needs-refresh bit. For one, it signals that
1375 * the subgroup needs a refresh. Second, it prevents premature
1376 * merge of this subgroup with another before a complete (outbound) refresh.
1377 */
1378 if (ctx->policy_event_start_flag)
1379 {
1380 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
1381 {
1382 update_subgroup_set_needs_refresh(subgrp, 1);
1383 }
1384 return UPDWALK_CONTINUE;
1385 }
1386
1387 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
1388 {
1389 if (changed)
1390 {
1391 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1392 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing routes upon policy %s (type %d) change",
1393 updgrp->id, subgrp->id, ctx->policy_name, ctx->policy_type);
1394 subgroup_announce_route (subgrp);
1395 }
1396 if (def_changed)
1397 {
1398 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1399 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing default upon default routemap %s change",
1400 updgrp->id, subgrp->id, ctx->policy_name);
1401 subgroup_default_originate (subgrp, 0);
1402 }
1403 update_subgroup_set_needs_refresh(subgrp, 0);
1404 }
1405 return UPDWALK_CONTINUE;
1406}
1407
1408static int
1409update_group_walkcb (struct hash_backet *backet, void *arg)
1410{
1411 struct update_group *updgrp = backet->data;
1412 struct updwalk_context *wctx = arg;
1413 int ret = (*wctx->cb) (updgrp, wctx->context);
1414 return ret;
1415}
1416
1417static int
1418update_group_periodic_merge_walkcb (struct update_group *updgrp, void *arg)
1419{
1420 struct update_subgroup *subgrp;
1421 struct update_subgroup *tmp_subgrp;
1422 const char *reason = arg;
1423
1424 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1425 update_subgroup_check_merge (subgrp, reason);
1426 return UPDWALK_CONTINUE;
1427}
1428
1429/********************
1430 * PUBLIC FUNCTIONS
1431 ********************/
1432
1433/*
1434 * trigger function when a policy (route-map/filter-list/prefix-list/
1435 * distribute-list etc.) content changes. Go through all the
1436 * update groups and process the change.
1437 *
1438 * bgp: the bgp instance
1439 * ptype: the type of policy that got modified, see bgpd.h
1440 * pname: name of the policy
1441 * route_update: flag to control if an automatic update generation should
1442 * occur
1443 * start_event: flag that indicates if it's the beginning of the change.
1444 * Esp. when the user is changing the content interactively
1445 * over multiple statements. Useful to set dirty flag on
1446 * update groups.
1447 */
1448void
1449update_group_policy_update (struct bgp *bgp, bgp_policy_type_e ptype,
1450 const char *pname, int route_update, int start_event)
1451{
1452 struct updwalk_context ctx;
1453
1454 memset (&ctx, 0, sizeof (ctx));
1455 ctx.policy_type = ptype;
1456 ctx.policy_name = pname;
1457 ctx.policy_route_update = route_update;
1458 ctx.policy_event_start_flag = start_event;
1459 ctx.flags = 0;
1460
1461 update_group_walk (bgp, updgrp_policy_update_walkcb, &ctx);
1462}
1463
1464/*
1465 * update_subgroup_split_peer
1466 *
1467 * Ensure that the given peer is in a subgroup of its own in the
1468 * specified update group.
1469 */
1470void
1471update_subgroup_split_peer (struct peer_af *paf, struct update_group *updgrp)
1472{
1473 struct update_subgroup *old_subgrp, *subgrp;
1474 uint64_t old_id;
1475
1476
1477 old_subgrp = paf->subgroup;
1478
1479 if (!updgrp)
1480 updgrp = old_subgrp->update_group;
1481
1482 /*
1483 * If the peer is alone in its subgroup, reuse the existing
1484 * subgroup.
1485 */
1486 if (old_subgrp->peer_count == 1)
1487 {
1488 if (updgrp == old_subgrp->update_group)
1489 return;
1490
1491 subgrp = old_subgrp;
1492 old_id = old_subgrp->update_group->id;
1493
1494 if (bgp_debug_peer_updout_enabled(paf->peer->host))
1495 {
1496 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1497 }
1498
1499 update_group_remove_subgroup (old_subgrp->update_group, old_subgrp);
1500 update_group_add_subgroup (updgrp, subgrp);
1501
1502 if (bgp_debug_peer_updout_enabled(paf->peer->host))
1503 {
1504 UPDGRP_PEER_DBG_EN(updgrp);
1505 }
1506 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
1507 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1508 old_id, subgrp->id, paf->peer->host, updgrp->id, subgrp->id);
1509
1510 /*
1511 * The state of the subgroup (adj_out, advs, packet queue etc)
1512 * is consistent internally, but may not be identical to other
1513 * subgroups in the new update group even if the version number
1514 * matches up. Make sure a full refresh is done before the
1515 * subgroup is merged with another.
1516 */
1517 update_subgroup_set_needs_refresh (subgrp, 1);
1518
1519 SUBGRP_INCR_STAT (subgrp, updgrp_switch_events);
1520 return;
1521 }
1522
1523 /*
1524 * Create a new subgroup under the specified update group, and copy
1525 * over relevant state to it.
1526 */
1527 subgrp = update_subgroup_create (updgrp);
1528 update_subgroup_inherit_info (subgrp, old_subgrp);
1529
1530 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1531 subgrp->split_from.subgroup_id = old_subgrp->id;
1532
1533 /*
1534 * Copy out relevant state from the old subgroup.
1535 */
1536 update_subgroup_copy_adj_out (paf->subgroup, subgrp);
1537 update_subgroup_copy_packets (subgrp, paf->next_pkt_to_send);
1538
1539 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
1540 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " peer %s split and moved into u%" PRIu64 ":s%" PRIu64,
1541 paf->subgroup->update_group->id, paf->subgroup->id,
1542 paf->peer->host, updgrp->id, subgrp->id);
1543
1544 SUBGRP_INCR_STAT (paf->subgroup, split_events);
1545
1546 /*
1547 * Since queued advs were left behind, this new subgroup needs a
1548 * refresh.
1549 */
1550 update_subgroup_set_needs_refresh (subgrp, 1);
1551
1552 /*
1553 * Remove peer from old subgroup, and add it to the new one.
1554 */
1555 update_subgroup_remove_peer (paf->subgroup, paf);
1556
1557 update_subgroup_add_peer (subgrp, paf, 1);
1558}
1559
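/*
 * update_bgp_group_init
 *
 * Create the per-AFI/SAFI hash tables that hold this bgp instance's
 * update groups; update_bgp_group_free below tears them down.
 */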
1560void
1561 update_bgp_group_init (struct bgp *bgp)
1562{
1563 int afid;
1564
1565 AF_FOREACH (afid)
1566 bgp->update_groups[afid] = hash_create (updgrp_hash_key_make,
1567 updgrp_hash_cmp);
1568}
1569
1569
1570void
1571update_bgp_group_free (struct bgp *bgp)
1572{
1573 int afid;
1574
1575 AF_FOREACH (afid)
1576 {
1577 if (bgp->update_groups[afid])
1578 {
1579 hash_free(bgp->update_groups[afid]);
1580 bgp->update_groups[afid] = NULL;
1581 }
1582 }
1583}
1584
1585 void
1586 update_group_show (struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1587 uint64_t subgrp_id)
1588 {
1589 struct updwalk_context ctx;
1590 memset (&ctx, 0, sizeof (ctx));
1591 ctx.vty = vty;
1592 ctx.subgrp_id = subgrp_id;
1593
1594 update_group_af_walk (bgp, afi, safi, update_group_show_walkcb, &ctx);
1595}
1596
1597/*
1598 * update_group_show_stats
1599 *
1600 * Show global statistics about update groups.
1601 */
1602void
1603update_group_show_stats (struct bgp *bgp, struct vty *vty)
1604{
1605 vty_out (vty, "Update groups created: %u%s",
1606 bgp->update_group_stats.updgrps_created, VTY_NEWLINE);
1607 vty_out (vty, "Update groups deleted: %u%s",
1608 bgp->update_group_stats.updgrps_deleted, VTY_NEWLINE);
1609 vty_out (vty, "Update subgroups created: %u%s",
1610 bgp->update_group_stats.subgrps_created, VTY_NEWLINE);
1611 vty_out (vty, "Update subgroups deleted: %u%s",
1612 bgp->update_group_stats.subgrps_deleted, VTY_NEWLINE);
1613 vty_out (vty, "Join events: %u%s",
1614 bgp->update_group_stats.join_events, VTY_NEWLINE);
1615 vty_out (vty, "Prune events: %u%s",
1616 bgp->update_group_stats.prune_events, VTY_NEWLINE);
1617 vty_out (vty, "Merge events: %u%s",
1618 bgp->update_group_stats.merge_events, VTY_NEWLINE);
1619 vty_out (vty, "Split events: %u%s",
1620 bgp->update_group_stats.split_events, VTY_NEWLINE);
1621 vty_out (vty, "Update group switch events: %u%s",
1622 bgp->update_group_stats.updgrp_switch_events, VTY_NEWLINE);
1623 vty_out (vty, "Peer route refreshes combined: %u%s",
1624 bgp->update_group_stats.peer_refreshes_combined, VTY_NEWLINE);
1625 vty_out (vty, "Merge checks triggered: %u%s",
1626 bgp->update_group_stats.merge_checks_triggered, VTY_NEWLINE);
1627}
1628
1629/*
1630 * update_group_adjust_peer
1631 */
1632void
1633update_group_adjust_peer (struct peer_af *paf)
1634{
1635 struct update_group *updgrp;
1636 struct update_subgroup *subgrp, *old_subgrp;
1637 struct peer *peer;
1638
1639 if (!paf)
1640 return;
1641
1642 peer = PAF_PEER (paf);
1643 if (!peer_established (peer))
1644 {
1645 return;
1646 }
1647
1648 if (!CHECK_FLAG (peer->flags, PEER_FLAG_CONFIG_NODE))
1649 {
1650 return;
1651 }
1652
1653 if (!peer->afc_nego[paf->afi][paf->safi])
1654 {
1655 return;
1656 }
1657
1658 updgrp = update_group_find (paf);
1659 if (!updgrp)
1660 {
1661 updgrp = update_group_create (paf);
1662 if (!updgrp)
1663 {
1664 zlog_err ("couldn't create update group for peer %s",
1665 paf->peer->host);
1666 return;
1667 }
1668 }
1669
1670 old_subgrp = paf->subgroup;
1671
1672 if (old_subgrp)
1673 {
1674
1675 /*
1676 * If the update group of the peer is unchanged, the peer can stay
1677 * in its existing subgroup and we're done.
1678 */
1679 if (old_subgrp->update_group == updgrp)
1680 return;
1681
1682 /*
1683 * The peer is switching between update groups. Put it in its
1684 * own subgroup under the new update group.
1685 */
1686 update_subgroup_split_peer (paf, updgrp);
1687 return;
1688 }
1689
1690 subgrp = update_subgroup_find (updgrp, paf);
1691 if (!subgrp)
1692 {
1693 subgrp = update_subgroup_create (updgrp);
1694 if (!subgrp)
1695 return;
1696 }
1697
1698 update_subgroup_add_peer (subgrp, paf, 1);
1699 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
1700 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " add peer %s",
1701 updgrp->id, subgrp->id, paf->peer->host);
1702
1703 return;
1704}
1705
1706int
1707update_group_adjust_soloness (struct peer *peer, int set)
1708{
1709 struct peer_group *group;
1710 struct listnode *node, *nnode;
1711
1712 if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
1713 {
1714 peer_lonesoul_or_not (peer, set);
1715 if (peer->status == Established)
1716 bgp_announce_route_all (peer);
1717 }
1718 else
1719 {
1720 group = peer->group;
1721 for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
1722 {
1723 peer_lonesoul_or_not (peer, set);
1724 if (peer->status == Established)
1725 bgp_announce_route_all (peer);
1726 }
1727 }
1728 return 0;
1729}
1730
1731/*
1732 * update_subgroup_rib
1733 */
1734struct bgp_table *
1735update_subgroup_rib (struct update_subgroup *subgrp)
1736{
1737 struct bgp *bgp;
1738
1739 bgp = SUBGRP_INST (subgrp);
1740 if (!bgp)
1741 return NULL;
1742
1743 return bgp->rib[SUBGRP_AFI (subgrp)][SUBGRP_SAFI (subgrp)];
1744}
1745
1746void
1747update_group_af_walk (struct bgp *bgp, afi_t afi, safi_t safi,
1748 updgrp_walkcb cb, void *ctx)
1749{
1750 struct updwalk_context wctx;
1751 int afid;
1752
1753 if (!bgp)
1754 return;
1755 afid = afindex (afi, safi);
1756 if (afid >= BGP_AF_MAX)
1757 return;
1758
1759 memset (&wctx, 0, sizeof (wctx));
1760 wctx.cb = cb;
1761 wctx.context = ctx;
1762
1763 if (bgp->update_groups[afid])
1764 hash_walk (bgp->update_groups[afid], update_group_walkcb, &wctx);
1765}
1766
1767void
1768update_group_walk (struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1769{
1770 afi_t afi;
1771 safi_t safi;
1772
1773 FOREACH_AFI_SAFI (afi, safi)
1774 {
1775 update_group_af_walk (bgp, afi, safi, cb, ctx);
1776 }
1777}
1778
1779void
1780update_group_periodic_merge (struct bgp *bgp)
1781{
1782 char reason[] = "periodic merge check";
1783
1784 update_group_walk (bgp, update_group_periodic_merge_walkcb,
1785 (void *) reason);
1786}
1787
1788static int
1789update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1790 void *arg)
1791{
1792 struct update_subgroup *subgrp;
1793 struct peer *peer;
1794 afi_t afi;
1795 safi_t safi;
1796
1797 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
1798 {
1799 peer = SUBGRP_PEER (subgrp);
1800 afi = SUBGRP_AFI (subgrp);
1801 safi = SUBGRP_SAFI (subgrp);
1802
1803 if (peer->default_rmap[afi][safi].name)
1804 {
1805 subgroup_default_originate (subgrp, 0);
1806 }
1807 }
1808
1809 return UPDWALK_CONTINUE;
1810}
1811
1812 int
1813update_group_refresh_default_originate_route_map (struct thread *thread)
1814{
1815 struct bgp *bgp;
1816 char reason[] = "refresh default-originate route-map";
1817
1818 bgp = THREAD_ARG(thread);
1819 update_group_walk (bgp, update_group_default_originate_route_map_walkcb,
1820 reason);
1821 THREAD_TIMER_OFF (bgp->t_rmap_def_originate_eval);
1822 bgp_unlock(bgp);
1823
1824 return(0);
1825}
1826
1827/*
1828 * peer_af_announce_route
1829 *
1830 * Refreshes routes out to a peer_af immediately.
1831 *
1832 * If the combine parameter is TRUE, then this function will try to
1833 * gather other peers in the subgroup for which a route announcement
1834 * is pending and efficiently announce routes to all of them.
1835 *
1836 * For now, the 'combine' option has an effect only if all peers in
1837 * the subgroup have a route announcement pending.
1838 */
1839void
1840peer_af_announce_route (struct peer_af *paf, int combine)
1841{
1842 struct update_subgroup *subgrp;
1843 struct peer_af *cur_paf;
1844 int all_pending;
1845
1846 subgrp = paf->subgroup;
1847 all_pending = 0;
1848
1849 if (combine)
1850 {
1851 /*
1852 * If there are other peers in the old subgroup that also need
1853 * routes to be announced, pull them into the peer's new
1854 * subgroup.
1855 * Combine route announcement with other peers if possible.
1856 *
1857 * For now, we combine only if all peers in the subgroup have an
1858 * announcement pending.
1859 */
1860 all_pending = 1;
1861
1862 SUBGRP_FOREACH_PEER (subgrp, cur_paf)
1863 {
1864 if (cur_paf == paf)
1865 continue;
1866
1867 if (cur_paf->t_announce_route)
1868 continue;
1869
1870 all_pending = 0;
1871 break;
1872 }
1873 }
1874 /*
1875 * Announce to the peer alone if we were not asked to combine peers,
1876 * or if some peers don't have a route announcement pending.
1877 */
1878 if (!combine || !all_pending)
1879 {
1880 update_subgroup_split_peer (paf, NULL);
1881 if (!paf->subgroup)
1882 return;
1883
1884 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1885 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " %s announcing routes",
1886 subgrp->update_group->id, subgrp->id, paf->peer->host);
1887
1888 subgroup_announce_route (paf->subgroup);
1889 return;
1890 }
1891
1892 /*
1893 * We will announce routes to the entire subgroup.
1894 *
1895 * First stop refresh timers on all the other peers.
1896 */
1897 SUBGRP_FOREACH_PEER (subgrp, cur_paf)
1898 {
1899 if (cur_paf == paf)
1900 continue;
1901
1902 bgp_stop_announce_route_timer (cur_paf);
1903 }
1904
1905 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1906 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing routes to %s, combined into %d peers",
1907 subgrp->update_group->id, subgrp->id,
1908 paf->peer->host, subgrp->peer_count);
1909
1910 subgroup_announce_route (subgrp);
1911
1912 SUBGRP_INCR_STAT_BY (subgrp, peer_refreshes_combined,
1913 subgrp->peer_count - 1);
1914}
1915
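/*
 * subgroup_trigger_write
 *
 * Kick the write thread for every established peer in the subgroup so
 * that queued packets can be sent.
 */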
1916void
1917subgroup_trigger_write (struct update_subgroup *subgrp)
1918{
1919 struct peer_af *paf;
1920
1921#if 0
1922 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1923 zlog_debug("u%llu:s%llu scheduling write thread for peers",
1924 subgrp->update_group->id, subgrp->id);
1925#endif
1926 SUBGRP_FOREACH_PEER (subgrp, paf)
1927 {
1928 if (paf->peer->status == Established)
1929 {
1930 BGP_PEER_WRITE_ON (paf->peer->t_write, bgp_write, paf->peer->fd,
1931 paf->peer);
1932 }
1933 }
1934}
1935
1936int
1937update_group_clear_update_dbg (struct update_group *updgrp, void *arg)
1938{
1939 UPDGRP_PEER_DBG_OFF(updgrp);
1940 return UPDWALK_CONTINUE;
1941}
1942
1943 /* Return true if we should addpath encode NLRI to this peer */
1944int
1945bgp_addpath_encode_tx (struct peer *peer, afi_t afi, safi_t safi)
1946{
1947 return (CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV) &&
1948 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_RX_RCV));
1949}
1950
1951/*
1952 * Return true if this is a path we should advertise due to a
1953 * configured addpath-tx knob
1954 */
1955int
1956bgp_addpath_tx_path (struct peer *peer, afi_t afi, safi_t safi,
1957 struct bgp_info *ri)
1958{
1959 if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ADDPATH_TX_ALL_PATHS))
1960 return 1;
1961
1962 if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS) &&
1963 CHECK_FLAG (ri->flags, BGP_INFO_DMED_SELECTED))
1964 return 1;
1965
1966 return 0;
1967}