1/**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with GNU Zebra; see the file COPYING. If not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
25 * 02111-1307, USA.
26 */
27
28#include <zebra.h>
29
30#include "prefix.h"
31#include "thread.h"
32#include "buffer.h"
33#include "stream.h"
34#include "command.h"
35#include "sockunion.h"
36#include "network.h"
37#include "memory.h"
38#include "filter.h"
39#include "routemap.h"
40#include "str.h"
41#include "log.h"
42#include "plist.h"
43#include "linklist.h"
44#include "workqueue.h"
45#include "hash.h"
46#include "jhash.h"
47#include "queue.h"
48
49#include "bgpd/bgpd.h"
50#include "bgpd/bgp_table.h"
51#include "bgpd/bgp_debug.h"
52#include "bgpd/bgp_fsm.h"
53#include "bgpd/bgp_advertise.h"
54#include "bgpd/bgp_packet.h"
55#include "bgpd/bgp_updgrp.h"
56#include "bgpd/bgp_route.h"
57#include "bgpd/bgp_filter.h"
58
59/********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63/**
 64 * Assign a unique ID to an update group or subgroup, mostly for display/
 65 * debugging purposes. The ID space is 64 bits, so it is used leisurely,
 66 * with no concern about wrapping or about filling gaps. While at it,
 67 * timestamp the creation.
68 */
69static void
70update_group_checkin (struct update_group *updgrp)
71{
72 updgrp->id = ++bm->updgrp_idspace;
73 updgrp->uptime = bgp_clock ();
74}
75
76static void
77update_subgroup_checkin (struct update_subgroup *subgrp,
78 struct update_group *updgrp)
79{
80 subgrp->id = ++bm->subgrp_idspace;
81 subgrp->uptime = bgp_clock ();
82}
83
84static void
85sync_init (struct update_subgroup *subgrp)
86{
87 subgrp->sync = XCALLOC (MTYPE_BGP_SYNCHRONISE,
88 sizeof (struct bgp_synchronize));
89 BGP_ADV_FIFO_INIT (&subgrp->sync->update);
90 BGP_ADV_FIFO_INIT (&subgrp->sync->withdraw);
91 BGP_ADV_FIFO_INIT (&subgrp->sync->withdraw_low);
92 subgrp->hash = hash_create (baa_hash_key, baa_hash_cmp);
93
94 /* We use a larger buffer for subgrp->work in the event that:
95 * - We RX a BGP_UPDATE where the attributes alone are just
96 * under BGP_MAX_PACKET_SIZE
97 * - The user configures an outbound route-map that does many as-path
98 * prepends or adds many communities. At most they can have CMD_ARGC_MAX
99 * args in a route-map so there is a finite limit on how large they can
100 * make the attributes.
101 *
102 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid bounds
103 * checking for every single attribute as we construct an UPDATE.
104 */
105 subgrp->work = stream_new (BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
106 subgrp->scratch = stream_new (BGP_MAX_PACKET_SIZE);
107}
108
109static void
110sync_delete (struct update_subgroup *subgrp)
111{
112 if (subgrp->sync)
113 XFREE (MTYPE_BGP_SYNCHRONISE, subgrp->sync);
114 subgrp->sync = NULL;
115 if (subgrp->hash)
116 hash_free (subgrp->hash);
117 subgrp->hash = NULL;
118 if (subgrp->work)
119 stream_free (subgrp->work);
120 subgrp->work = NULL;
121 if (subgrp->scratch)
122 stream_free (subgrp->scratch);
123 subgrp->scratch = NULL;
124}
125
126/**
127 * conf_copy
128 *
129 * copy only those fields that are relevant to update group match
130 */
131static void
132conf_copy (struct peer *dst, struct peer *src, afi_t afi, safi_t safi)
133{
134 struct bgp_filter *srcfilter;
135 struct bgp_filter *dstfilter;
136
137 srcfilter = &src->filter[afi][safi];
138 dstfilter = &dst->filter[afi][safi];
139
140 dst->bgp = src->bgp;
141 dst->sort = src->sort;
142 dst->as = src->as;
143 dst->weight = src->weight;
144 dst->v_routeadv = src->v_routeadv;
145 dst->flags = src->flags;
146 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
147 if (dst->host)
148 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
149
150 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
151 dst->cap = src->cap;
152 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
153 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
 154 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
155 dst->local_as = src->local_as;
156 dst->change_local_as = src->change_local_as;
157 dst->shared_network = src->shared_network;
158 memcpy (&(dst->nexthop), &(src->nexthop), sizeof (struct bgp_nexthop));
159
160 dst->group = src->group;
161
162 if (src->default_rmap[afi][safi].name)
163 {
164 dst->default_rmap[afi][safi].name =
 165 XSTRDUP(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
166 dst->default_rmap[afi][safi].map = src->default_rmap[afi][safi].map;
167 }
168
169 if (DISTRIBUTE_OUT_NAME(srcfilter))
170 {
 171 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
172 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
173 }
174
175 if (PREFIX_LIST_OUT_NAME(srcfilter))
176 {
 177 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
178 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
179 }
180
181 if (FILTER_LIST_OUT_NAME(srcfilter))
182 {
 183 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
184 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
185 }
186
187 if (ROUTE_MAP_OUT_NAME(srcfilter))
188 {
 189 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
190 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
191 }
192
193 if (UNSUPPRESS_MAP_NAME(srcfilter))
194 {
 195 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
196 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
197 }
198}
199
200/**
 201 * Since we did a bunch of XSTRDUPs in conf_copy, free them up now.
202 */
203static void
204conf_release (struct peer *src, afi_t afi, safi_t safi)
205{
206 struct bgp_filter *srcfilter;
207
208 srcfilter = &src->filter[afi][safi];
209
210 if (src->default_rmap[afi][safi].name)
 211 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
212
213 if (srcfilter->dlist[FILTER_OUT].name)
 214 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
215
216 if (srcfilter->plist[FILTER_OUT].name)
 217 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
218
219 if (srcfilter->aslist[FILTER_OUT].name)
 220 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
221
222 if (srcfilter->map[RMAP_OUT].name)
 223 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
224
225 if (srcfilter->usmap.name)
 226 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
227
228 if (src->host)
229 XFREE(MTYPE_BGP_PEER_HOST, src->host);
230 src->host = NULL;
231}
232
233static void
234peer2_updgrp_copy (struct update_group *updgrp, struct peer_af *paf)
235{
236 struct peer *src;
237 struct peer *dst;
238
239 if (!updgrp || !paf)
240 return;
241
242 src = paf->peer;
243 dst = updgrp->conf;
244 if (!src || !dst)
245 return;
246
247 updgrp->afi = paf->afi;
248 updgrp->safi = paf->safi;
249 updgrp->afid = paf->afid;
250 updgrp->bgp = src->bgp;
251
252 conf_copy (dst, src, paf->afi, paf->safi);
253}
254
255/**
256 * auxiliary functions to maintain the hash table.
257 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
258 * - updgrp_hash_key_make - makes the key for update group search
259 * - updgrp_hash_cmp - compare two update groups.
260 */
261static void *
262updgrp_hash_alloc (void *p)
263{
264 struct update_group *updgrp;
 265 const struct update_group *in;
 266
 267 in = (const struct update_group *)p;
268 updgrp = XCALLOC (MTYPE_BGP_UPDGRP, sizeof (struct update_group));
269 memcpy (updgrp, in, sizeof (struct update_group));
270 updgrp->conf = XCALLOC (MTYPE_BGP_PEER, sizeof (struct peer));
271 conf_copy (updgrp->conf, in->conf, in->afi, in->safi);
272 return updgrp;
273}
274
275/**
276 * The hash value for a peer is computed from the following variables:
277 * v = f(
278 * 1. IBGP (1) or EBGP (2)
279 * 2. FLAGS based on configuration:
280 * LOCAL_AS_NO_PREPEND
281 * LOCAL_AS_REPLACE_AS
282 * 3. AF_FLAGS based on configuration:
283 * Refer to definition in bgp_updgrp.h
284 * 4. (AF-independent) Capability flags:
285 * AS4_RCV capability
286 * 5. (AF-dependent) Capability flags:
287 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
288 * 6. MRAI
289 * 7. peer-group name
290 * 8. Outbound route-map name (neighbor route-map <> out)
291 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
292 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
293 * 11. Outbound as-list name (neighbor filter-list <> out)
294 * 12. Unsuppress map name (neighbor unsuppress-map <>)
295 * 13. default rmap name (neighbor default-originate route-map <>)
296 * 14. encoding both global and link-local nexthop?
297 * 15. If peer is configured to be a lonesoul, peer ip address
298 * 16. Local-as should match, if configured.
299 * )
300 */
301static unsigned int
302updgrp_hash_key_make (void *p)
303{
304 const struct update_group *updgrp;
305 const struct peer *peer;
306 const struct bgp_filter *filter;
307 uint32_t flags;
308 uint32_t key;
309 afi_t afi;
310 safi_t safi;
311
312#define SEED1 999331
313#define SEED2 2147483647
314
315 updgrp = p;
316 peer = updgrp->conf;
317 afi = updgrp->afi;
318 safi = updgrp->safi;
319 flags = peer->af_flags[afi][safi];
320 filter = &peer->filter[afi][safi];
321
322 key = 0;
323
324 key = jhash_1word (peer->sort, key); /* EBGP or IBGP */
325 key = jhash_1word ((peer->flags & PEER_UPDGRP_FLAGS), key);
326 key = jhash_1word ((flags & PEER_UPDGRP_AF_FLAGS), key);
327 key = jhash_1word ((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
328 key = jhash_1word ((peer->af_cap[afi][safi] &
329 PEER_UPDGRP_AF_CAP_FLAGS), key);
330 key = jhash_1word (peer->v_routeadv, key);
331 key = jhash_1word (peer->change_local_as, key);
332
333 if (peer->group)
334 key = jhash_1word (jhash (peer->group->name,
335 strlen (peer->group->name), SEED1), key);
336
337 if (filter->map[RMAP_OUT].name)
338 key = jhash_1word (jhash (filter->map[RMAP_OUT].name,
339 strlen (filter->map[RMAP_OUT].name), SEED1),
340 key);
341
342 if (filter->dlist[FILTER_OUT].name)
343 key = jhash_1word (jhash (filter->dlist[FILTER_OUT].name,
344 strlen (filter->dlist[FILTER_OUT].name), SEED1),
345 key);
346
347 if (filter->plist[FILTER_OUT].name)
348 key = jhash_1word (jhash (filter->plist[FILTER_OUT].name,
349 strlen (filter->plist[FILTER_OUT].name), SEED1),
350 key);
351
352 if (filter->aslist[FILTER_OUT].name)
353 key = jhash_1word (jhash (filter->aslist[FILTER_OUT].name,
354 strlen (filter->aslist[FILTER_OUT].name),
355 SEED1), key);
356
357 if (filter->usmap.name)
358 key = jhash_1word (jhash (filter->usmap.name,
359 strlen (filter->usmap.name), SEED1), key);
360
361 if (peer->default_rmap[afi][safi].name)
362 key = jhash_1word (jhash (peer->default_rmap[afi][safi].name,
363 strlen (peer->default_rmap[afi][safi].name),
364 SEED1), key);
365
366 /* If peer is on a shared network and is exchanging IPv6 prefixes,
367 * it needs to include link-local address. That's different from
368 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
369 * bytes). We create different update groups to take care of that.
370 */
371 key = jhash_1word ((peer->shared_network &&
372 peer_afi_active_nego (peer, AFI_IP6)),
373 key);
374
375 /*
 376 * There are certain peers that must get their own update-group:
 377 * - lonesoul peers
 378 * - peers that negotiated ORF
379 */
380 if (CHECK_FLAG (peer->flags, PEER_FLAG_LONESOUL) ||
 381 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) ||
 382 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
383 key = jhash_1word (jhash (peer->host, strlen (peer->host), SEED2), key);
384
385 return key;
386}
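/*
 * Illustrative note, not from the original sources: two peers end up in the
 * same update group only when every input folded into the key above matches.
 * For example, two IBGP peers sharing the (hypothetical) peer-group
 * "RR-CLIENTS" and outbound route-map "RM-OUT", with identical flags and
 * MRAI, produce identical keys and are grouped together; enabling the prefix
 * ORF capability or the lonesoul flag on one of them mixes that peer's own
 * host string into the key and forces it into a group of its own.
 */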
387
388static int
389updgrp_hash_cmp (const void *p1, const void *p2)
390{
391 const struct update_group *grp1;
392 const struct update_group *grp2;
393 const struct peer *pe1;
394 const struct peer *pe2;
395 uint32_t flags1;
396 uint32_t flags2;
397 const struct bgp_filter *fl1;
398 const struct bgp_filter *fl2;
399 afi_t afi;
400 safi_t safi;
401
402 if (!p1 || !p2)
403 return 0;
404
405 grp1 = p1;
406 grp2 = p2;
407 pe1 = grp1->conf;
408 pe2 = grp2->conf;
409 afi = grp1->afi;
410 safi = grp1->safi;
411 flags1 = pe1->af_flags[afi][safi];
412 flags2 = pe2->af_flags[afi][safi];
413 fl1 = &pe1->filter[afi][safi];
414 fl2 = &pe2->filter[afi][safi];
415
416 /* put EBGP and IBGP peers in different update groups */
417 if (pe1->sort != pe2->sort)
418 return 0;
419
420 /* check peer flags */
421 if ((pe1->flags & PEER_UPDGRP_FLAGS) !=
422 (pe2->flags & PEER_UPDGRP_FLAGS))
423 return 0;
424
425 /* If there is 'local-as' configured, it should match. */
426 if (pe1->change_local_as != pe2->change_local_as)
427 return 0;
428
429 /* flags like route reflector client */
430 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
431 return 0;
432
433 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS) !=
434 (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
435 return 0;
436
437 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS) !=
438 (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
439 return 0;
440
441 if (pe1->v_routeadv != pe2->v_routeadv)
442 return 0;
443
444 if (pe1->group != pe2->group)
445 return 0;
446
447 /* route-map names should be the same */
448 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name) ||
449 (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name) ||
450 (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name &&
451 strcmp (fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
452 return 0;
453
454 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name) ||
455 (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name) ||
456 (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name &&
457 strcmp (fl1->dlist[FILTER_OUT].name, fl2->dlist[FILTER_OUT].name)))
458 return 0;
459
460 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name) ||
461 (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name) ||
462 (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name &&
463 strcmp (fl1->plist[FILTER_OUT].name, fl2->plist[FILTER_OUT].name)))
464 return 0;
465
466 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name) ||
467 (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name) ||
468 (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name &&
469 strcmp (fl1->aslist[FILTER_OUT].name, fl2->aslist[FILTER_OUT].name)))
470 return 0;
471
472 if ((fl1->usmap.name && !fl2->usmap.name) ||
473 (!fl1->usmap.name && fl2->usmap.name) ||
474 (fl1->usmap.name && fl2->usmap.name &&
475 strcmp (fl1->usmap.name, fl2->usmap.name)))
476 return 0;
477
478 if ((pe1->default_rmap[afi][safi].name &&
479 !pe2->default_rmap[afi][safi].name) ||
480 (!pe1->default_rmap[afi][safi].name &&
481 pe2->default_rmap[afi][safi].name) ||
482 (pe1->default_rmap[afi][safi].name &&
483 pe2->default_rmap[afi][safi].name &&
484 strcmp (pe1->default_rmap[afi][safi].name,
485 pe2->default_rmap[afi][safi].name)))
486 return 0;
487
488 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
489 return 0;
490
491 if ((CHECK_FLAG (pe1->flags, PEER_FLAG_LONESOUL) ||
 492 CHECK_FLAG (pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) ||
 493 CHECK_FLAG (pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_OLD_RCV)) &&
494 !sockunion_same (&pe1->su, &pe2->su))
495 return 0;
496
497 return 1;
498}
499
500static void
501peer_lonesoul_or_not (struct peer *peer, int set)
502{
503 /* no change in status? */
504 if (set == (CHECK_FLAG (peer->flags, PEER_FLAG_LONESOUL) > 0))
505 return;
506
507 if (set)
508 SET_FLAG (peer->flags, PEER_FLAG_LONESOUL);
509 else
510 UNSET_FLAG (peer->flags, PEER_FLAG_LONESOUL);
511
512 update_group_adjust_peer_afs (peer);
513}
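/*
 * Descriptive note: a "lonesoul" peer is one that must never share an update
 * group with other peers. updgrp_hash_key_make() and updgrp_hash_cmp() above
 * fold the peer's own identity into the key/comparison when the flag is set,
 * so after toggling it here we call update_group_adjust_peer_afs() to re-home
 * the peer's peer_af structures into the appropriate (sub)groups.
 */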
514
515/*
516 * subgroup_total_packets_enqueued
517 *
518 * Returns the total number of packets enqueued to a subgroup.
519 */
520static unsigned int
521subgroup_total_packets_enqueued (struct update_subgroup *subgrp)
522{
523 struct bpacket *pkt;
524
525 pkt = bpacket_queue_last (SUBGRP_PKTQ (subgrp));
526
527 return pkt->ver - 1;
528}
529
530static int
531update_group_show_walkcb (struct update_group *updgrp, void *arg)
532{
533 struct updwalk_context *ctx = arg;
534 struct vty *vty;
535 struct update_subgroup *subgrp;
536 struct peer_af *paf;
537 struct bgp_filter *filter;
538 int match = 0;
539
540 if (!ctx)
 541 return CMD_SUCCESS;
542
543 if (ctx->subgrp_id)
544 {
545 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
546 {
547 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
548 continue;
549 else
550 {
551 match = 1;
552 break;
553 }
554 }
555 }
556 else
557 {
558 match = 1;
559 }
560
561 if (!match)
562 {
563 /* Since this routine is invoked from a walk, we cannot signal any */
564 /* error here, can only return. */
565 return CMD_SUCCESS;
566 }
567
568 vty = ctx->vty;
 569
 570 vty_out (vty, "Update-group %" PRIu64 ":%s", updgrp->id, VTY_NEWLINE);
571 vty_out (vty, " Created: %s", timestamp_string (updgrp->uptime));
572 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
573 if (filter->map[RMAP_OUT].name)
574 vty_out (vty, " Outgoing route map: %s%s%s",
575 filter->map[RMAP_OUT].map ? "X" : "",
576 filter->map[RMAP_OUT].name, VTY_NEWLINE);
577 vty_out (vty, " MRAI value (seconds): %d%s",
578 updgrp->conf->v_routeadv, VTY_NEWLINE);
579 if (updgrp->conf->change_local_as)
580 vty_out (vty, " Local AS %u%s%s%s",
581 updgrp->conf->change_local_as,
582 CHECK_FLAG (updgrp->conf->flags,
583 PEER_FLAG_LOCAL_AS_NO_PREPEND) ? " no-prepend" : "",
584 CHECK_FLAG (updgrp->conf->flags,
585 PEER_FLAG_LOCAL_AS_REPLACE_AS) ? " replace-as" : "",
586 VTY_NEWLINE);
587
588 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
589 {
590 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
591 continue;
 592 vty_out (vty, "%s", VTY_NEWLINE);
 593 vty_out (vty, " Update-subgroup %" PRIu64 ":%s", subgrp->id, VTY_NEWLINE);
594 vty_out (vty, " Created: %s", timestamp_string (subgrp->uptime));
595
596 if (subgrp->split_from.update_group_id || subgrp->split_from.subgroup_id)
597 {
 598 vty_out (vty, " Split from group id: %" PRIu64 "%s",
 599 subgrp->split_from.update_group_id, VTY_NEWLINE);
 600 vty_out (vty, " Split from subgroup id: %" PRIu64 "%s",
601 subgrp->split_from.subgroup_id, VTY_NEWLINE);
602 }
603
604 vty_out (vty, " Join events: %u%s", subgrp->join_events, VTY_NEWLINE);
605 vty_out (vty, " Prune events: %u%s",
606 subgrp->prune_events, VTY_NEWLINE);
607 vty_out (vty, " Merge events: %u%s",
608 subgrp->merge_events, VTY_NEWLINE);
609 vty_out (vty, " Split events: %u%s",
610 subgrp->split_events, VTY_NEWLINE);
611 vty_out (vty, " Update group switch events: %u%s",
612 subgrp->updgrp_switch_events, VTY_NEWLINE);
613 vty_out (vty, " Peer refreshes combined: %u%s",
614 subgrp->peer_refreshes_combined, VTY_NEWLINE);
615 vty_out (vty, " Merge checks triggered: %u%s",
616 subgrp->merge_checks_triggered, VTY_NEWLINE);
 617 vty_out (vty, " Version: %" PRIu64 "%s", subgrp->version, VTY_NEWLINE);
618 vty_out (vty, " Packet queue length: %d%s",
619 bpacket_queue_length (SUBGRP_PKTQ (subgrp)), VTY_NEWLINE);
620 vty_out (vty, " Total packets enqueued: %u%s",
621 subgroup_total_packets_enqueued (subgrp), VTY_NEWLINE);
622 vty_out (vty, " Packet queue high watermark: %d%s",
623 bpacket_queue_hwm_length (SUBGRP_PKTQ (subgrp)), VTY_NEWLINE);
624 vty_out (vty, " Adj-out list count: %u%s",
625 subgrp->adj_count, VTY_NEWLINE);
626 vty_out (vty, " Advertise list: %s%s",
627 advertise_list_is_empty (subgrp) ? "empty" : "not empty",
628 VTY_NEWLINE);
629 vty_out (vty, " Flags: %s%s",
630 CHECK_FLAG (subgrp->flags,
631 SUBGRP_FLAG_NEEDS_REFRESH) ? "R" : "", VTY_NEWLINE);
632 if (subgrp->peer_count > 0)
633 {
634 vty_out (vty, " Peers:%s", VTY_NEWLINE);
635 SUBGRP_FOREACH_PEER (subgrp, paf)
636 vty_out (vty, " - %s%s", paf->peer->host, VTY_NEWLINE);
637 }
638 }
639 return UPDWALK_CONTINUE;
640}
641
642/*
 643 * Helper function to show the packet queue for each subgroup of an update
 644 * group. Will be constrained to a particular subgroup id if id != 0.
645 */
646static int
647updgrp_show_packet_queue_walkcb (struct update_group *updgrp, void *arg)
648{
649 struct updwalk_context *ctx = arg;
650 struct update_subgroup *subgrp;
651 struct vty *vty;
652
653 vty = ctx->vty;
654 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
655 {
656 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
657 continue;
 658 vty_out (vty, "update group %" PRIu64 ", subgroup %" PRIu64 "%s", updgrp->id,
659 subgrp->id, VTY_NEWLINE);
660 bpacket_queue_show_vty (SUBGRP_PKTQ (subgrp), vty);
661 }
662 return UPDWALK_CONTINUE;
663}
664
665/*
 666 * Show the packet queue for each subgroup of an update group. Will be
 667 * constrained to a particular subgroup id if id != 0.
668 */
669void
670update_group_show_packet_queue (struct bgp *bgp, afi_t afi, safi_t safi,
 671 struct vty *vty, uint64_t id)
672{
673 struct updwalk_context ctx;
674
675 memset (&ctx, 0, sizeof (ctx));
676 ctx.vty = vty;
677 ctx.subgrp_id = id;
678 ctx.flags = 0;
679 update_group_af_walk (bgp, afi, safi, updgrp_show_packet_queue_walkcb,
680 &ctx);
681}
682
683static struct update_group *
684update_group_find (struct peer_af *paf)
685{
686 struct update_group *updgrp;
687 struct update_group tmp;
688 struct peer tmp_conf;
689
690 if (!peer_established (PAF_PEER (paf)))
691 return NULL;
692
693 memset (&tmp, 0, sizeof (tmp));
694 memset (&tmp_conf, 0, sizeof (tmp_conf));
695 tmp.conf = &tmp_conf;
696 peer2_updgrp_copy (&tmp, paf);
697
698 updgrp = hash_lookup (paf->peer->bgp->update_groups[paf->afid], &tmp);
699 conf_release (&tmp_conf, paf->afi, paf->safi);
700 return updgrp;
701}
702
703static struct update_group *
704update_group_create (struct peer_af *paf)
705{
706 struct update_group *updgrp;
707 struct update_group tmp;
708 struct peer tmp_conf;
709
710 memset (&tmp, 0, sizeof (tmp));
711 memset (&tmp_conf, 0, sizeof (tmp_conf));
712 tmp.conf = &tmp_conf;
713 peer2_updgrp_copy (&tmp, paf);
714
715 updgrp = hash_get (paf->peer->bgp->update_groups[paf->afid], &tmp,
716 updgrp_hash_alloc);
717 if (!updgrp)
718 return NULL;
719 update_group_checkin (updgrp);
720
721 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 722 zlog_debug ("create update group %" PRIu64, updgrp->id);
723
724 UPDGRP_GLOBAL_STAT (updgrp, updgrps_created) += 1;
725
 726 conf_release(&tmp_conf, paf->afi, paf->safi);
727 return updgrp;
728}
729
730static void
731update_group_delete (struct update_group *updgrp)
732{
733 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 734 zlog_debug ("delete update group %" PRIu64, updgrp->id);
735
736 UPDGRP_GLOBAL_STAT (updgrp, updgrps_deleted) += 1;
737
738 hash_release (updgrp->bgp->update_groups[updgrp->afid], updgrp);
739 conf_release (updgrp->conf, updgrp->afi, updgrp->safi);
 740
741 if (updgrp->conf->host)
742 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
743 updgrp->conf->host = NULL;
744
745 if (updgrp->conf->ifname)
746 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
747
748 XFREE (MTYPE_BGP_PEER, updgrp->conf);
749 XFREE (MTYPE_BGP_UPDGRP, updgrp);
750}
751
752static void
753update_group_add_subgroup (struct update_group *updgrp,
754 struct update_subgroup *subgrp)
755{
756 if (!updgrp || !subgrp)
757 return;
758
759 LIST_INSERT_HEAD (&(updgrp->subgrps), subgrp, updgrp_train);
760 subgrp->update_group = updgrp;
761}
762
763static void
764update_group_remove_subgroup (struct update_group *updgrp,
765 struct update_subgroup *subgrp)
766{
767 if (!updgrp || !subgrp)
768 return;
769
770 LIST_REMOVE (subgrp, updgrp_train);
771 subgrp->update_group = NULL;
772 if (LIST_EMPTY (&(updgrp->subgrps)))
773 update_group_delete (updgrp);
774}
775
776static struct update_subgroup *
777update_subgroup_create (struct update_group *updgrp)
778{
779 struct update_subgroup *subgrp;
780
781 subgrp = XCALLOC (MTYPE_BGP_UPD_SUBGRP, sizeof (struct update_subgroup));
782 update_subgroup_checkin (subgrp, updgrp);
783 subgrp->v_coalesce = (UPDGRP_INST (updgrp))->coalesce_time;
784 sync_init (subgrp);
785 bpacket_queue_init (SUBGRP_PKTQ (subgrp));
786 bpacket_queue_add (SUBGRP_PKTQ (subgrp), NULL, NULL);
787 TAILQ_INIT (&(subgrp->adjq));
788 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 789 zlog_debug ("create subgroup u%" PRIu64 ":s%" PRIu64,
790 updgrp->id, subgrp->id);
791
792 update_group_add_subgroup (updgrp, subgrp);
793
794 UPDGRP_INCR_STAT (updgrp, subgrps_created);
795
796 return subgrp;
797}
798
799static void
800update_subgroup_delete (struct update_subgroup *subgrp)
801{
802 if (!subgrp)
803 return;
804
805 if (subgrp->update_group)
806 UPDGRP_INCR_STAT (subgrp->update_group, subgrps_deleted);
807
808 if (subgrp->t_merge_check)
809 THREAD_OFF (subgrp->t_merge_check);
810
811 if (subgrp->t_coalesce)
812 THREAD_TIMER_OFF (subgrp->t_coalesce);
813
814 bpacket_queue_cleanup (SUBGRP_PKTQ (subgrp));
815 subgroup_clear_table (subgrp);
816
817 if (subgrp->t_coalesce)
818 THREAD_TIMER_OFF (subgrp->t_coalesce);
819 sync_delete (subgrp);
820
821 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 822 zlog_debug ("delete subgroup u%" PRIu64 ":s%" PRIu64,
823 subgrp->update_group->id, subgrp->id);
824
825 update_group_remove_subgroup (subgrp->update_group, subgrp);
826
827 XFREE (MTYPE_BGP_UPD_SUBGRP, subgrp);
828}
829
830void
831update_subgroup_inherit_info (struct update_subgroup *to,
832 struct update_subgroup *from)
833{
834 if (!to || !from)
835 return;
836
837 to->sflags = from->sflags;
838}
839
840/*
841 * update_subgroup_check_delete
842 *
843 * Delete a subgroup if it is ready to be deleted.
844 *
845 * Returns TRUE if the subgroup was deleted.
846 */
847static int
848update_subgroup_check_delete (struct update_subgroup *subgrp)
849{
850 if (!subgrp)
851 return 0;
852
853 if (!LIST_EMPTY (&(subgrp->peers)))
854 return 0;
855
856 update_subgroup_delete (subgrp);
857
858 return 1;
859}
860
861/*
862 * update_subgroup_add_peer
863 *
864 * @param send_enqueued_packets If true all currently enqueued packets will
865 * also be sent to the peer.
866 */
867static void
868update_subgroup_add_peer (struct update_subgroup *subgrp, struct peer_af *paf,
869 int send_enqueued_pkts)
870{
871 struct bpacket *pkt;
872
873 if (!subgrp || !paf)
874 return;
875
876 LIST_INSERT_HEAD (&(subgrp->peers), paf, subgrp_train);
877 paf->subgroup = subgrp;
878 subgrp->peer_count++;
879
 880 if (bgp_debug_peer_updout_enabled(paf->peer->host))
881 {
882 UPDGRP_PEER_DBG_EN(subgrp->update_group);
883 }
884
885 SUBGRP_INCR_STAT (subgrp, join_events);
886
887 if (send_enqueued_pkts)
888 {
889 pkt = bpacket_queue_first (SUBGRP_PKTQ (subgrp));
890 }
891 else
892 {
893
894 /*
895 * Hang the peer off of the last, placeholder, packet in the
896 * queue. This means it won't see any of the packets that are
 897 * currently in the queue.
898 */
899 pkt = bpacket_queue_last (SUBGRP_PKTQ (subgrp));
900 assert (pkt->buffer == NULL);
901 }
902
903 bpacket_add_peer (pkt, paf);
904
905 bpacket_queue_sanity_check (SUBGRP_PKTQ (subgrp));
906}
907
908/*
909 * update_subgroup_remove_peer_internal
910 *
911 * Internal function that removes a peer from a subgroup, but does not
912 * delete the subgroup. A call to this function must almost always be
913 * followed by a call to update_subgroup_check_delete().
914 *
915 * @see update_subgroup_remove_peer
916 */
917static void
918update_subgroup_remove_peer_internal (struct update_subgroup *subgrp,
919 struct peer_af *paf)
920{
921 assert (subgrp && paf);
922
 923 if (bgp_debug_peer_updout_enabled(paf->peer->host))
924 {
925 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
926 }
927
928 bpacket_queue_remove_peer (paf);
929 LIST_REMOVE (paf, subgrp_train);
930 paf->subgroup = NULL;
931 subgrp->peer_count--;
932
933 SUBGRP_INCR_STAT (subgrp, prune_events);
934}
935
936/*
937 * update_subgroup_remove_peer
938 */
939void
940update_subgroup_remove_peer (struct update_subgroup *subgrp,
941 struct peer_af *paf)
942{
943 if (!subgrp || !paf)
944 return;
945
946 update_subgroup_remove_peer_internal (subgrp, paf);
947
948 if (update_subgroup_check_delete (subgrp))
949 return;
950
951 /*
952 * The deletion of the peer may have caused some packets to be
953 * deleted from the subgroup packet queue. Check if the subgroup can
954 * be merged now.
955 */
956 update_subgroup_check_merge (subgrp, "removed peer from subgroup");
957}
958
959static struct update_subgroup *
960update_subgroup_find (struct update_group *updgrp, struct peer_af *paf)
961{
962 struct update_subgroup *subgrp = NULL;
963 uint64_t version;
964
965 if (paf->subgroup)
966 {
967 assert (0);
968 return NULL;
969 }
970 else
971 version = 0;
972
973 if (!peer_established (PAF_PEER (paf)))
974 return NULL;
975
976 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
977 {
 978 if (subgrp->version != version ||
 979 CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
980 continue;
981
982 /*
983 * The version number is not meaningful on a subgroup that needs
984 * a refresh.
985 */
986 if (update_subgroup_needs_refresh (subgrp))
987 continue;
988
989 break;
990 }
991
992 return subgrp;
993}
994
995/*
996 * update_subgroup_ready_for_merge
997 *
998 * Returns TRUE if this subgroup is in a state that allows it to be
999 * merged into another subgroup.
1000 */
7717b183 1001static int
3f9c7369
DS
1002update_subgroup_ready_for_merge (struct update_subgroup *subgrp)
1003{
1004
1005 /*
1006 * Not ready if there are any encoded packets waiting to be written
1007 * out to peers.
1008 */
1009 if (!bpacket_queue_is_empty (SUBGRP_PKTQ (subgrp)))
1010 return 0;
1011
1012 /*
 1013 * Not ready if there are enqueued updates waiting to be encoded.
1014 */
1015 if (!advertise_list_is_empty (subgrp))
1016 return 0;
1017
1018 /*
1019 * Don't attempt to merge a subgroup that needs a refresh. For one,
1020 * we can't determine if the adj_out of such a group matches that of
1021 * another group.
1022 */
1023 if (update_subgroup_needs_refresh (subgrp))
1024 return 0;
1025
1026 return 1;
1027}
1028
1029/*
1030 * update_subgrp_can_merge_into
1031 *
1032 * Returns TRUE if the first subgroup can merge into the second
1033 * subgroup.
1034 */
7717b183 1035static int
3f9c7369
DS
1036update_subgroup_can_merge_into (struct update_subgroup *subgrp,
1037 struct update_subgroup *target)
1038{
1039
1040 if (subgrp == target)
1041 return 0;
1042
1043 /*
1044 * Both must have processed the BRIB to the same point in order to
1045 * be merged.
1046 */
1047 if (subgrp->version != target->version)
1048 return 0;
1049
 1050 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) !=
1051 CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1052 return 0;
1053
 1054 if (subgrp->adj_count != target->adj_count)
1055 return 0;
1056
 1057 return update_subgroup_ready_for_merge (target);
1058}
1059
1060/*
1061 * update_subgroup_merge
1062 *
1063 * Merge the first subgroup into the second one.
1064 */
1065static void
1066update_subgroup_merge (struct update_subgroup *subgrp,
1067 struct update_subgroup *target, const char *reason)
1068{
1069 struct peer_af *paf;
1070 int result;
1071 int peer_count;
1072
1073 assert (subgrp->adj_count == target->adj_count);
1074
1075 peer_count = subgrp->peer_count;
1076
1077 while (1)
1078 {
1079 paf = LIST_FIRST (&subgrp->peers);
1080 if (!paf)
1081 break;
1082
1083 update_subgroup_remove_peer_internal (subgrp, paf);
1084
1085 /*
1086 * Add the peer to the target subgroup, while making sure that
1087 * any currently enqueued packets won't be sent to it. Enqueued
1088 * packets could, for example, result in an unnecessary withdraw
1089 * followed by an advertise.
1090 */
1091 update_subgroup_add_peer (target, paf, 0);
1092 }
1093
1094 SUBGRP_INCR_STAT (target, merge_events);
1095
1096 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 1097 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64 ", "
1098 "trigger: %s", subgrp->update_group->id, subgrp->id, peer_count,
1099 target->update_group->id, target->id, reason ? reason : "unknown");
1100
1101 result = update_subgroup_check_delete (subgrp);
1102 assert (result);
1103}
1104
1105/*
1106 * update_subgroup_check_merge
1107 *
1108 * Merge this subgroup into another subgroup if possible.
1109 *
1110 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1111 * should not be accessed in this case.
1112 */
1113int
1114update_subgroup_check_merge (struct update_subgroup *subgrp,
1115 const char *reason)
1116{
1117 struct update_subgroup *target;
1118
1119 if (!update_subgroup_ready_for_merge (subgrp))
1120 return 0;
1121
1122 /*
1123 * Look for a subgroup to merge into.
1124 */
1125 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target)
1126 {
1127 if (update_subgroup_can_merge_into (subgrp, target))
1128 break;
1129 }
1130
1131 if (!target)
1132 return 0;
1133
1134 update_subgroup_merge (subgrp, target, reason);
1135 return 1;
1136}
1137
1138 /*
1139 * update_subgroup_merge_check_thread_cb
1140 */
1141static int
1142update_subgroup_merge_check_thread_cb (struct thread *thread)
1143{
1144 struct update_subgroup *subgrp;
1145
1146 subgrp = THREAD_ARG (thread);
1147
1148 subgrp->t_merge_check = NULL;
1149
1150 update_subgroup_check_merge (subgrp, "triggered merge check");
1151 return 0;
1152}
1153
1154/*
1155 * update_subgroup_trigger_merge_check
1156 *
1157 * Triggers a call to update_subgroup_check_merge() on a clean context.
1158 *
1159 * @param force If true, the merge check will be triggered even if the
1160 * subgroup doesn't currently look ready for a merge.
1161 *
1162 * Returns TRUE if a merge check will be performed shortly.
1163 */
1164int
1165update_subgroup_trigger_merge_check (struct update_subgroup *subgrp,
1166 int force)
1167{
1168 if (subgrp->t_merge_check)
1169 return 1;
1170
1171 if (!force && !update_subgroup_ready_for_merge (subgrp))
1172 return 0;
1173
1174 subgrp->t_merge_check =
 1175 thread_add_background (bm->master,
1176 update_subgroup_merge_check_thread_cb,
1177 subgrp, 0);
1178
1179 SUBGRP_INCR_STAT (subgrp, merge_checks_triggered);
1180
1181 return 1;
1182}
1183
1184/*
1185 * update_subgroup_copy_adj_out
1186 *
1187 * Helper function that clones the adj out (state about advertised
1188 * routes) from one subgroup to another. It assumes that the adj out
1189 * of the target subgroup is empty.
1190 */
1191static void
1192update_subgroup_copy_adj_out (struct update_subgroup *source,
1193 struct update_subgroup *dest)
1194{
1195 struct bgp_adj_out *aout, *aout_copy;
1196
1197 SUBGRP_FOREACH_ADJ (source, aout)
1198 {
1199 /*
1200 * Copy the adj out.
1201 */
 1202 aout_copy = bgp_adj_out_alloc (dest, aout->rn, aout->addpath_tx_id);
1203 aout_copy->attr = aout->attr ? bgp_attr_refcount (aout->attr) : NULL;
1204 }
1205}
1206
1207/*
1208 * update_subgroup_copy_packets
1209 *
1210 * Copy packets after and including the given packet to the subgroup
1211 * 'dest'.
1212 *
1213 * Returns the number of packets copied.
1214 */
1215static int
1216update_subgroup_copy_packets (struct update_subgroup *dest,
1217 struct bpacket *pkt)
1218{
1219 int count;
1220
1221 count = 0;
1222 while (pkt && pkt->buffer)
1223 {
1224 bpacket_queue_add (SUBGRP_PKTQ (dest), stream_dup (pkt->buffer),
1225 &pkt->arr);
1226 count++;
1227 pkt = bpacket_next (pkt);
1228 }
1229
1230 bpacket_queue_sanity_check (SUBGRP_PKTQ (dest));
1231
1232 return count;
1233}
1234
1235static int
ffd0c037 1236updgrp_prefix_list_update (struct update_group *updgrp, const char *name)
3f9c7369
DS
1237{
1238 struct peer *peer;
1239 struct bgp_filter *filter;
1240
1241 peer = UPDGRP_PEER (updgrp);
1242 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1243
1244 if (PREFIX_LIST_OUT_NAME(filter) &&
1245 (strcmp (name, PREFIX_LIST_OUT_NAME(filter)) == 0))
1246 {
1247 PREFIX_LIST_OUT(filter) =
1248 prefix_list_lookup (UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1249 return 1;
1250 }
1251 return 0;
1252}
1253
1254static int
ffd0c037 1255updgrp_filter_list_update (struct update_group *updgrp, const char *name)
3f9c7369
DS
1256{
1257 struct peer *peer;
1258 struct bgp_filter *filter;
1259
1260 peer = UPDGRP_PEER (updgrp);
1261 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1262
1263 if (FILTER_LIST_OUT_NAME(filter) &&
1264 (strcmp (name, FILTER_LIST_OUT_NAME(filter)) == 0))
1265 {
1266 FILTER_LIST_OUT(filter) = as_list_lookup (FILTER_LIST_OUT_NAME(filter));
1267 return 1;
1268 }
1269 return 0;
1270}
1271
1272static int
ffd0c037 1273updgrp_distribute_list_update (struct update_group *updgrp, const char *name)
3f9c7369
DS
1274{
1275 struct peer *peer;
1276 struct bgp_filter *filter;
1277
1278 peer = UPDGRP_PEER(updgrp);
1279 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1280
1281 if (DISTRIBUTE_OUT_NAME(filter) &&
1282 (strcmp (name, DISTRIBUTE_OUT_NAME(filter)) == 0))
1283 {
1284 DISTRIBUTE_OUT(filter) = access_list_lookup(UPDGRP_AFI(updgrp),
1285 DISTRIBUTE_OUT_NAME(filter));
1286 return 1;
1287 }
1288 return 0;
1289}
1290
1291static int
ffd0c037 1292updgrp_route_map_update (struct update_group *updgrp, const char *name,
3f9c7369
DS
1293 int *def_rmap_changed)
1294{
1295 struct peer *peer;
1296 struct bgp_filter *filter;
1297 int changed = 0;
1298 afi_t afi;
1299 safi_t safi;
1300
1301 peer = UPDGRP_PEER (updgrp);
1302 afi = UPDGRP_AFI (updgrp);
1303 safi = UPDGRP_SAFI (updgrp);
1304 filter = &peer->filter[afi][safi];
1305
1306 if (ROUTE_MAP_OUT_NAME(filter) &&
1307 (strcmp (name, ROUTE_MAP_OUT_NAME(filter)) == 0))
1308 {
1309 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name (name);
1310
1311 changed = 1;
1312 }
1313
1314 if (UNSUPPRESS_MAP_NAME(filter) &&
1315 (strcmp (name, UNSUPPRESS_MAP_NAME(filter)) == 0))
1316 {
1317 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name (name);
1318 changed = 1;
1319 }
1320
1321 /* process default-originate route-map */
1322 if (peer->default_rmap[afi][safi].name &&
1323 (strcmp (name, peer->default_rmap[afi][safi].name) == 0))
1324 {
1325 peer->default_rmap[afi][safi].map = route_map_lookup_by_name (name);
1326 if (def_rmap_changed)
1327 *def_rmap_changed = 1;
1328 }
1329 return changed;
1330}
1331
1332/*
1333 * hash iteration callback function to process a policy change for an
1334 * update group. Check if the changed policy matches the updgrp's
1335 * outbound route-map or unsuppress-map or default-originate map or
1336 * filter-list or prefix-list or distribute-list.
1337 * Trigger update generation accordingly.
1338 */
1339static int
1340updgrp_policy_update_walkcb (struct update_group *updgrp, void *arg)
1341{
1342 struct updwalk_context *ctx = arg;
1343 struct update_subgroup *subgrp;
1344 int changed = 0;
1345 int def_changed = 0;
1346
1347 if (!updgrp || !ctx || !ctx->policy_name)
1348 return UPDWALK_CONTINUE;
1349
1350 switch (ctx->policy_type) {
1351 case BGP_POLICY_ROUTE_MAP:
1352 changed = updgrp_route_map_update(updgrp, ctx->policy_name, &def_changed);
1353 break;
1354 case BGP_POLICY_FILTER_LIST:
1355 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1356 break;
1357 case BGP_POLICY_PREFIX_LIST:
1358 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1359 break;
1360 case BGP_POLICY_DISTRIBUTE_LIST:
1361 changed = updgrp_distribute_list_update(updgrp, ctx->policy_name);
1362 break;
1363 default:
1364 break;
1365 }
1366
1367 /* If not doing route update, return after updating "config" */
1368 if (!ctx->policy_route_update)
1369 return UPDWALK_CONTINUE;
1370
1371 /* If nothing has changed, return after updating "config" */
1372 if (!changed && !def_changed)
1373 return UPDWALK_CONTINUE;
1374
1375 /*
1376 * If something has changed, at the beginning of a route-map modification
1377 * event, mark each subgroup's needs-refresh bit. For one, it signals to
1378 * whoever that the subgroup needs a refresh. Second, it prevents premature
1379 * merge of this subgroup with another before a complete (outbound) refresh.
1380 */
1381 if (ctx->policy_event_start_flag)
1382 {
1383 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
1384 {
1385 update_subgroup_set_needs_refresh(subgrp, 1);
1386 }
1387 return UPDWALK_CONTINUE;
1388 }
1389
1390 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
1391 {
1392 if (changed)
1393 {
1394 if (bgp_debug_update(NULL, NULL, updgrp, 0))
 1395 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing routes upon policy %s (type %d) change",
1396 updgrp->id, subgrp->id, ctx->policy_name, ctx->policy_type);
1397 subgroup_announce_route (subgrp);
1398 }
1399 if (def_changed)
1400 {
1401 if (bgp_debug_update(NULL, NULL, updgrp, 0))
 1402 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing default upon default routemap %s change",
1403 updgrp->id, subgrp->id, ctx->policy_name);
1404 subgroup_default_originate (subgrp, 0);
1405 }
1406 update_subgroup_set_needs_refresh(subgrp, 0);
1407 }
1408 return UPDWALK_CONTINUE;
1409}
1410
1411static int
1412update_group_walkcb (struct hash_backet *backet, void *arg)
1413{
1414 struct update_group *updgrp = backet->data;
1415 struct updwalk_context *wctx = arg;
1416 int ret = (*wctx->cb) (updgrp, wctx->context);
1417 return ret;
1418}
1419
1420static int
1421update_group_periodic_merge_walkcb (struct update_group *updgrp, void *arg)
1422{
1423 struct update_subgroup *subgrp;
1424 struct update_subgroup *tmp_subgrp;
1425 const char *reason = arg;
1426
1427 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1428 update_subgroup_check_merge (subgrp, reason);
1429 return UPDWALK_CONTINUE;
1430}
1431
1432/********************
1433 * PUBLIC FUNCTIONS
1434 ********************/
1435
1436/*
1437 * trigger function when a policy (route-map/filter-list/prefix-list/
1438 * distribute-list etc.) content changes. Go through all the
1439 * update groups and process the change.
1440 *
1441 * bgp: the bgp instance
1442 * ptype: the type of policy that got modified, see bgpd.h
1443 * pname: name of the policy
1444 * route_update: flag to control if an automatic update generation should
1445 * occur
1446 * start_event: flag that indicates if it's the beginning of the change.
1447 * Esp. when the user is changing the content interactively
1448 * over multiple statements. Useful to set dirty flag on
1449 * update groups.
1450 */
1451void
1452update_group_policy_update (struct bgp *bgp, bgp_policy_type_e ptype,
 1453 const char *pname, int route_update, int start_event)
1454{
1455 struct updwalk_context ctx;
1456
1457 memset (&ctx, 0, sizeof (ctx));
1458 ctx.policy_type = ptype;
1459 ctx.policy_name = pname;
1460 ctx.policy_route_update = route_update;
1461 ctx.policy_event_start_flag = start_event;
1462 ctx.flags = 0;
1463
1464 update_group_walk (bgp, updgrp_policy_update_walkcb, &ctx);
1465}
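/*
 * Minimal usage sketch with a hypothetical policy name, not taken from the
 * original sources: a caller reacting to an interactive edit of route-map
 * "RM-OUT" could first mark the affected subgroups dirty and then, once the
 * edit is complete, trigger route generation:
 *
 *   update_group_policy_update (bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT", 0, 1);
 *   ... user modifies the route-map content ...
 *   update_group_policy_update (bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT", 1, 0);
 */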
1466
1467/*
1468 * update_subgroup_split_peer
1469 *
1470 * Ensure that the given peer is in a subgroup of its own in the
1471 * specified update group.
1472 */
1473void
1474update_subgroup_split_peer (struct peer_af *paf, struct update_group *updgrp)
1475{
1476 struct update_subgroup *old_subgrp, *subgrp;
1477 uint64_t old_id;
1478
1479
1480 old_subgrp = paf->subgroup;
1481
1482 if (!updgrp)
1483 updgrp = old_subgrp->update_group;
1484
1485 /*
1486 * If the peer is alone in its subgroup, reuse the existing
1487 * subgroup.
1488 */
1489 if (old_subgrp->peer_count == 1)
1490 {
1491 if (updgrp == old_subgrp->update_group)
1492 return;
1493
1494 subgrp = old_subgrp;
1495 old_id = old_subgrp->update_group->id;
1496
 1497 if (bgp_debug_peer_updout_enabled(paf->peer->host))
1498 {
1499 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1500 }
1501
1502 update_group_remove_subgroup (old_subgrp->update_group, old_subgrp);
1503 update_group_add_subgroup (updgrp, subgrp);
1504
 1505 if (bgp_debug_peer_updout_enabled(paf->peer->host))
1506 {
1507 UPDGRP_PEER_DBG_EN(updgrp);
1508 }
1509 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 1510 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1511 old_id, subgrp->id, paf->peer->host, updgrp->id, subgrp->id);
1512
1513 /*
1514 * The state of the subgroup (adj_out, advs, packet queue etc)
1515 * is consistent internally, but may not be identical to other
1516 * subgroups in the new update group even if the version number
1517 * matches up. Make sure a full refresh is done before the
1518 * subgroup is merged with another.
1519 */
1520 update_subgroup_set_needs_refresh (subgrp, 1);
1521
1522 SUBGRP_INCR_STAT (subgrp, updgrp_switch_events);
1523 return;
1524 }
1525
1526 /*
1527 * Create a new subgroup under the specified update group, and copy
1528 * over relevant state to it.
1529 */
1530 subgrp = update_subgroup_create (updgrp);
1531 update_subgroup_inherit_info (subgrp, old_subgrp);
1532
1533 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1534 subgrp->split_from.subgroup_id = old_subgrp->id;
1535
1536 /*
1537 * Copy out relevant state from the old subgroup.
1538 */
1539 update_subgroup_copy_adj_out (paf->subgroup, subgrp);
1540 update_subgroup_copy_packets (subgrp, paf->next_pkt_to_send);
1541
1542 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 1543 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " peer %s split and moved into u%" PRIu64 ":s%" PRIu64,
1544 paf->subgroup->update_group->id, paf->subgroup->id,
1545 paf->peer->host, updgrp->id, subgrp->id);
1546
1547 SUBGRP_INCR_STAT (paf->subgroup, split_events);
1548
1549 /*
1550 * Since queued advs were left behind, this new subgroup needs a
1551 * refresh.
1552 */
1553 update_subgroup_set_needs_refresh (subgrp, 1);
1554
1555 /*
1556 * Remove peer from old subgroup, and add it to the new one.
1557 */
1558 update_subgroup_remove_peer (paf->subgroup, paf);
1559
1560 update_subgroup_add_peer (subgrp, paf, 1);
1561}
1562
1563void
3d68677e 1564update_bgp_group_init (struct bgp *bgp)
3f9c7369
DS
1565{
1566 int afid;
1567
1568 AF_FOREACH (afid)
1569 bgp->update_groups[afid] = hash_create (updgrp_hash_key_make,
1570 updgrp_hash_cmp);
1571}
1572
1573void
1574update_bgp_group_free (struct bgp *bgp)
1575{
1576 int afid;
1577
1578 AF_FOREACH (afid)
1579 {
1580 if (bgp->update_groups[afid])
1581 {
1582 hash_free(bgp->update_groups[afid]);
1583 bgp->update_groups[afid] = NULL;
1584 }
1585 }
1586}
1587
3f9c7369 1588void
8fe8a7f6 1589update_group_show (struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
f43e655e 1590 uint64_t subgrp_id)
3f9c7369 1591{
8fe8a7f6
DS
1592 struct updwalk_context ctx;
1593 memset (&ctx, 0, sizeof (ctx));
1594 ctx.vty = vty;
1595 ctx.subgrp_id = subgrp_id;
1596
1597 update_group_af_walk (bgp, afi, safi, update_group_show_walkcb, &ctx);
1598}
1599
1600/*
1601 * update_group_show_stats
1602 *
1603 * Show global statistics about update groups.
1604 */
1605void
1606update_group_show_stats (struct bgp *bgp, struct vty *vty)
1607{
1608 vty_out (vty, "Update groups created: %u%s",
1609 bgp->update_group_stats.updgrps_created, VTY_NEWLINE);
1610 vty_out (vty, "Update groups deleted: %u%s",
1611 bgp->update_group_stats.updgrps_deleted, VTY_NEWLINE);
1612 vty_out (vty, "Update subgroups created: %u%s",
1613 bgp->update_group_stats.subgrps_created, VTY_NEWLINE);
1614 vty_out (vty, "Update subgroups deleted: %u%s",
1615 bgp->update_group_stats.subgrps_deleted, VTY_NEWLINE);
1616 vty_out (vty, "Join events: %u%s",
1617 bgp->update_group_stats.join_events, VTY_NEWLINE);
1618 vty_out (vty, "Prune events: %u%s",
1619 bgp->update_group_stats.prune_events, VTY_NEWLINE);
1620 vty_out (vty, "Merge events: %u%s",
1621 bgp->update_group_stats.merge_events, VTY_NEWLINE);
1622 vty_out (vty, "Split events: %u%s",
1623 bgp->update_group_stats.split_events, VTY_NEWLINE);
1624 vty_out (vty, "Update group switch events: %u%s",
1625 bgp->update_group_stats.updgrp_switch_events, VTY_NEWLINE);
1626 vty_out (vty, "Peer route refreshes combined: %u%s",
1627 bgp->update_group_stats.peer_refreshes_combined, VTY_NEWLINE);
1628 vty_out (vty, "Merge checks triggered: %u%s",
1629 bgp->update_group_stats.merge_checks_triggered, VTY_NEWLINE);
1630}
1631
1632/*
1633 * update_group_adjust_peer
1634 */
1635void
1636update_group_adjust_peer (struct peer_af *paf)
1637{
1638 struct update_group *updgrp;
1639 struct update_subgroup *subgrp, *old_subgrp;
1640 struct peer *peer;
1641
1642 if (!paf)
1643 return;
1644
1645 peer = PAF_PEER (paf);
1646 if (!peer_established (peer))
1647 {
1648 return;
1649 }
1650
1651 if (!CHECK_FLAG (peer->flags, PEER_FLAG_CONFIG_NODE))
1652 {
1653 return;
1654 }
1655
1656 if (!peer->afc_nego[paf->afi][paf->safi])
1657 {
1658 return;
1659 }
1660
1661 updgrp = update_group_find (paf);
1662 if (!updgrp)
1663 {
1664 updgrp = update_group_create (paf);
1665 if (!updgrp)
1666 {
1667 zlog_err ("couldn't create update group for peer %s",
1668 paf->peer->host);
1669 return;
1670 }
1671 }
1672
1673 old_subgrp = paf->subgroup;
1674
1675 if (old_subgrp)
1676 {
1677
1678 /*
1679 * If the update group of the peer is unchanged, the peer can stay
1680 * in its existing subgroup and we're done.
1681 */
1682 if (old_subgrp->update_group == updgrp)
1683 return;
1684
1685 /*
1686 * The peer is switching between update groups. Put it in its
1687 * own subgroup under the new update group.
1688 */
1689 update_subgroup_split_peer (paf, updgrp);
1690 return;
1691 }
1692
1693 subgrp = update_subgroup_find (updgrp, paf);
1694 if (!subgrp)
1695 {
1696 subgrp = update_subgroup_create (updgrp);
1697 if (!subgrp)
1698 return;
1699 }
1700
1701 update_subgroup_add_peer (subgrp, paf, 1);
1702 if (BGP_DEBUG (update_groups, UPDATE_GROUPS))
 1703 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " add peer %s",
1704 updgrp->id, subgrp->id, paf->peer->host);
1705
1706 return;
1707}
1708
1709int
1710update_group_adjust_soloness (struct peer *peer, int set)
1711{
1712 struct peer_group *group;
1713 struct listnode *node, *nnode;
1714
1715 if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
1716 {
1717 peer_lonesoul_or_not (peer, set);
1718 if (peer->status == Established)
1719 bgp_announce_route_all (peer);
1720 }
1721 else
1722 {
1723 group = peer->group;
1724 for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
1725 {
1726 peer_lonesoul_or_not (peer, set);
1727 if (peer->status == Established)
1728 bgp_announce_route_all (peer);
1729 }
1730 }
1731 return 0;
1732}
1733
1734/*
1735 * update_subgroup_rib
1736 */
1737struct bgp_table *
1738update_subgroup_rib (struct update_subgroup *subgrp)
1739{
1740 struct bgp *bgp;
1741
1742 bgp = SUBGRP_INST (subgrp);
1743 if (!bgp)
1744 return NULL;
1745
1746 return bgp->rib[SUBGRP_AFI (subgrp)][SUBGRP_SAFI (subgrp)];
1747}
1748
1749void
1750update_group_af_walk (struct bgp *bgp, afi_t afi, safi_t safi,
1751 updgrp_walkcb cb, void *ctx)
1752{
1753 struct updwalk_context wctx;
1754 int afid;
1755
1756 if (!bgp)
1757 return;
1758 afid = afindex (afi, safi);
1759 if (afid >= BGP_AF_MAX)
1760 return;
1761
1762 memset (&wctx, 0, sizeof (wctx));
1763 wctx.cb = cb;
1764 wctx.context = ctx;
1765
1766 if (bgp->update_groups[afid])
1767 hash_walk (bgp->update_groups[afid], update_group_walkcb, &wctx);
1768}
1769
1770void
1771update_group_walk (struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1772{
1773 afi_t afi;
1774 safi_t safi;
1775
1776 FOREACH_AFI_SAFI (afi, safi)
1777 {
1778 update_group_af_walk (bgp, afi, safi, cb, ctx);
1779 }
1780}
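/*
 * Minimal sketch, not part of the original file, of a custom walk callback
 * built on the updgrp_walkcb contract used throughout this file; it simply
 * counts the update groups for one afi/safi:
 *
 *   static int count_updgrps_walkcb (struct update_group *updgrp, void *arg)
 *   {
 *     (*(unsigned int *) arg)++;
 *     return UPDWALK_CONTINUE;
 *   }
 *
 *   unsigned int count = 0;
 *   update_group_af_walk (bgp, afi, safi, count_updgrps_walkcb, &count);
 */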
1781
1782void
1783update_group_periodic_merge (struct bgp *bgp)
1784{
1785 char reason[] = "periodic merge check";
1786
1787 update_group_walk (bgp, update_group_periodic_merge_walkcb,
1788 (void *) reason);
1789}
1790
1791static int
1792update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1793 void *arg)
1794{
1795 struct update_subgroup *subgrp;
1796 struct peer *peer;
1797 afi_t afi;
1798 safi_t safi;
1799
1800 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
1801 {
1802 peer = SUBGRP_PEER (subgrp);
1803 afi = SUBGRP_AFI (subgrp);
1804 safi = SUBGRP_SAFI (subgrp);
1805
1806 if (peer->default_rmap[afi][safi].name)
1807 {
1808 subgroup_default_originate (subgrp, 0);
1809 }
1810 }
1811
1812 return UPDWALK_CONTINUE;
1813}
1814
ffd0c037 1815int
0de4848d
DS
1816update_group_refresh_default_originate_route_map (struct thread *thread)
1817{
1818 struct bgp *bgp;
1819 char reason[] = "refresh default-originate route-map";
1820
1821 bgp = THREAD_ARG(thread);
1822 update_group_walk (bgp, update_group_default_originate_route_map_walkcb,
1823 reason);
1824 THREAD_TIMER_OFF (bgp->t_rmap_def_originate_eval);
1825 bgp_unlock(bgp);
1826
1827 return(0);
1828}
1829
1830/*
1831 * peer_af_announce_route
1832 *
1833 * Refreshes routes out to a peer_af immediately.
1834 *
1835 * If the combine parameter is TRUE, then this function will try to
1836 * gather other peers in the subgroup for which a route announcement
 1837 * is pending and efficiently announce routes to all of them.
1838 *
1839 * For now, the 'combine' option has an effect only if all peers in
1840 * the subgroup have a route announcement pending.
1841 */
1842void
1843peer_af_announce_route (struct peer_af *paf, int combine)
1844{
1845 struct update_subgroup *subgrp;
1846 struct peer_af *cur_paf;
1847 int all_pending;
1848
1849 subgrp = paf->subgroup;
1850 all_pending = 0;
1851
1852 if (combine)
1853 {
1854 /*
1855 * If there are other peers in the old subgroup that also need
1856 * routes to be announced, pull them into the peer's new
1857 * subgroup.
1858 * Combine route announcement with other peers if possible.
1859 *
1860 * For now, we combine only if all peers in the subgroup have an
1861 * announcement pending.
1862 */
1863 all_pending = 1;
1864
1865 SUBGRP_FOREACH_PEER (subgrp, cur_paf)
1866 {
1867 if (cur_paf == paf)
1868 continue;
1869
1870 if (cur_paf->t_announce_route)
1871 continue;
1872
1873 all_pending = 0;
1874 break;
1875 }
1876 }
1877 /*
1878 * Announce to the peer alone if we were not asked to combine peers,
 1880 * or if some peers don't have a route announcement pending.
1880 */
1881 if (!combine || !all_pending)
1882 {
1883 update_subgroup_split_peer (paf, NULL);
1884 if (!paf->subgroup)
1885 return;
1886
1887 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
 1888 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " %s announcing routes",
1889 subgrp->update_group->id, subgrp->id, paf->peer->host);
1890
1891 subgroup_announce_route (paf->subgroup);
1892 return;
1893 }
1894
1895 /*
 1896 * We will announce routes to the entire subgroup.
1897 *
1898 * First stop refresh timers on all the other peers.
1899 */
1900 SUBGRP_FOREACH_PEER (subgrp, cur_paf)
1901 {
1902 if (cur_paf == paf)
1903 continue;
1904
1905 bgp_stop_announce_route_timer (cur_paf);
1906 }
1907
1908 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
 1909 zlog_debug ("u%" PRIu64 ":s%" PRIu64 " announcing routes to %s, combined into %d peers",
1910 subgrp->update_group->id, subgrp->id,
1911 paf->peer->host, subgrp->peer_count);
1912
1913 subgroup_announce_route (subgrp);
1914
1915 SUBGRP_INCR_STAT_BY (subgrp, peer_refreshes_combined,
1916 subgrp->peer_count - 1);
1917}
1918
1919void
1920subgroup_trigger_write (struct update_subgroup *subgrp)
1921{
1922 struct peer_af *paf;
1923
1924#if 0
1925 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1926 zlog_debug("u%llu:s%llu scheduling write thread for peers",
1927 subgrp->update_group->id, subgrp->id);
1928#endif
1929 SUBGRP_FOREACH_PEER (subgrp, paf)
1930 {
1931 if (paf->peer->status == Established)
1932 {
1933 BGP_PEER_WRITE_ON (paf->peer->t_write, bgp_write, paf->peer->fd,
1934 paf->peer);
1935 }
1936 }
1937}
1938
1939int
1940update_group_clear_update_dbg (struct update_group *updgrp, void *arg)
1941{
1942 UPDGRP_PEER_DBG_OFF(updgrp);
1943 return UPDWALK_CONTINUE;
1944}
 1945
 1946 /* Return true if we should addpath encode NLRI to this peer */
1947int
1948bgp_addpath_encode_tx (struct peer *peer, afi_t afi, safi_t safi)
1949{
1950 return (CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV) &&
1951 CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_RX_RCV));
1952}
1953
1954/*
1955 * Return true if this is a path we should advertise due to a
1956 * configured addpath-tx knob
1957 */
1958int
1959bgp_addpath_tx_path (struct peer *peer, afi_t afi, safi_t safi,
1960 struct bgp_info *ri)
1961{
1962 if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ADDPATH_TX_ALL_PATHS))
1963 return 1;
1964
1965 if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS) &&
1966 CHECK_FLAG (ri->flags, BGP_INFO_DMED_SELECTED))
1967 return 1;
1968
1969 return 0;
1970}
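/*
 * Illustrative combination of the two addpath helpers above (a sketch under
 * the assumption that the caller holds a peer, an afi/safi and a candidate
 * bgp_info, not a definitive usage from the original sources):
 *
 *   if (bgp_addpath_encode_tx (peer, afi, safi) &&
 *       bgp_addpath_tx_path (peer, afi, safi, ri))
 *     ... advertise ri to this peer with its addpath tx id in the NLRI ...
 */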