1/**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27#include <zebra.h>
28
29#include "prefix.h"
30#include "thread.h"
31#include "buffer.h"
32#include "stream.h"
33#include "command.h"
34#include "sockunion.h"
35#include "network.h"
36#include "memory.h"
37#include "filter.h"
38#include "routemap.h"
39#include "log.h"
40#include "plist.h"
41#include "linklist.h"
42#include "workqueue.h"
43#include "hash.h"
44#include "jhash.h"
45#include "queue.h"
46
47#include "bgpd/bgpd.h"
48#include "bgpd/bgp_table.h"
49#include "bgpd/bgp_debug.h"
50#include "bgpd/bgp_fsm.h"
51#include "bgpd/bgp_advertise.h"
52#include "bgpd/bgp_packet.h"
53#include "bgpd/bgp_updgrp.h"
54#include "bgpd/bgp_route.h"
55#include "bgpd/bgp_filter.h"
2fc102e1 56#include "bgpd/bgp_io.h"
57
58/********************
59 * PRIVATE FUNCTIONS
60 ********************/
61
62/**
63 * assign a unique ID to update group and subgroup. Mostly for display/
 64 * debugging purposes. It's a 64-bit space - used freely, without worrying
 65 * about wraparound or about filling gaps. While at it, timestamp
66 * the creation.
67 */
d62a17ae 68static void update_group_checkin(struct update_group *updgrp)
3f9c7369 69{
d62a17ae 70 updgrp->id = ++bm->updgrp_idspace;
71 updgrp->uptime = bgp_clock();
72}
73
d62a17ae 74static void update_subgroup_checkin(struct update_subgroup *subgrp,
75 struct update_group *updgrp)
3f9c7369 76{
d62a17ae 77 subgrp->id = ++bm->subgrp_idspace;
78 subgrp->uptime = bgp_clock();
79}
80
d62a17ae 81static void sync_init(struct update_subgroup *subgrp)
3f9c7369 82{
d62a17ae 83 subgrp->sync =
84 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
85 BGP_ADV_FIFO_INIT(&subgrp->sync->update);
86 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw);
87 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw_low);
88 subgrp->hash =
89 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
d62a17ae 90
91 /* We use a larger buffer for subgrp->work in the event that:
92 * - We RX a BGP_UPDATE where the attributes alone are just
93 * under BGP_MAX_PACKET_SIZE
94 * - The user configures an outbound route-map that does many as-path
 95 * prepends or adds many communities. At most they can have CMD_ARGC_MAX
 96 * args in a route-map so there is a finite limit on how large they can
 97 * make the attributes.
100 *
101 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid
 102 * bounds checking for every single attribute as we construct an UPDATE.
104 */
105 subgrp->work =
106 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
107 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
108}
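
/*
 * Note: every resource allocated in sync_init() above (the sync FIFOs, the
 * attribute hash and the work/scratch streams) is torn down in sync_delete()
 * below; the two functions need to stay in lock-step when fields are added
 * to struct update_subgroup.
 */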
109
d62a17ae 110static void sync_delete(struct update_subgroup *subgrp)
3f9c7369 111{
d62a17ae 112 if (subgrp->sync)
113 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
114 subgrp->sync = NULL;
115 if (subgrp->hash)
116 hash_free(subgrp->hash);
117 subgrp->hash = NULL;
118 if (subgrp->work)
119 stream_free(subgrp->work);
120 subgrp->work = NULL;
121 if (subgrp->scratch)
122 stream_free(subgrp->scratch);
123 subgrp->scratch = NULL;
124}
125
126/**
127 * conf_copy
128 *
129 * copy only those fields that are relevant to update group match
130 */
d62a17ae 131static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
132 safi_t safi)
3f9c7369 133{
d62a17ae 134 struct bgp_filter *srcfilter;
135 struct bgp_filter *dstfilter;
136
137 srcfilter = &src->filter[afi][safi];
138 dstfilter = &dst->filter[afi][safi];
139
140 dst->bgp = src->bgp;
141 dst->sort = src->sort;
142 dst->as = src->as;
143 dst->v_routeadv = src->v_routeadv;
144 dst->flags = src->flags;
145 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
146 if (dst->host)
147 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
148
149 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
150 dst->cap = src->cap;
151 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
152 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
153 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
154 dst->local_as = src->local_as;
155 dst->change_local_as = src->change_local_as;
156 dst->shared_network = src->shared_network;
157 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
158
159 dst->group = src->group;
160
161 if (src->default_rmap[afi][safi].name) {
162 dst->default_rmap[afi][safi].name =
163 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
164 src->default_rmap[afi][safi].name);
165 dst->default_rmap[afi][safi].map =
166 src->default_rmap[afi][safi].map;
167 }
168
169 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
170 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
171 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
172 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
173 }
174
175 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
176 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
177 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
178 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
179 }
180
181 if (FILTER_LIST_OUT_NAME(srcfilter)) {
182 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
183 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
184 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
185 }
186
187 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
188 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
189 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
190 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
191 }
192
193 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
194 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
195 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
196 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
197 }
198}
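
/*
 * Note: conf_copy() duplicates only the fields that influence update-group
 * membership and XSTRDUPs the policy names it copies, so callers that copy
 * into a stack-allocated peer must pair it with conf_release() below to
 * avoid leaking those strings. A minimal sketch of the pattern used by
 * update_group_find()/update_group_create() later in this file:
 *
 *	struct peer tmp_conf;
 *
 *	memset(&tmp_conf, 0, sizeof(tmp_conf));
 *	conf_copy(&tmp_conf, paf->peer, paf->afi, paf->safi);
 *	... use &tmp_conf as the hash lookup key ...
 *	conf_release(&tmp_conf, paf->afi, paf->safi);
 */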
199
200/**
6e919709 201 * since we did a bunch of XSTRDUP's in conf_copy, time to free them up
3f9c7369 202 */
d62a17ae 203static void conf_release(struct peer *src, afi_t afi, safi_t safi)
3f9c7369 204{
d62a17ae 205 struct bgp_filter *srcfilter;
3f9c7369 206
d62a17ae 207 srcfilter = &src->filter[afi][safi];
3f9c7369 208
d62a17ae 209 if (src->default_rmap[afi][safi].name)
210 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
3f9c7369 211
d62a17ae 212 if (srcfilter->dlist[FILTER_OUT].name)
213 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
3f9c7369 214
d62a17ae 215 if (srcfilter->plist[FILTER_OUT].name)
216 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
3f9c7369 217
d62a17ae 218 if (srcfilter->aslist[FILTER_OUT].name)
219 XFREE(MTYPE_BGP_FILTER_NAME,
220 srcfilter->aslist[FILTER_OUT].name);
3f9c7369 221
d62a17ae 222 if (srcfilter->map[RMAP_OUT].name)
223 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
3f9c7369 224
d62a17ae 225 if (srcfilter->usmap.name)
226 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
495f0b13 227
d62a17ae 228 if (src->host)
229 XFREE(MTYPE_BGP_PEER_HOST, src->host);
230 src->host = NULL;
231}
232
d62a17ae 233static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
3f9c7369 234{
d62a17ae 235 struct peer *src;
236 struct peer *dst;
3f9c7369 237
d62a17ae 238 if (!updgrp || !paf)
239 return;
3f9c7369 240
d62a17ae 241 src = paf->peer;
242 dst = updgrp->conf;
243 if (!src || !dst)
244 return;
3f9c7369 245
d62a17ae 246 updgrp->afi = paf->afi;
247 updgrp->safi = paf->safi;
248 updgrp->afid = paf->afid;
249 updgrp->bgp = src->bgp;
3f9c7369 250
d62a17ae 251 conf_copy(dst, src, paf->afi, paf->safi);
252}
253
254/**
255 * auxiliary functions to maintain the hash table.
256 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
257 * - updgrp_hash_key_make - makes the key for update group search
258 * - updgrp_hash_cmp - compare two update groups.
259 */
d62a17ae 260static void *updgrp_hash_alloc(void *p)
3f9c7369 261{
d62a17ae 262 struct update_group *updgrp;
263 const struct update_group *in;
264
265 in = (const struct update_group *)p;
266 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
267 memcpy(updgrp, in, sizeof(struct update_group));
268 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
269 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
270 return updgrp;
271}
272
273/**
274 * The hash value for a peer is computed from the following variables:
275 * v = f(
276 * 1. IBGP (1) or EBGP (2)
277 * 2. FLAGS based on configuration:
278 * LOCAL_AS_NO_PREPEND
279 * LOCAL_AS_REPLACE_AS
280 * 3. AF_FLAGS based on configuration:
281 * Refer to definition in bgp_updgrp.h
282 * 4. (AF-independent) Capability flags:
283 * AS4_RCV capability
284 * 5. (AF-dependent) Capability flags:
285 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
286 * 6. MRAI
287 * 7. peer-group name
288 * 8. Outbound route-map name (neighbor route-map <> out)
289 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
290 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
291 * 11. Outbound as-list name (neighbor filter-list <> out)
292 * 12. Unsuppress map name (neighbor unsuppress-map <>)
293 * 13. default rmap name (neighbor default-originate route-map <>)
294 * 14. encoding both global and link-local nexthop?
295 * 15. If peer is configured to be a lonesoul, peer ip address
296 * 16. Local-as should match, if configured.
297 * )
298 */
d62a17ae 299static unsigned int updgrp_hash_key_make(void *p)
3f9c7369 300{
d62a17ae 301 const struct update_group *updgrp;
302 const struct peer *peer;
303 const struct bgp_filter *filter;
304 uint32_t flags;
305 uint32_t key;
306 afi_t afi;
307 safi_t safi;
308
309#define SEED1 999331
310#define SEED2 2147483647
311
d62a17ae 312 updgrp = p;
313 peer = updgrp->conf;
314 afi = updgrp->afi;
315 safi = updgrp->safi;
316 flags = peer->af_flags[afi][safi];
317 filter = &peer->filter[afi][safi];
318
319 key = 0;
320
321 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
322 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
323 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
324 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
325 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
326 key);
327 key = jhash_1word(peer->v_routeadv, key);
328 key = jhash_1word(peer->change_local_as, key);
329
330 if (peer->group)
331 key = jhash_1word(jhash(peer->group->name,
332 strlen(peer->group->name), SEED1),
333 key);
334
335 if (filter->map[RMAP_OUT].name)
336 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
337 strlen(filter->map[RMAP_OUT].name),
338 SEED1),
339 key);
340
341 if (filter->dlist[FILTER_OUT].name)
342 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
343 strlen(filter->dlist[FILTER_OUT].name),
344 SEED1),
345 key);
346
347 if (filter->plist[FILTER_OUT].name)
348 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
349 strlen(filter->plist[FILTER_OUT].name),
350 SEED1),
351 key);
352
353 if (filter->aslist[FILTER_OUT].name)
354 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
355 strlen(filter->aslist[FILTER_OUT].name),
356 SEED1),
357 key);
358
359 if (filter->usmap.name)
360 key = jhash_1word(jhash(filter->usmap.name,
361 strlen(filter->usmap.name), SEED1),
362 key);
363
364 if (peer->default_rmap[afi][safi].name)
365 key = jhash_1word(
366 jhash(peer->default_rmap[afi][safi].name,
367 strlen(peer->default_rmap[afi][safi].name),
368 SEED1),
369 key);
370
371 /* If peer is on a shared network and is exchanging IPv6 prefixes,
 372 * it needs to include the link-local address. That's different from
373 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
374 * bytes). We create different update groups to take care of that.
375 */
376 key = jhash_1word(
377 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
378 key);
379
380 /*
381 * There are certain peers that must get their own update-group:
382 * - lonesoul peers
383 * - peers that negotiated ORF
384 */
385 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
386 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
387 || CHECK_FLAG(peer->af_cap[afi][safi],
388 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
389 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
390 key);
391
392 return key;
393}
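
/*
 * Note: any attribute folded into the key above must also be compared in
 * updgrp_hash_cmp() below (and vice versa); otherwise peers with different
 * outbound policy could land in the same update group, or identical peers
 * could be split needlessly. For example, two EBGP peers with the same
 * outbound route-map, MRAI and negotiated capabilities hash to the same key
 * and share a group; configuring a different outbound route-map on one of
 * them changes its key and moves it to another group.
 */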
394
d62a17ae 395static int updgrp_hash_cmp(const void *p1, const void *p2)
3f9c7369 396{
d62a17ae 397 const struct update_group *grp1;
398 const struct update_group *grp2;
399 const struct peer *pe1;
400 const struct peer *pe2;
401 uint32_t flags1;
402 uint32_t flags2;
403 const struct bgp_filter *fl1;
404 const struct bgp_filter *fl2;
405 afi_t afi;
406 safi_t safi;
407
408 if (!p1 || !p2)
409 return 0;
410
411 grp1 = p1;
412 grp2 = p2;
413 pe1 = grp1->conf;
414 pe2 = grp2->conf;
415 afi = grp1->afi;
416 safi = grp1->safi;
417 flags1 = pe1->af_flags[afi][safi];
418 flags2 = pe2->af_flags[afi][safi];
419 fl1 = &pe1->filter[afi][safi];
420 fl2 = &pe2->filter[afi][safi];
421
422 /* put EBGP and IBGP peers in different update groups */
423 if (pe1->sort != pe2->sort)
424 return 0;
425
426 /* check peer flags */
427 if ((pe1->flags & PEER_UPDGRP_FLAGS)
428 != (pe2->flags & PEER_UPDGRP_FLAGS))
429 return 0;
430
431 /* If there is 'local-as' configured, it should match. */
432 if (pe1->change_local_as != pe2->change_local_as)
433 return 0;
434
435 /* flags like route reflector client */
436 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
437 return 0;
438
439 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
440 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
441 return 0;
442
443 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
444 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
445 return 0;
446
447 if (pe1->v_routeadv != pe2->v_routeadv)
448 return 0;
449
450 if (pe1->group != pe2->group)
451 return 0;
452
453 /* route-map names should be the same */
454 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
455 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
456 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
457 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
458 return 0;
459
460 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
461 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
462 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
463 && strcmp(fl1->dlist[FILTER_OUT].name,
464 fl2->dlist[FILTER_OUT].name)))
465 return 0;
466
467 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
468 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
469 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
470 && strcmp(fl1->plist[FILTER_OUT].name,
471 fl2->plist[FILTER_OUT].name)))
472 return 0;
473
474 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
475 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
476 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
477 && strcmp(fl1->aslist[FILTER_OUT].name,
478 fl2->aslist[FILTER_OUT].name)))
479 return 0;
480
481 if ((fl1->usmap.name && !fl2->usmap.name)
482 || (!fl1->usmap.name && fl2->usmap.name)
483 || (fl1->usmap.name && fl2->usmap.name
484 && strcmp(fl1->usmap.name, fl2->usmap.name)))
485 return 0;
486
487 if ((pe1->default_rmap[afi][safi].name
488 && !pe2->default_rmap[afi][safi].name)
489 || (!pe1->default_rmap[afi][safi].name
490 && pe2->default_rmap[afi][safi].name)
491 || (pe1->default_rmap[afi][safi].name
492 && pe2->default_rmap[afi][safi].name
493 && strcmp(pe1->default_rmap[afi][safi].name,
494 pe2->default_rmap[afi][safi].name)))
495 return 0;
496
497 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
498 return 0;
499
500 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
501 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
502 || CHECK_FLAG(pe1->af_cap[afi][safi],
503 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
504 && !sockunion_same(&pe1->su, &pe2->su))
505 return 0;
506
507 return 1;
508}
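
/*
 * Note: as the hash table code expects, updgrp_hash_cmp() returns 1 when the
 * two update groups are considered equal (i.e. their peers can share an
 * update group) and 0 on the first mismatch.
 */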
509
d62a17ae 510static void peer_lonesoul_or_not(struct peer *peer, int set)
3f9c7369 511{
d62a17ae 512 /* no change in status? */
513 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
514 return;
3f9c7369 515
d62a17ae 516 if (set)
517 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
518 else
519 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
3f9c7369 520
d62a17ae 521 update_group_adjust_peer_afs(peer);
522}
523
524/*
525 * subgroup_total_packets_enqueued
526 *
527 * Returns the total number of packets enqueued to a subgroup.
528 */
529static unsigned int
d62a17ae 530subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
3f9c7369 531{
d62a17ae 532 struct bpacket *pkt;
3f9c7369 533
d62a17ae 534 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
3f9c7369 535
d62a17ae 536 return pkt->ver - 1;
537}
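
/*
 * Note: this relies on two properties of the packet queue: it always ends
 * with the empty placeholder bpacket (added with a NULL buffer in
 * update_subgroup_create() below), and bpacket version numbers increase by
 * one per enqueued packet, so the placeholder's version minus one gives the
 * running total.
 */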
538
d62a17ae 539static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
3f9c7369 540{
d62a17ae 541 struct updwalk_context *ctx = arg;
542 struct vty *vty;
543 struct update_subgroup *subgrp;
544 struct peer_af *paf;
545 struct bgp_filter *filter;
546 int match = 0;
547
548 if (!ctx)
549 return CMD_SUCCESS;
550
551 if (ctx->subgrp_id) {
a2addae8 552 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 553 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
554 continue;
555 else {
556 match = 1;
557 break;
558 }
559 }
560 } else {
561 match = 1;
562 }
563
564 if (!match) {
565 /* Since this routine is invoked from a walk, we cannot signal
566 * any */
567 /* error here, can only return. */
568 return CMD_SUCCESS;
569 }
570
571 vty = ctx->vty;
572
573 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
574 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
575 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
576 if (filter->map[RMAP_OUT].name)
577 vty_out(vty, " Outgoing route map: %s%s\n",
578 filter->map[RMAP_OUT].map ? "X" : "",
579 filter->map[RMAP_OUT].name);
580 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
581 if (updgrp->conf->change_local_as)
582 vty_out(vty, " Local AS %u%s%s\n",
583 updgrp->conf->change_local_as,
584 CHECK_FLAG(updgrp->conf->flags,
585 PEER_FLAG_LOCAL_AS_NO_PREPEND)
586 ? " no-prepend"
587 : "",
588 CHECK_FLAG(updgrp->conf->flags,
589 PEER_FLAG_LOCAL_AS_REPLACE_AS)
590 ? " replace-as"
591 : "");
592
a2addae8 593 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 594 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
595 continue;
596 vty_out(vty, "\n");
597 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
598 vty_out(vty, " Created: %s",
599 timestamp_string(subgrp->uptime));
600
601 if (subgrp->split_from.update_group_id
602 || subgrp->split_from.subgroup_id) {
603 vty_out(vty, " Split from group id: %" PRIu64 "\n",
604 subgrp->split_from.update_group_id);
605 vty_out(vty,
606 " Split from subgroup id: %" PRIu64 "\n",
607 subgrp->split_from.subgroup_id);
608 }
609
610 vty_out(vty, " Join events: %u\n", subgrp->join_events);
611 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
612 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
613 vty_out(vty, " Split events: %u\n", subgrp->split_events);
614 vty_out(vty, " Update group switch events: %u\n",
615 subgrp->updgrp_switch_events);
616 vty_out(vty, " Peer refreshes combined: %u\n",
617 subgrp->peer_refreshes_combined);
618 vty_out(vty, " Merge checks triggered: %u\n",
619 subgrp->merge_checks_triggered);
620 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
621 vty_out(vty, " Packet queue length: %d\n",
622 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
623 vty_out(vty, " Total packets enqueued: %u\n",
624 subgroup_total_packets_enqueued(subgrp));
625 vty_out(vty, " Packet queue high watermark: %d\n",
626 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
627 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
628 vty_out(vty, " Advertise list: %s\n",
629 advertise_list_is_empty(subgrp) ? "empty"
630 : "not empty");
631 vty_out(vty, " Flags: %s\n",
632 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
633 ? "R"
634 : "");
635 if (subgrp->peer_count > 0) {
636 vty_out(vty, " Peers:\n");
637 SUBGRP_FOREACH_PEER (subgrp, paf)
638 vty_out(vty, " - %s\n", paf->peer->host);
d62a17ae 639 }
8fe8a7f6 640 }
d62a17ae 641 return UPDWALK_CONTINUE;
642}
643
644/*
645 * Helper function to show the packet queue for each subgroup of update group.
646 * Will be constrained to a particular subgroup id if id !=0
647 */
d62a17ae 648static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
649 void *arg)
3f9c7369 650{
d62a17ae 651 struct updwalk_context *ctx = arg;
652 struct update_subgroup *subgrp;
653 struct vty *vty;
654
655 vty = ctx->vty;
a2addae8 656 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 657 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
658 continue;
659 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
660 updgrp->id, subgrp->id);
661 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
662 }
663 return UPDWALK_CONTINUE;
664}
665
666/*
667 * Show the packet queue for each subgroup of update group. Will be
668 * constrained to a particular subgroup id if id !=0
669 */
d62a17ae 670void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
671 struct vty *vty, uint64_t id)
3f9c7369 672{
d62a17ae 673 struct updwalk_context ctx;
674
675 memset(&ctx, 0, sizeof(ctx));
676 ctx.vty = vty;
677 ctx.subgrp_id = id;
678 ctx.flags = 0;
679 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
680 &ctx);
681}
682
d62a17ae 683static struct update_group *update_group_find(struct peer_af *paf)
3f9c7369 684{
d62a17ae 685 struct update_group *updgrp;
686 struct update_group tmp;
687 struct peer tmp_conf;
3f9c7369 688
d62a17ae 689 if (!peer_established(PAF_PEER(paf)))
690 return NULL;
3f9c7369 691
d62a17ae 692 memset(&tmp, 0, sizeof(tmp));
693 memset(&tmp_conf, 0, sizeof(tmp_conf));
694 tmp.conf = &tmp_conf;
695 peer2_updgrp_copy(&tmp, paf);
3f9c7369 696
d62a17ae 697 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
698 conf_release(&tmp_conf, paf->afi, paf->safi);
699 return updgrp;
700}
701
d62a17ae 702static struct update_group *update_group_create(struct peer_af *paf)
3f9c7369 703{
d62a17ae 704 struct update_group *updgrp;
705 struct update_group tmp;
706 struct peer tmp_conf;
3f9c7369 707
d62a17ae 708 memset(&tmp, 0, sizeof(tmp));
709 memset(&tmp_conf, 0, sizeof(tmp_conf));
710 tmp.conf = &tmp_conf;
711 peer2_updgrp_copy(&tmp, paf);
3f9c7369 712
d62a17ae 713 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
714 updgrp_hash_alloc);
715 if (!updgrp)
716 return NULL;
717 update_group_checkin(updgrp);
3f9c7369 718
d62a17ae 719 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
720 zlog_debug("create update group %" PRIu64, updgrp->id);
3f9c7369 721
d62a17ae 722 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
3f9c7369 723
d62a17ae 724 conf_release(&tmp_conf, paf->afi, paf->safi);
725 return updgrp;
726}
727
d62a17ae 728static void update_group_delete(struct update_group *updgrp)
3f9c7369 729{
d62a17ae 730 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
731 zlog_debug("delete update group %" PRIu64, updgrp->id);
3f9c7369 732
d62a17ae 733 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
3f9c7369 734
d62a17ae 735 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
736 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
3d68677e 737
d62a17ae 738 if (updgrp->conf->host)
739 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
740 updgrp->conf->host = NULL;
6e919709 741
d62a17ae 742 if (updgrp->conf->ifname)
743 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
6e919709 744
d62a17ae 745 XFREE(MTYPE_BGP_PEER, updgrp->conf);
746 XFREE(MTYPE_BGP_UPDGRP, updgrp);
747}
748
d62a17ae 749static void update_group_add_subgroup(struct update_group *updgrp,
750 struct update_subgroup *subgrp)
3f9c7369 751{
d62a17ae 752 if (!updgrp || !subgrp)
753 return;
3f9c7369 754
d62a17ae 755 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
756 subgrp->update_group = updgrp;
757}
758
d62a17ae 759static void update_group_remove_subgroup(struct update_group *updgrp,
760 struct update_subgroup *subgrp)
3f9c7369 761{
d62a17ae 762 if (!updgrp || !subgrp)
763 return;
3f9c7369 764
d62a17ae 765 LIST_REMOVE(subgrp, updgrp_train);
766 subgrp->update_group = NULL;
767 if (LIST_EMPTY(&(updgrp->subgrps)))
768 update_group_delete(updgrp);
769}
770
771static struct update_subgroup *
d62a17ae 772update_subgroup_create(struct update_group *updgrp)
3f9c7369 773{
d62a17ae 774 struct update_subgroup *subgrp;
3f9c7369 775
d62a17ae 776 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
777 update_subgroup_checkin(subgrp, updgrp);
778 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
779 sync_init(subgrp);
780 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
781 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
782 TAILQ_INIT(&(subgrp->adjq));
783 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
784 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
785 subgrp->id);
3f9c7369 786
d62a17ae 787 update_group_add_subgroup(updgrp, subgrp);
3f9c7369 788
d62a17ae 789 UPDGRP_INCR_STAT(updgrp, subgrps_created);
3f9c7369 790
d62a17ae 791 return subgrp;
792}
793
d62a17ae 794static void update_subgroup_delete(struct update_subgroup *subgrp)
3f9c7369 795{
d62a17ae 796 if (!subgrp)
797 return;
3f9c7369 798
d62a17ae 799 if (subgrp->update_group)
800 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
3f9c7369 801
d62a17ae 802 if (subgrp->t_merge_check)
803 THREAD_OFF(subgrp->t_merge_check);
3f9c7369 804
d62a17ae 805 if (subgrp->t_coalesce)
806 THREAD_TIMER_OFF(subgrp->t_coalesce);
3f9c7369 807
d62a17ae 808 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
809 subgroup_clear_table(subgrp);
3f9c7369 810
d62a17ae 811 if (subgrp->t_coalesce)
812 THREAD_TIMER_OFF(subgrp->t_coalesce);
813 sync_delete(subgrp);
3f9c7369 814
d62a17ae 815 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
816 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
817 subgrp->update_group->id, subgrp->id);
3f9c7369 818
d62a17ae 819 update_group_remove_subgroup(subgrp->update_group, subgrp);
3f9c7369 820
d62a17ae 821 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
822}
823
d62a17ae 824void update_subgroup_inherit_info(struct update_subgroup *to,
825 struct update_subgroup *from)
3f9c7369 826{
d62a17ae 827 if (!to || !from)
828 return;
3f9c7369 829
d62a17ae 830 to->sflags = from->sflags;
831}
832
833/*
834 * update_subgroup_check_delete
835 *
836 * Delete a subgroup if it is ready to be deleted.
837 *
838 * Returns TRUE if the subgroup was deleted.
839 */
d62a17ae 840static int update_subgroup_check_delete(struct update_subgroup *subgrp)
3f9c7369 841{
d62a17ae 842 if (!subgrp)
843 return 0;
3f9c7369 844
d62a17ae 845 if (!LIST_EMPTY(&(subgrp->peers)))
846 return 0;
3f9c7369 847
d62a17ae 848 update_subgroup_delete(subgrp);
3f9c7369 849
d62a17ae 850 return 1;
851}
852
853/*
854 * update_subgroup_add_peer
855 *
856 * @param send_enqueued_packets If true all currently enqueued packets will
857 * also be sent to the peer.
858 */
d62a17ae 859static void update_subgroup_add_peer(struct update_subgroup *subgrp,
860 struct peer_af *paf,
861 int send_enqueued_pkts)
3f9c7369 862{
d62a17ae 863 struct bpacket *pkt;
3f9c7369 864
d62a17ae 865 if (!subgrp || !paf)
866 return;
3f9c7369 867
d62a17ae 868 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
869 paf->subgroup = subgrp;
870 subgrp->peer_count++;
3f9c7369 871
d62a17ae 872 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
873 UPDGRP_PEER_DBG_EN(subgrp->update_group);
874 }
3f9c7369 875
d62a17ae 876 SUBGRP_INCR_STAT(subgrp, join_events);
3f9c7369 877
d62a17ae 878 if (send_enqueued_pkts) {
879 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
880 } else {
3f9c7369 881
d62a17ae 882 /*
883 * Hang the peer off of the last, placeholder, packet in the
884 * queue. This means it won't see any of the packets that are
 885 * currently in the queue.
886 */
887 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
888 assert(pkt->buffer == NULL);
889 }
3f9c7369 890
d62a17ae 891 bpacket_add_peer(pkt, paf);
3f9c7369 892
d62a17ae 893 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
894}
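
/*
 * Both call patterns appear in this file: update_subgroup_merge() adds peers
 * with send_enqueued_pkts == 0 so that packets already encoded for the
 * target subgroup are not replayed to them, while update_subgroup_split_peer()
 * and update_group_adjust_peer() pass 1 so a newly placed peer picks up
 * whatever is already queued.
 */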
895
896/*
897 * update_subgroup_remove_peer_internal
898 *
899 * Internal function that removes a peer from a subgroup, but does not
900 * delete the subgroup. A call to this function must almost always be
901 * followed by a call to update_subgroup_check_delete().
902 *
903 * @see update_subgroup_remove_peer
904 */
d62a17ae 905static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
906 struct peer_af *paf)
3f9c7369 907{
d62a17ae 908 assert(subgrp && paf);
3f9c7369 909
d62a17ae 910 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
911 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
912 }
3f9c7369 913
d62a17ae 914 bpacket_queue_remove_peer(paf);
915 LIST_REMOVE(paf, subgrp_train);
916 paf->subgroup = NULL;
917 subgrp->peer_count--;
3f9c7369 918
d62a17ae 919 SUBGRP_INCR_STAT(subgrp, prune_events);
920}
921
922/*
923 * update_subgroup_remove_peer
924 */
d62a17ae 925void update_subgroup_remove_peer(struct update_subgroup *subgrp,
926 struct peer_af *paf)
3f9c7369 927{
d62a17ae 928 if (!subgrp || !paf)
929 return;
3f9c7369 930
d62a17ae 931 update_subgroup_remove_peer_internal(subgrp, paf);
3f9c7369 932
d62a17ae 933 if (update_subgroup_check_delete(subgrp))
934 return;
3f9c7369 935
d62a17ae 936 /*
937 * The deletion of the peer may have caused some packets to be
938 * deleted from the subgroup packet queue. Check if the subgroup can
939 * be merged now.
940 */
941 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
942}
943
d62a17ae 944static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
945 struct peer_af *paf)
3f9c7369 946{
d62a17ae 947 struct update_subgroup *subgrp = NULL;
948 uint64_t version;
949
950 if (paf->subgroup) {
951 assert(0);
952 return NULL;
953 } else
954 version = 0;
955
956 if (!peer_established(PAF_PEER(paf)))
957 return NULL;
958
a2addae8 959 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 960 if (subgrp->version != version
961 || CHECK_FLAG(subgrp->sflags,
962 SUBGRP_STATUS_DEFAULT_ORIGINATE))
963 continue;
964
965 /*
966 * The version number is not meaningful on a subgroup that needs
967 * a refresh.
968 */
969 if (update_subgroup_needs_refresh(subgrp))
970 continue;
971
972 break;
973 }
974
975 return subgrp;
976}
977
978/*
979 * update_subgroup_ready_for_merge
980 *
981 * Returns TRUE if this subgroup is in a state that allows it to be
982 * merged into another subgroup.
983 */
d62a17ae 984static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
985{
986
d62a17ae 987 /*
988 * Not ready if there are any encoded packets waiting to be written
989 * out to peers.
990 */
991 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
992 return 0;
993
994 /*
 995 * Not ready if there are enqueued updates waiting to be encoded.
996 */
997 if (!advertise_list_is_empty(subgrp))
998 return 0;
999
1000 /*
1001 * Don't attempt to merge a subgroup that needs a refresh. For one,
1002 * we can't determine if the adj_out of such a group matches that of
1003 * another group.
1004 */
1005 if (update_subgroup_needs_refresh(subgrp))
1006 return 0;
1007
1008 return 1;
1009}
1010
1011/*
1012 * update_subgrp_can_merge_into
1013 *
1014 * Returns TRUE if the first subgroup can merge into the second
1015 * subgroup.
1016 */
d62a17ae 1017static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1018 struct update_subgroup *target)
1019{
1020
d62a17ae 1021 if (subgrp == target)
1022 return 0;
3f9c7369 1023
d62a17ae 1024 /*
1025 * Both must have processed the BRIB to the same point in order to
1026 * be merged.
1027 */
1028 if (subgrp->version != target->version)
1029 return 0;
3f9c7369 1030
d62a17ae 1031 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1032 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1033 return 0;
f910ef58 1034
d62a17ae 1035 if (subgrp->adj_count != target->adj_count)
1036 return 0;
3f9c7369 1037
d62a17ae 1038 return update_subgroup_ready_for_merge(target);
1039}
1040
1041/*
1042 * update_subgroup_merge
1043 *
1044 * Merge the first subgroup into the second one.
1045 */
d62a17ae 1046static void update_subgroup_merge(struct update_subgroup *subgrp,
1047 struct update_subgroup *target,
1048 const char *reason)
3f9c7369 1049{
d62a17ae 1050 struct peer_af *paf;
1051 int result;
1052 int peer_count;
3f9c7369 1053
d62a17ae 1054 assert(subgrp->adj_count == target->adj_count);
3f9c7369 1055
d62a17ae 1056 peer_count = subgrp->peer_count;
3f9c7369 1057
d62a17ae 1058 while (1) {
1059 paf = LIST_FIRST(&subgrp->peers);
1060 if (!paf)
1061 break;
3f9c7369 1062
d62a17ae 1063 update_subgroup_remove_peer_internal(subgrp, paf);
3f9c7369 1064
d62a17ae 1065 /*
1066 * Add the peer to the target subgroup, while making sure that
1067 * any currently enqueued packets won't be sent to it. Enqueued
1068 * packets could, for example, result in an unnecessary withdraw
1069 * followed by an advertise.
1070 */
1071 update_subgroup_add_peer(target, paf, 0);
1072 }
3f9c7369 1073
d62a17ae 1074 SUBGRP_INCR_STAT(target, merge_events);
3f9c7369 1075
d62a17ae 1076 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1077 zlog_debug("u%" PRIu64 ":s%" PRIu64
1078 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1079 ", "
1080 "trigger: %s",
1081 subgrp->update_group->id, subgrp->id, peer_count,
1082 target->update_group->id, target->id,
1083 reason ? reason : "unknown");
3f9c7369 1084
d62a17ae 1085 result = update_subgroup_check_delete(subgrp);
1086 assert(result);
1087}
1088
1089/*
1090 * update_subgroup_check_merge
1091 *
1092 * Merge this subgroup into another subgroup if possible.
1093 *
1094 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1095 * should not be accessed in this case.
1096 */
d62a17ae 1097int update_subgroup_check_merge(struct update_subgroup *subgrp,
1098 const char *reason)
3f9c7369 1099{
d62a17ae 1100 struct update_subgroup *target;
3f9c7369 1101
d62a17ae 1102 if (!update_subgroup_ready_for_merge(subgrp))
1103 return 0;
3f9c7369 1104
d62a17ae 1105 /*
1106 * Look for a subgroup to merge into.
1107 */
a2addae8 1108 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
d62a17ae 1109 if (update_subgroup_can_merge_into(subgrp, target))
1110 break;
1111 }
3f9c7369 1112
d62a17ae 1113 if (!target)
1114 return 0;
3f9c7369 1115
d62a17ae 1116 update_subgroup_merge(subgrp, target, reason);
1117 return 1;
1118}
1119
d62a17ae 1120/*
 1121 * update_subgroup_merge_check_thread_cb
 1122 */
d62a17ae 1123static int update_subgroup_merge_check_thread_cb(struct thread *thread)
3f9c7369 1124{
d62a17ae 1125 struct update_subgroup *subgrp;
3f9c7369 1126
d62a17ae 1127 subgrp = THREAD_ARG(thread);
3f9c7369 1128
d62a17ae 1129 subgrp->t_merge_check = NULL;
3f9c7369 1130
d62a17ae 1131 update_subgroup_check_merge(subgrp, "triggered merge check");
1132 return 0;
1133}
1134
1135/*
1136 * update_subgroup_trigger_merge_check
1137 *
1138 * Triggers a call to update_subgroup_check_merge() on a clean context.
1139 *
1140 * @param force If true, the merge check will be triggered even if the
1141 * subgroup doesn't currently look ready for a merge.
1142 *
1143 * Returns TRUE if a merge check will be performed shortly.
1144 */
d62a17ae 1145int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1146 int force)
3f9c7369 1147{
d62a17ae 1148 if (subgrp->t_merge_check)
1149 return 1;
3f9c7369 1150
d62a17ae 1151 if (!force && !update_subgroup_ready_for_merge(subgrp))
1152 return 0;
3f9c7369 1153
d62a17ae 1154 subgrp->t_merge_check = NULL;
1155 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1156 subgrp, 0, &subgrp->t_merge_check);
3f9c7369 1157
d62a17ae 1158 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
3f9c7369 1159
d62a17ae 1160 return 1;
1161}
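
/*
 * Note: the zero-millisecond timer above simply defers the merge check to a
 * later pass of the main thread's event loop, which is the "clean context"
 * the function header refers to.
 */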
1162
1163/*
1164 * update_subgroup_copy_adj_out
1165 *
1166 * Helper function that clones the adj out (state about advertised
1167 * routes) from one subgroup to another. It assumes that the adj out
1168 * of the target subgroup is empty.
1169 */
d62a17ae 1170static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1171 struct update_subgroup *dest)
3f9c7369 1172{
d62a17ae 1173 struct bgp_adj_out *aout, *aout_copy;
1174
a2addae8 1175 SUBGRP_FOREACH_ADJ (source, aout) {
d62a17ae 1176 /*
1177 * Copy the adj out.
1178 */
1179 aout_copy =
1180 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1181 aout_copy->attr =
7c87afac 1182 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
d62a17ae 1183 }
1184}
1185
1186/*
1187 * update_subgroup_copy_packets
1188 *
1189 * Copy packets after and including the given packet to the subgroup
1190 * 'dest'.
1191 *
1192 * Returns the number of packets copied.
1193 */
d62a17ae 1194static int update_subgroup_copy_packets(struct update_subgroup *dest,
1195 struct bpacket *pkt)
3f9c7369 1196{
d62a17ae 1197 int count;
1198
1199 count = 0;
1200 while (pkt && pkt->buffer) {
1201 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1202 &pkt->arr);
1203 count++;
1204 pkt = bpacket_next(pkt);
1205 }
3f9c7369 1206
d62a17ae 1207 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
3f9c7369 1208
d62a17ae 1209 return count;
1210}
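
/*
 * Note: the copy loop above stops at the trailing placeholder packet
 * (pkt->buffer == NULL), so only real packets are duplicated into 'dest';
 * the destination subgroup already has its own placeholder from
 * update_subgroup_create().
 */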
1211
d62a17ae 1212static int updgrp_prefix_list_update(struct update_group *updgrp,
1213 const char *name)
3f9c7369 1214{
d62a17ae 1215 struct peer *peer;
1216 struct bgp_filter *filter;
1217
1218 peer = UPDGRP_PEER(updgrp);
1219 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1220
1221 if (PREFIX_LIST_OUT_NAME(filter)
1222 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1223 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1224 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1225 return 1;
1226 }
1227 return 0;
1228}
1229
d62a17ae 1230static int updgrp_filter_list_update(struct update_group *updgrp,
1231 const char *name)
3f9c7369 1232{
d62a17ae 1233 struct peer *peer;
1234 struct bgp_filter *filter;
1235
1236 peer = UPDGRP_PEER(updgrp);
1237 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1238
1239 if (FILTER_LIST_OUT_NAME(filter)
1240 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1241 FILTER_LIST_OUT(filter) =
1242 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1243 return 1;
1244 }
1245 return 0;
1246}
1247
d62a17ae 1248static int updgrp_distribute_list_update(struct update_group *updgrp,
1249 const char *name)
3f9c7369 1250{
d62a17ae 1251 struct peer *peer;
1252 struct bgp_filter *filter;
1253
1254 peer = UPDGRP_PEER(updgrp);
1255 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1256
1257 if (DISTRIBUTE_OUT_NAME(filter)
1258 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1259 DISTRIBUTE_OUT(filter) = access_list_lookup(
1260 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1261 return 1;
1262 }
1263 return 0;
1264}
1265
d62a17ae 1266static int updgrp_route_map_update(struct update_group *updgrp,
1267 const char *name, int *def_rmap_changed)
3f9c7369 1268{
d62a17ae 1269 struct peer *peer;
1270 struct bgp_filter *filter;
1271 int changed = 0;
1272 afi_t afi;
1273 safi_t safi;
1274
1275 peer = UPDGRP_PEER(updgrp);
1276 afi = UPDGRP_AFI(updgrp);
1277 safi = UPDGRP_SAFI(updgrp);
1278 filter = &peer->filter[afi][safi];
1279
1280 if (ROUTE_MAP_OUT_NAME(filter)
1281 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1282 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1283
1284 changed = 1;
1285 }
1286
1287 if (UNSUPPRESS_MAP_NAME(filter)
1288 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1289 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1290 changed = 1;
1291 }
1292
1293 /* process default-originate route-map */
1294 if (peer->default_rmap[afi][safi].name
1295 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1296 peer->default_rmap[afi][safi].map =
1297 route_map_lookup_by_name(name);
1298 if (def_rmap_changed)
1299 *def_rmap_changed = 1;
1300 }
1301 return changed;
1302}
1303
1304/*
1305 * hash iteration callback function to process a policy change for an
1306 * update group. Check if the changed policy matches the updgrp's
1307 * outbound route-map or unsuppress-map or default-originate map or
1308 * filter-list or prefix-list or distribute-list.
1309 * Trigger update generation accordingly.
1310 */
d62a17ae 1311static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
3f9c7369 1312{
d62a17ae 1313 struct updwalk_context *ctx = arg;
1314 struct update_subgroup *subgrp;
1315 int changed = 0;
1316 int def_changed = 0;
1317
1318 if (!updgrp || !ctx || !ctx->policy_name)
1319 return UPDWALK_CONTINUE;
1320
1321 switch (ctx->policy_type) {
1322 case BGP_POLICY_ROUTE_MAP:
1323 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1324 &def_changed);
1325 break;
1326 case BGP_POLICY_FILTER_LIST:
1327 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1328 break;
1329 case BGP_POLICY_PREFIX_LIST:
1330 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1331 break;
1332 case BGP_POLICY_DISTRIBUTE_LIST:
1333 changed =
1334 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1335 break;
1336 default:
1337 break;
1338 }
1339
1340 /* If not doing route update, return after updating "config" */
1341 if (!ctx->policy_route_update)
1342 return UPDWALK_CONTINUE;
1343
1344 /* If nothing has changed, return after updating "config" */
1345 if (!changed && !def_changed)
1346 return UPDWALK_CONTINUE;
1347
1348 /*
 1349 * If something has changed, then at the beginning of a route-map
 1350 * modification event, mark each subgroup's needs-refresh bit. For one,
 1351 * it signals that the subgroup needs a refresh. Second, it prevents a
 1352 * premature merge of this subgroup with another before a complete
 1353 * (outbound) refresh.
1356 */
1357 if (ctx->policy_event_start_flag) {
a2addae8 1358 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1359 update_subgroup_set_needs_refresh(subgrp, 1);
1360 }
1361 return UPDWALK_CONTINUE;
1362 }
1363
a2addae8 1364 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1365 if (changed) {
1366 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1367 zlog_debug(
1368 "u%" PRIu64 ":s%" PRIu64
1369 " announcing routes upon policy %s (type %d) change",
1370 updgrp->id, subgrp->id,
1371 ctx->policy_name, ctx->policy_type);
1372 subgroup_announce_route(subgrp);
1373 }
1374 if (def_changed) {
1375 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1376 zlog_debug(
1377 "u%" PRIu64 ":s%" PRIu64
1378 " announcing default upon default routemap %s change",
1379 updgrp->id, subgrp->id,
1380 ctx->policy_name);
1381 subgroup_default_originate(subgrp, 0);
1382 }
1383 update_subgroup_set_needs_refresh(subgrp, 0);
1384 }
1385 return UPDWALK_CONTINUE;
1386}
1387
d62a17ae 1388static int update_group_walkcb(struct hash_backet *backet, void *arg)
3f9c7369 1389{
d62a17ae 1390 struct update_group *updgrp = backet->data;
1391 struct updwalk_context *wctx = arg;
1392 int ret = (*wctx->cb)(updgrp, wctx->context);
1393 return ret;
1394}
1395
d62a17ae 1396static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1397 void *arg)
3f9c7369 1398{
d62a17ae 1399 struct update_subgroup *subgrp;
1400 struct update_subgroup *tmp_subgrp;
1401 const char *reason = arg;
3f9c7369 1402
1403 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1404 update_subgroup_check_merge(subgrp, reason);
d62a17ae 1405 return UPDWALK_CONTINUE;
1406}
1407
1408/********************
1409 * PUBLIC FUNCTIONS
1410 ********************/
1411
1412/*
1413 * trigger function when a policy (route-map/filter-list/prefix-list/
1414 * distribute-list etc.) content changes. Go through all the
1415 * update groups and process the change.
1416 *
1417 * bgp: the bgp instance
1418 * ptype: the type of policy that got modified, see bgpd.h
1419 * pname: name of the policy
1420 * route_update: flag to control if an automatic update generation should
1421 * occur
1422 * start_event: flag that indicates if it's the beginning of the change.
1423 * Esp. when the user is changing the content interactively
1424 * over multiple statements. Useful to set dirty flag on
1425 * update groups.
1426 */
d62a17ae 1427void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1428 const char *pname, int route_update,
1429 int start_event)
3f9c7369 1430{
d62a17ae 1431 struct updwalk_context ctx;
3f9c7369 1432
d62a17ae 1433 memset(&ctx, 0, sizeof(ctx));
1434 ctx.policy_type = ptype;
1435 ctx.policy_name = pname;
1436 ctx.policy_route_update = route_update;
1437 ctx.policy_event_start_flag = start_event;
1438 ctx.flags = 0;
3f9c7369 1439
d62a17ae 1440 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1441}
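
/*
 * Illustrative call (the policy name is hypothetical): once an outbound
 * route-map named "RM-OUT" has finished changing and routes should be
 * re-announced to the affected peers, something along these lines runs:
 *
 *	update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT", 1, 0);
 */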
1442
1443/*
1444 * update_subgroup_split_peer
1445 *
1446 * Ensure that the given peer is in a subgroup of its own in the
1447 * specified update group.
1448 */
d62a17ae 1449void update_subgroup_split_peer(struct peer_af *paf,
1450 struct update_group *updgrp)
3f9c7369 1451{
d62a17ae 1452 struct update_subgroup *old_subgrp, *subgrp;
1453 uint64_t old_id;
1454
1455
1456 old_subgrp = paf->subgroup;
1457
1458 if (!updgrp)
1459 updgrp = old_subgrp->update_group;
1460
1461 /*
1462 * If the peer is alone in its subgroup, reuse the existing
1463 * subgroup.
1464 */
1465 if (old_subgrp->peer_count == 1) {
1466 if (updgrp == old_subgrp->update_group)
1467 return;
1468
1469 subgrp = old_subgrp;
1470 old_id = old_subgrp->update_group->id;
1471
1472 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1473 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1474 }
1475
1476 update_group_remove_subgroup(old_subgrp->update_group,
1477 old_subgrp);
1478 update_group_add_subgroup(updgrp, subgrp);
1479
1480 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1481 UPDGRP_PEER_DBG_EN(updgrp);
1482 }
1483 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1484 zlog_debug("u%" PRIu64 ":s%" PRIu64
1485 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1486 old_id, subgrp->id, paf->peer->host,
1487 updgrp->id, subgrp->id);
1488
1489 /*
1490 * The state of the subgroup (adj_out, advs, packet queue etc)
1491 * is consistent internally, but may not be identical to other
1492 * subgroups in the new update group even if the version number
1493 * matches up. Make sure a full refresh is done before the
1494 * subgroup is merged with another.
1495 */
1496 update_subgroup_set_needs_refresh(subgrp, 1);
1497
1498 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1499 return;
1500 }
3f9c7369 1501
d62a17ae 1502 /*
1503 * Create a new subgroup under the specified update group, and copy
1504 * over relevant state to it.
1505 */
1506 subgrp = update_subgroup_create(updgrp);
1507 update_subgroup_inherit_info(subgrp, old_subgrp);
1508
1509 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1510 subgrp->split_from.subgroup_id = old_subgrp->id;
1511
1512 /*
1513 * Copy out relevant state from the old subgroup.
1514 */
1515 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1516 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1517
1518 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1519 zlog_debug("u%" PRIu64 ":s%" PRIu64
1520 " peer %s split and moved into u%" PRIu64
1521 ":s%" PRIu64,
1522 paf->subgroup->update_group->id, paf->subgroup->id,
1523 paf->peer->host, updgrp->id, subgrp->id);
1524
1525 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1526
1527 /*
1528 * Since queued advs were left behind, this new subgroup needs a
1529 * refresh.
1530 */
1531 update_subgroup_set_needs_refresh(subgrp, 1);
1532
1533 /*
1534 * Remove peer from old subgroup, and add it to the new one.
1535 */
1536 update_subgroup_remove_peer(paf->subgroup, paf);
1537
1538 update_subgroup_add_peer(subgrp, paf, 1);
1539}
1540
d62a17ae 1541void update_bgp_group_init(struct bgp *bgp)
3f9c7369 1542{
d62a17ae 1543 int afid;
3f9c7369 1544
a2addae8 1545 AF_FOREACH (afid)
3f65c5b1 1546 bgp->update_groups[afid] =
996c9314 1547 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
3f65c5b1 1548 "BGP Update Group Hash");
1549}
1550
d62a17ae 1551void update_bgp_group_free(struct bgp *bgp)
3d68677e 1552{
d62a17ae 1553 int afid;
1554
a2addae8 1555 AF_FOREACH (afid) {
d62a17ae 1556 if (bgp->update_groups[afid]) {
1557 hash_free(bgp->update_groups[afid]);
1558 bgp->update_groups[afid] = NULL;
1559 }
1560 }
1561}
1562
d62a17ae 1563void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1564 uint64_t subgrp_id)
3f9c7369 1565{
d62a17ae 1566 struct updwalk_context ctx;
1567 memset(&ctx, 0, sizeof(ctx));
1568 ctx.vty = vty;
1569 ctx.subgrp_id = subgrp_id;
8fe8a7f6 1570
d62a17ae 1571 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1572}
1573
1574/*
1575 * update_group_show_stats
1576 *
1577 * Show global statistics about update groups.
1578 */
d62a17ae 1579void update_group_show_stats(struct bgp *bgp, struct vty *vty)
3f9c7369 1580{
d62a17ae 1581 vty_out(vty, "Update groups created: %u\n",
1582 bgp->update_group_stats.updgrps_created);
1583 vty_out(vty, "Update groups deleted: %u\n",
1584 bgp->update_group_stats.updgrps_deleted);
1585 vty_out(vty, "Update subgroups created: %u\n",
1586 bgp->update_group_stats.subgrps_created);
1587 vty_out(vty, "Update subgroups deleted: %u\n",
1588 bgp->update_group_stats.subgrps_deleted);
1589 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1590 vty_out(vty, "Prune events: %u\n",
1591 bgp->update_group_stats.prune_events);
1592 vty_out(vty, "Merge events: %u\n",
1593 bgp->update_group_stats.merge_events);
1594 vty_out(vty, "Split events: %u\n",
1595 bgp->update_group_stats.split_events);
1596 vty_out(vty, "Update group switch events: %u\n",
1597 bgp->update_group_stats.updgrp_switch_events);
1598 vty_out(vty, "Peer route refreshes combined: %u\n",
1599 bgp->update_group_stats.peer_refreshes_combined);
1600 vty_out(vty, "Merge checks triggered: %u\n",
1601 bgp->update_group_stats.merge_checks_triggered);
1602}
1603
1604/*
1605 * update_group_adjust_peer
1606 */
d62a17ae 1607void update_group_adjust_peer(struct peer_af *paf)
3f9c7369 1608{
d62a17ae 1609 struct update_group *updgrp;
1610 struct update_subgroup *subgrp, *old_subgrp;
1611 struct peer *peer;
1612
1613 if (!paf)
1614 return;
1615
1616 peer = PAF_PEER(paf);
1617 if (!peer_established(peer)) {
1618 return;
1619 }
1620
1621 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1622 return;
3f9c7369 1623 }
3f9c7369 1624
d62a17ae 1625 if (!peer->afc_nego[paf->afi][paf->safi]) {
1626 return;
1627 }
3f9c7369 1628
d62a17ae 1629 updgrp = update_group_find(paf);
1630 if (!updgrp) {
1631 updgrp = update_group_create(paf);
1632 if (!updgrp) {
1633 zlog_err("couldn't create update group for peer %s",
1634 paf->peer->host);
1635 return;
1636 }
1637 }
3f9c7369 1638
d62a17ae 1639 old_subgrp = paf->subgroup;
3f9c7369 1640
d62a17ae 1641 if (old_subgrp) {
3f9c7369 1642
d62a17ae 1643 /*
 1644 * If the update group of the peer is unchanged, the peer can
 1645 * stay in its existing subgroup and we're done.
1647 */
1648 if (old_subgrp->update_group == updgrp)
1649 return;
1650
1651 /*
1652 * The peer is switching between update groups. Put it in its
1653 * own subgroup under the new update group.
1654 */
1655 update_subgroup_split_peer(paf, updgrp);
1656 return;
1657 }
1658
1659 subgrp = update_subgroup_find(updgrp, paf);
1660 if (!subgrp) {
1661 subgrp = update_subgroup_create(updgrp);
1662 if (!subgrp)
1663 return;
1664 }
3f9c7369 1665
d62a17ae 1666 update_subgroup_add_peer(subgrp, paf, 1);
1667 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1668 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1669 subgrp->id, paf->peer->host);
1670
1671 return;
1672}
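
/*
 * In short, update_group_adjust_peer() re-evaluates where an established
 * peer_af belongs: find (or create) the update group matching its current
 * outbound configuration, then either leave it where it is, split it into
 * its own subgroup under the new group, or join a compatible existing
 * subgroup.
 */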
1673
d62a17ae 1674int update_group_adjust_soloness(struct peer *peer, int set)
3f9c7369 1675{
d62a17ae 1676 struct peer_group *group;
1677 struct listnode *node, *nnode;
1678
1679 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1680 peer_lonesoul_or_not(peer, set);
1681 if (peer->status == Established)
1682 bgp_announce_route_all(peer);
1683 } else {
1684 group = peer->group;
1685 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1686 peer_lonesoul_or_not(peer, set);
1687 if (peer->status == Established)
1688 bgp_announce_route_all(peer);
1689 }
1690 }
1691 return 0;
1692}
1693
1694/*
1695 * update_subgroup_rib
1696 */
d62a17ae 1697struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
3f9c7369 1698{
d62a17ae 1699 struct bgp *bgp;
3f9c7369 1700
d62a17ae 1701 bgp = SUBGRP_INST(subgrp);
1702 if (!bgp)
1703 return NULL;
3f9c7369 1704
d62a17ae 1705 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1706}
1707
d62a17ae 1708void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1709 updgrp_walkcb cb, void *ctx)
3f9c7369 1710{
d62a17ae 1711 struct updwalk_context wctx;
1712 int afid;
3f9c7369 1713
d62a17ae 1714 if (!bgp)
1715 return;
1716 afid = afindex(afi, safi);
1717 if (afid >= BGP_AF_MAX)
1718 return;
3f9c7369 1719
d62a17ae 1720 memset(&wctx, 0, sizeof(wctx));
1721 wctx.cb = cb;
1722 wctx.context = ctx;
0de4848d 1723
d62a17ae 1724 if (bgp->update_groups[afid])
1725 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1726}
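
/*
 * A minimal walker sketch (my_walkcb is a hypothetical name): callbacks get
 * each update group plus the opaque context pointer and return
 * UPDWALK_CONTINUE to keep iterating, as all the walkers in this file do.
 *
 *	static int my_walkcb(struct update_group *updgrp, void *arg)
 *	{
 *		zlog_debug("visiting update group %" PRIu64, updgrp->id);
 *		return UPDWALK_CONTINUE;
 *	}
 *
 *	update_group_af_walk(bgp, AFI_IP, SAFI_UNICAST, my_walkcb, NULL);
 */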
1727
d62a17ae 1728void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
3f9c7369 1729{
d62a17ae 1730 afi_t afi;
1731 safi_t safi;
3f9c7369 1732
a2addae8 1733 FOREACH_AFI_SAFI (afi, safi) {
d62a17ae 1734 update_group_af_walk(bgp, afi, safi, cb, ctx);
1735 }
1736}
1737
d62a17ae 1738void update_group_periodic_merge(struct bgp *bgp)
3f9c7369 1739{
d62a17ae 1740 char reason[] = "periodic merge check";
3f9c7369 1741
d62a17ae 1742 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1743 (void *)reason);
1744}
1745
1746static int
1747update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
d62a17ae 1748 void *arg)
0de4848d 1749{
d62a17ae 1750 struct update_subgroup *subgrp;
1751 struct peer *peer;
1752 afi_t afi;
1753 safi_t safi;
1754
a2addae8 1755 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1756 peer = SUBGRP_PEER(subgrp);
1757 afi = SUBGRP_AFI(subgrp);
1758 safi = SUBGRP_SAFI(subgrp);
1759
1760 if (peer->default_rmap[afi][safi].name) {
1761 subgroup_default_originate(subgrp, 0);
1762 }
1763 }
1764
1765 return UPDWALK_CONTINUE;
1766}
1767
d62a17ae 1768int update_group_refresh_default_originate_route_map(struct thread *thread)
0de4848d 1769{
d62a17ae 1770 struct bgp *bgp;
1771 char reason[] = "refresh default-originate route-map";
0de4848d 1772
d62a17ae 1773 bgp = THREAD_ARG(thread);
1774 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1775 reason);
1776 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1777 bgp_unlock(bgp);
ffd0c037 1778
d62a17ae 1779 return (0);
1780}
1781
1782/*
1783 * peer_af_announce_route
1784 *
1785 * Refreshes routes out to a peer_af immediately.
1786 *
1787 * If the combine parameter is TRUE, then this function will try to
1788 * gather other peers in the subgroup for which a route announcement
 1789 * is pending and efficiently announce routes to all of them.
1790 *
1791 * For now, the 'combine' option has an effect only if all peers in
1792 * the subgroup have a route announcement pending.
1793 */
d62a17ae 1794void peer_af_announce_route(struct peer_af *paf, int combine)
3f9c7369 1795{
d62a17ae 1796 struct update_subgroup *subgrp;
1797 struct peer_af *cur_paf;
1798 int all_pending;
1799
1800 subgrp = paf->subgroup;
1801 all_pending = 0;
1802
1803 if (combine) {
1804 /*
1805 * If there are other peers in the old subgroup that also need
1806 * routes to be announced, pull them into the peer's new
1807 * subgroup.
1808 * Combine route announcement with other peers if possible.
1809 *
1810 * For now, we combine only if all peers in the subgroup have an
1811 * announcement pending.
1812 */
1813 all_pending = 1;
1814
a2addae8 1815 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
d62a17ae 1816 if (cur_paf == paf)
1817 continue;
1818
1819 if (cur_paf->t_announce_route)
1820 continue;
1821
1822 all_pending = 0;
1823 break;
1824 }
1825 }
1826 /*
1827 * Announce to the peer alone if we were not asked to combine peers,
 1828 * or if some peers don't have a route announcement pending.
1829 */
1830 if (!combine || !all_pending) {
1831 update_subgroup_split_peer(paf, NULL);
1832 if (!paf->subgroup)
1833 return;
1834
1835 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1836 zlog_debug("u%" PRIu64 ":s%" PRIu64
1837 " %s announcing routes",
1838 subgrp->update_group->id, subgrp->id,
1839 paf->peer->host);
1840
1841 subgroup_announce_route(paf->subgroup);
1842 return;
3f9c7369 1843 }
3f9c7369 1844
d62a17ae 1845 /*
 1846 * We will announce routes to the entire subgroup.
1847 *
1848 * First stop refresh timers on all the other peers.
1849 */
a2addae8 1850 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
d62a17ae 1851 if (cur_paf == paf)
1852 continue;
3f9c7369 1853
d62a17ae 1854 bgp_stop_announce_route_timer(cur_paf);
1855 }
3f9c7369 1856
d62a17ae 1857 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1858 zlog_debug("u%" PRIu64 ":s%" PRIu64
1859 " announcing routes to %s, combined into %d peers",
1860 subgrp->update_group->id, subgrp->id,
1861 paf->peer->host, subgrp->peer_count);
3f9c7369 1862
d62a17ae 1863 subgroup_announce_route(subgrp);
3f9c7369 1864
d62a17ae 1865 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1866 subgrp->peer_count - 1);
1867}
1868
1869void subgroup_trigger_write(struct update_subgroup *subgrp)
1870{
1871 struct peer_af *paf;
1872
1873 /*
1874 * For each peer in the subgroup, schedule a job to pull packets from
1875 * the subgroup output queue into their own output queue. This action
1876 * will trigger a write job on the I/O thread.
1877 */
1878 SUBGRP_FOREACH_PEER (subgrp, paf)
1879 if (paf->peer->status == Established)
1880 thread_add_timer_msec(
1881 bm->master, bgp_generate_updgrp_packets,
1882 paf->peer, 0,
1883 &paf->peer->t_generate_updgrp_packets);
1884}
1885
d62a17ae 1886int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
3f9c7369 1887{
d62a17ae 1888 UPDGRP_PEER_DBG_OFF(updgrp);
1889 return UPDWALK_CONTINUE;
3f9c7369 1890}
adbac85e 1891
06370dac 1892/* Return true if we should addpath encode NLRI to this peer */
d62a17ae 1893int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
adbac85e 1894{
d62a17ae 1895 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1896 && CHECK_FLAG(peer->af_cap[afi][safi],
1897 PEER_CAP_ADDPATH_AF_RX_RCV));
adbac85e 1898}
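
/*
 * Note: both capability bits must be present before addpath is used on the
 * wire for this AFI/SAFI: PEER_CAP_ADDPATH_AF_TX_ADV means we advertised the
 * ability to send additional paths, and PEER_CAP_ADDPATH_AF_RX_RCV means the
 * peer advertised the ability to receive them.
 */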
1899
1900/*
1901 * Return true if this is a path we should advertise due to a
1902 * configured addpath-tx knob
1903 */
d62a17ae 1904int bgp_addpath_tx_path(struct peer *peer, afi_t afi, safi_t safi,
1905 struct bgp_info *ri)
06370dac 1906{
d62a17ae 1907 if (CHECK_FLAG(peer->af_flags[afi][safi],
1908 PEER_FLAG_ADDPATH_TX_ALL_PATHS))
1909 return 1;
06370dac 1910
d62a17ae 1911 if (CHECK_FLAG(peer->af_flags[afi][safi],
1912 PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)
1913 && CHECK_FLAG(ri->flags, BGP_INFO_DMED_SELECTED))
1914 return 1;
06370dac 1915
d62a17ae 1916 return 0;
06370dac 1917}