3f9c7369
DS
1/**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
896014f4
DL
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
3f9c7369
DS
25 */
26
27#include <zebra.h>
28
29#include "prefix.h"
30#include "thread.h"
31#include "buffer.h"
32#include "stream.h"
33#include "command.h"
34#include "sockunion.h"
35#include "network.h"
36#include "memory.h"
37#include "filter.h"
38#include "routemap.h"
3f9c7369
DS
39#include "log.h"
40#include "plist.h"
41#include "linklist.h"
42#include "workqueue.h"
43#include "hash.h"
44#include "jhash.h"
45#include "queue.h"
46
47#include "bgpd/bgpd.h"
48#include "bgpd/bgp_table.h"
49#include "bgpd/bgp_debug.h"
14454c9f 50#include "bgpd/bgp_errors.h"
3f9c7369
DS
51#include "bgpd/bgp_fsm.h"
52#include "bgpd/bgp_advertise.h"
53#include "bgpd/bgp_packet.h"
54#include "bgpd/bgp_updgrp.h"
55#include "bgpd/bgp_route.h"
56#include "bgpd/bgp_filter.h"
2fc102e1 57#include "bgpd/bgp_io.h"
3f9c7369
DS
58
59/********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63/**
64 * Assign a unique ID to an update group or subgroup, mostly for display/
65 * debugging purposes. The ID space is 64 bits, so it is used freely,
66 * without worrying about wrap-around or about filling gaps. While at it,
67 * timestamp the creation.
68 */
d62a17ae 69static void update_group_checkin(struct update_group *updgrp)
3f9c7369 70{
d62a17ae 71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock();
3f9c7369
DS
73}
74
d62a17ae 75static void update_subgroup_checkin(struct update_subgroup *subgrp,
76 struct update_group *updgrp)
3f9c7369 77{
d62a17ae 78 subgrp->id = ++bm->subgrp_idspace;
79 subgrp->uptime = bgp_clock();
3f9c7369
DS
80}
81
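/*
 * sync_init
 *
 * Allocate the per-subgroup synchronization state: the advertisement
 * FIFOs (update, withdraw, withdraw_low), the attribute hash, and the
 * work and scratch streams used while building UPDATE packets.
 */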
d62a17ae 82static void sync_init(struct update_subgroup *subgrp)
3f9c7369 83{
d62a17ae 84 subgrp->sync =
85 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
a274fef8
DL
86 bgp_adv_fifo_init(&subgrp->sync->update);
87 bgp_adv_fifo_init(&subgrp->sync->withdraw);
88 bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
996c9314
LB
89 subgrp->hash =
90 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
d62a17ae 91
92 /* We use a larger buffer for subgrp->work in the event that:
93 * - We RX a BGP_UPDATE where the attributes alone are just
94 * under BGP_MAX_PACKET_SIZE
95 * - The user configures an outbound route-map that does many as-path
96 * prepends or adds many communities. At most they can have
97 * CMD_ARGC_MAX args in a route-map, so there is a finite limit on
98 * how large they can make the attributes.
99 *
100 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid
101 * bounds checking for every single attribute as we construct an
102 * UPDATE.
103 */
106 subgrp->work =
107 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
108 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
3f9c7369
DS
109}
110
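/*
 * sync_delete
 *
 * Free everything allocated by sync_init(): the sync structure, the
 * attribute hash and the work/scratch streams.
 */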
d62a17ae 111static void sync_delete(struct update_subgroup *subgrp)
3f9c7369 112{
0a22ddfb 113 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
d62a17ae 114 subgrp->sync = NULL;
115 if (subgrp->hash)
116 hash_free(subgrp->hash);
117 subgrp->hash = NULL;
118 if (subgrp->work)
119 stream_free(subgrp->work);
120 subgrp->work = NULL;
121 if (subgrp->scratch)
122 stream_free(subgrp->scratch);
123 subgrp->scratch = NULL;
3f9c7369
DS
124}
125
126/**
127 * conf_copy
128 *
129 * copy only those fields that are relevant to update group match
130 */
d62a17ae 131static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
132 safi_t safi)
3f9c7369 133{
d62a17ae 134 struct bgp_filter *srcfilter;
135 struct bgp_filter *dstfilter;
136
137 srcfilter = &src->filter[afi][safi];
138 dstfilter = &dst->filter[afi][safi];
139
140 dst->bgp = src->bgp;
141 dst->sort = src->sort;
142 dst->as = src->as;
143 dst->v_routeadv = src->v_routeadv;
144 dst->flags = src->flags;
145 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
fde246e8 146 dst->pmax_out[afi][safi] = src->pmax_out[afi][safi];
0a22ddfb 147 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
d62a17ae 148
149 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
150 dst->cap = src->cap;
151 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
152 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
153 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
dcc68b5e 154 dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
d62a17ae 155 dst->local_as = src->local_as;
156 dst->change_local_as = src->change_local_as;
157 dst->shared_network = src->shared_network;
158 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
159
160 dst->group = src->group;
161
162 if (src->default_rmap[afi][safi].name) {
163 dst->default_rmap[afi][safi].name =
164 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
165 src->default_rmap[afi][safi].name);
166 dst->default_rmap[afi][safi].map =
167 src->default_rmap[afi][safi].map;
168 }
169
170 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
171 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
172 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
173 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
174 }
175
176 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
177 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
178 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
179 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
180 }
181
182 if (FILTER_LIST_OUT_NAME(srcfilter)) {
183 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
184 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
185 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
186 }
187
188 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
189 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
190 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
191 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
192 }
193
194 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
195 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
196 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
197 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
198 }
3f9c7369
DS
199}
200
201/**
6e919709 202 * since we did a bunch of XSTRDUP's in conf_copy, time to free them up
3f9c7369 203 */
d62a17ae 204static void conf_release(struct peer *src, afi_t afi, safi_t safi)
3f9c7369 205{
d62a17ae 206 struct bgp_filter *srcfilter;
3f9c7369 207
d62a17ae 208 srcfilter = &src->filter[afi][safi];
3f9c7369 209
0a22ddfb 210 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
3f9c7369 211
0a22ddfb 212 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
3f9c7369 213
0a22ddfb 214 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
3f9c7369 215
0a22ddfb 216 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
3f9c7369 217
0a22ddfb 218 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
3f9c7369 219
0a22ddfb 220 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
495f0b13 221
0a22ddfb 222 XFREE(MTYPE_BGP_PEER_HOST, src->host);
d62a17ae 223 src->host = NULL;
3f9c7369
DS
224}
225
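/*
 * peer2_updgrp_copy
 *
 * Seed an update group from a peer_af: record the afi/safi/afid and the
 * owning bgp instance, and copy the match-relevant peer configuration
 * into the group's conf peer via conf_copy().
 */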
d62a17ae 226static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
3f9c7369 227{
d62a17ae 228 struct peer *src;
229 struct peer *dst;
3f9c7369 230
d62a17ae 231 if (!updgrp || !paf)
232 return;
3f9c7369 233
d62a17ae 234 src = paf->peer;
235 dst = updgrp->conf;
236 if (!src || !dst)
237 return;
3f9c7369 238
d62a17ae 239 updgrp->afi = paf->afi;
240 updgrp->safi = paf->safi;
241 updgrp->afid = paf->afid;
242 updgrp->bgp = src->bgp;
3f9c7369 243
d62a17ae 244 conf_copy(dst, src, paf->afi, paf->safi);
3f9c7369
DS
245}
246
247/**
248 * auxiliary functions to maintain the hash table.
249 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
250 * - updgrp_hash_key_make - makes the key for update group search
251 * - updgrp_hash_cmp - compare two update groups.
252 */
d62a17ae 253static void *updgrp_hash_alloc(void *p)
3f9c7369 254{
d62a17ae 255 struct update_group *updgrp;
256 const struct update_group *in;
257
258 in = (const struct update_group *)p;
259 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
260 memcpy(updgrp, in, sizeof(struct update_group));
261 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
262 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
263 return updgrp;
3f9c7369
DS
264}
265
266/**
267 * The hash value for a peer is computed from the following variables:
268 * v = f(
269 * 1. IBGP (1) or EBGP (2)
270 * 2. FLAGS based on configuration:
271 * LOCAL_AS_NO_PREPEND
272 * LOCAL_AS_REPLACE_AS
273 * 3. AF_FLAGS based on configuration:
274 * Refer to definition in bgp_updgrp.h
275 * 4. (AF-independent) Capability flags:
276 * AS4_RCV capability
277 * 5. (AF-dependent) Capability flags:
278 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
279 * 6. MRAI
280 * 7. peer-group name
281 * 8. Outbound route-map name (neighbor route-map <> out)
282 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
283 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
284 * 11. Outbound as-list name (neighbor filter-list <> out)
285 * 12. Unsuppress map name (neighbor unsuppress-map <>)
286 * 13. default rmap name (neighbor default-originate route-map <>)
287 * 14. encoding both global and link-local nexthop?
288 * 15. If peer is configured to be a lonesoul, peer ip address
289 * 16. Local-as should match, if configured.
290 * )
291 */
d8b87afe 292static unsigned int updgrp_hash_key_make(const void *p)
3f9c7369 293{
d62a17ae 294 const struct update_group *updgrp;
295 const struct peer *peer;
296 const struct bgp_filter *filter;
297 uint32_t flags;
298 uint32_t key;
299 afi_t afi;
300 safi_t safi;
3f9c7369
DS
301
302#define SEED1 999331
303#define SEED2 2147483647
304
d62a17ae 305 updgrp = p;
306 peer = updgrp->conf;
307 afi = updgrp->afi;
308 safi = updgrp->safi;
309 flags = peer->af_flags[afi][safi];
310 filter = &peer->filter[afi][safi];
311
312 key = 0;
313
314 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
315 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
316 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
dcc68b5e 317 key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
d62a17ae 318 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
319 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
320 key);
321 key = jhash_1word(peer->v_routeadv, key);
322 key = jhash_1word(peer->change_local_as, key);
323
324 if (peer->group)
325 key = jhash_1word(jhash(peer->group->name,
326 strlen(peer->group->name), SEED1),
327 key);
328
329 if (filter->map[RMAP_OUT].name)
330 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
331 strlen(filter->map[RMAP_OUT].name),
332 SEED1),
333 key);
334
335 if (filter->dlist[FILTER_OUT].name)
336 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
337 strlen(filter->dlist[FILTER_OUT].name),
338 SEED1),
339 key);
340
341 if (filter->plist[FILTER_OUT].name)
342 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
343 strlen(filter->plist[FILTER_OUT].name),
344 SEED1),
345 key);
346
347 if (filter->aslist[FILTER_OUT].name)
348 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
349 strlen(filter->aslist[FILTER_OUT].name),
350 SEED1),
351 key);
352
353 if (filter->usmap.name)
354 key = jhash_1word(jhash(filter->usmap.name,
355 strlen(filter->usmap.name), SEED1),
356 key);
357
358 if (peer->default_rmap[afi][safi].name)
359 key = jhash_1word(
360 jhash(peer->default_rmap[afi][safi].name,
361 strlen(peer->default_rmap[afi][safi].name),
362 SEED1),
363 key);
364
365 /* If peer is on a shared network and is exchanging IPv6 prefixes,
366 * it needs to include link-local address. That's different from
367 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
368 * bytes). We create different update groups to take care of that.
369 */
370 key = jhash_1word(
371 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
372 key);
373
374 /*
375 * There are certain peers that must get their own update-group:
376 * - lonesoul peers
377 * - peers that negotiated ORF
378 */
379 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
380 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
381 || CHECK_FLAG(peer->af_cap[afi][safi],
382 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
383 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
384 key);
385
386 return key;
3f9c7369
DS
387}
388
74df8d6d 389static bool updgrp_hash_cmp(const void *p1, const void *p2)
3f9c7369 390{
d62a17ae 391 const struct update_group *grp1;
392 const struct update_group *grp2;
393 const struct peer *pe1;
394 const struct peer *pe2;
395 uint32_t flags1;
396 uint32_t flags2;
397 const struct bgp_filter *fl1;
398 const struct bgp_filter *fl2;
399 afi_t afi;
400 safi_t safi;
401
402 if (!p1 || !p2)
74df8d6d 403 return false;
d62a17ae 404
405 grp1 = p1;
406 grp2 = p2;
407 pe1 = grp1->conf;
408 pe2 = grp2->conf;
409 afi = grp1->afi;
410 safi = grp1->safi;
411 flags1 = pe1->af_flags[afi][safi];
412 flags2 = pe2->af_flags[afi][safi];
413 fl1 = &pe1->filter[afi][safi];
414 fl2 = &pe2->filter[afi][safi];
415
416 /* put EBGP and IBGP peers in different update groups */
417 if (pe1->sort != pe2->sort)
74df8d6d 418 return false;
d62a17ae 419
420 /* check peer flags */
421 if ((pe1->flags & PEER_UPDGRP_FLAGS)
422 != (pe2->flags & PEER_UPDGRP_FLAGS))
74df8d6d 423 return false;
d62a17ae 424
425 /* If there is 'local-as' configured, it should match. */
426 if (pe1->change_local_as != pe2->change_local_as)
74df8d6d 427 return false;
d62a17ae 428
429 /* flags like route reflector client */
430 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
74df8d6d 431 return false;
d62a17ae 432
dcc68b5e 433 if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
b08047f8 434 return false;
dcc68b5e 435
d62a17ae 436 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
437 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
74df8d6d 438 return false;
d62a17ae 439
440 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
441 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
74df8d6d 442 return false;
d62a17ae 443
444 if (pe1->v_routeadv != pe2->v_routeadv)
74df8d6d 445 return false;
d62a17ae 446
447 if (pe1->group != pe2->group)
74df8d6d 448 return false;
d62a17ae 449
450 /* route-map names should be the same */
451 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
452 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
453 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
454 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
74df8d6d 455 return false;
d62a17ae 456
457 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
458 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
459 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
460 && strcmp(fl1->dlist[FILTER_OUT].name,
461 fl2->dlist[FILTER_OUT].name)))
74df8d6d 462 return false;
d62a17ae 463
464 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
465 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
466 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
467 && strcmp(fl1->plist[FILTER_OUT].name,
468 fl2->plist[FILTER_OUT].name)))
74df8d6d 469 return false;
d62a17ae 470
471 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
472 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
473 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
474 && strcmp(fl1->aslist[FILTER_OUT].name,
475 fl2->aslist[FILTER_OUT].name)))
74df8d6d 476 return false;
d62a17ae 477
478 if ((fl1->usmap.name && !fl2->usmap.name)
479 || (!fl1->usmap.name && fl2->usmap.name)
480 || (fl1->usmap.name && fl2->usmap.name
481 && strcmp(fl1->usmap.name, fl2->usmap.name)))
74df8d6d 482 return false;
d62a17ae 483
484 if ((pe1->default_rmap[afi][safi].name
485 && !pe2->default_rmap[afi][safi].name)
486 || (!pe1->default_rmap[afi][safi].name
487 && pe2->default_rmap[afi][safi].name)
488 || (pe1->default_rmap[afi][safi].name
489 && pe2->default_rmap[afi][safi].name
490 && strcmp(pe1->default_rmap[afi][safi].name,
491 pe2->default_rmap[afi][safi].name)))
74df8d6d 492 return false;
d62a17ae 493
494 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
74df8d6d 495 return false;
d62a17ae 496
497 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
498 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
499 || CHECK_FLAG(pe1->af_cap[afi][safi],
500 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
501 && !sockunion_same(&pe1->su, &pe2->su))
74df8d6d 502 return false;
d62a17ae 503
74df8d6d 504 return true;
3f9c7369
DS
505}
506
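/*
 * peer_lonesoul_or_not
 *
 * Set or clear PEER_FLAG_LONESOUL on a peer and, if the flag actually
 * changed, re-evaluate the peer's update-group membership for all of its
 * address families.
 */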
d62a17ae 507static void peer_lonesoul_or_not(struct peer *peer, int set)
3f9c7369 508{
d62a17ae 509 /* no change in status? */
510 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
511 return;
3f9c7369 512
d62a17ae 513 if (set)
514 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
515 else
516 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
3f9c7369 517
d62a17ae 518 update_group_adjust_peer_afs(peer);
3f9c7369
DS
519}
520
521/*
522 * subgroup_total_packets_enqueued
523 *
524 * Returns the total number of packets enqueued to a subgroup.
525 */
526static unsigned int
d62a17ae 527subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
3f9c7369 528{
d62a17ae 529 struct bpacket *pkt;
3f9c7369 530
d62a17ae 531 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
3f9c7369 532
d62a17ae 533 return pkt->ver - 1;
3f9c7369
DS
534}
535
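/*
 * update_group_show_walkcb
 *
 * Walk callback that prints an update group's relevant configuration and
 * per-subgroup statistics to the vty, optionally restricted to a single
 * subgroup id.
 */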
d62a17ae 536static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
3f9c7369 537{
d62a17ae 538 struct updwalk_context *ctx = arg;
539 struct vty *vty;
540 struct update_subgroup *subgrp;
541 struct peer_af *paf;
542 struct bgp_filter *filter;
543 int match = 0;
544
545 if (!ctx)
546 return CMD_SUCCESS;
547
548 if (ctx->subgrp_id) {
a2addae8 549 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 550 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
551 continue;
552 else {
553 match = 1;
554 break;
555 }
556 }
557 } else {
558 match = 1;
559 }
560
561 if (!match) {
562 /* Since this routine is invoked from a walk, we cannot signal
563 * any error here; we can only return.
564 */
565 return CMD_SUCCESS;
566 }
567
568 vty = ctx->vty;
569
570 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
571 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
572 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
573 if (filter->map[RMAP_OUT].name)
7dba67ff 574 vty_out(vty, " Outgoing route map: %s\n",
d62a17ae 575 filter->map[RMAP_OUT].name);
576 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
577 if (updgrp->conf->change_local_as)
578 vty_out(vty, " Local AS %u%s%s\n",
579 updgrp->conf->change_local_as,
580 CHECK_FLAG(updgrp->conf->flags,
581 PEER_FLAG_LOCAL_AS_NO_PREPEND)
582 ? " no-prepend"
583 : "",
584 CHECK_FLAG(updgrp->conf->flags,
585 PEER_FLAG_LOCAL_AS_REPLACE_AS)
586 ? " replace-as"
587 : "");
588
a2addae8 589 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 590 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
591 continue;
592 vty_out(vty, "\n");
593 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
594 vty_out(vty, " Created: %s",
595 timestamp_string(subgrp->uptime));
596
597 if (subgrp->split_from.update_group_id
598 || subgrp->split_from.subgroup_id) {
599 vty_out(vty, " Split from group id: %" PRIu64 "\n",
600 subgrp->split_from.update_group_id);
601 vty_out(vty,
602 " Split from subgroup id: %" PRIu64 "\n",
603 subgrp->split_from.subgroup_id);
604 }
605
606 vty_out(vty, " Join events: %u\n", subgrp->join_events);
607 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
608 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
609 vty_out(vty, " Split events: %u\n", subgrp->split_events);
610 vty_out(vty, " Update group switch events: %u\n",
611 subgrp->updgrp_switch_events);
612 vty_out(vty, " Peer refreshes combined: %u\n",
613 subgrp->peer_refreshes_combined);
614 vty_out(vty, " Merge checks triggered: %u\n",
615 subgrp->merge_checks_triggered);
5b18ef82
DS
616 vty_out(vty, " Coalesce Time: %u%s\n",
617 (UPDGRP_INST(subgrp->update_group))->coalesce_time,
618 subgrp->t_coalesce ? "(Running)" : "");
d62a17ae 619 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
620 vty_out(vty, " Packet queue length: %d\n",
621 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
622 vty_out(vty, " Total packets enqueued: %u\n",
623 subgroup_total_packets_enqueued(subgrp));
624 vty_out(vty, " Packet queue high watermark: %d\n",
625 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
626 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
627 vty_out(vty, " Advertise list: %s\n",
628 advertise_list_is_empty(subgrp) ? "empty"
629 : "not empty");
630 vty_out(vty, " Flags: %s\n",
631 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
632 ? "R"
633 : "");
634 if (subgrp->peer_count > 0) {
635 vty_out(vty, " Peers:\n");
a2addae8
RW
636 SUBGRP_FOREACH_PEER (subgrp, paf)
637 vty_out(vty, " - %s\n", paf->peer->host);
d62a17ae 638 }
8fe8a7f6 639 }
d62a17ae 640 return UPDWALK_CONTINUE;
3f9c7369
DS
641}
642
643/*
644 * Helper function to show the packet queue for each subgroup of an
645 * update group. Will be constrained to a particular subgroup id if id != 0.
646 */
d62a17ae 647static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
648 void *arg)
3f9c7369 649{
d62a17ae 650 struct updwalk_context *ctx = arg;
651 struct update_subgroup *subgrp;
652 struct vty *vty;
653
654 vty = ctx->vty;
a2addae8 655 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 656 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
657 continue;
658 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
659 updgrp->id, subgrp->id);
660 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
661 }
662 return UPDWALK_CONTINUE;
3f9c7369
DS
663}
664
665/*
666 * Show the packet queue for each subgroup of an update group. Will be
667 * constrained to a particular subgroup id if id != 0.
668 */
d62a17ae 669void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
670 struct vty *vty, uint64_t id)
3f9c7369 671{
d62a17ae 672 struct updwalk_context ctx;
673
674 memset(&ctx, 0, sizeof(ctx));
675 ctx.vty = vty;
676 ctx.subgrp_id = id;
677 ctx.flags = 0;
678 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
679 &ctx);
3f9c7369
DS
680}
681
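/*
 * update_group_find
 *
 * Look up the update group whose configuration matches the given peer_af.
 * A temporary group built from the peer's outbound configuration is used
 * as the hash lookup key; returns NULL if the peer is not established or
 * no matching group exists.
 */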
d62a17ae 682static struct update_group *update_group_find(struct peer_af *paf)
3f9c7369 683{
d62a17ae 684 struct update_group *updgrp;
685 struct update_group tmp;
686 struct peer tmp_conf;
3f9c7369 687
d62a17ae 688 if (!peer_established(PAF_PEER(paf)))
689 return NULL;
3f9c7369 690
d62a17ae 691 memset(&tmp, 0, sizeof(tmp));
692 memset(&tmp_conf, 0, sizeof(tmp_conf));
693 tmp.conf = &tmp_conf;
694 peer2_updgrp_copy(&tmp, paf);
3f9c7369 695
d62a17ae 696 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
697 conf_release(&tmp_conf, paf->afi, paf->safi);
698 return updgrp;
3f9c7369
DS
699}
700
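/*
 * update_group_create
 *
 * Create (or fetch from the per-AF hash) the update group matching the
 * given peer_af's configuration, and stamp it with an id and creation
 * time.
 */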
d62a17ae 701static struct update_group *update_group_create(struct peer_af *paf)
3f9c7369 702{
d62a17ae 703 struct update_group *updgrp;
704 struct update_group tmp;
705 struct peer tmp_conf;
3f9c7369 706
d62a17ae 707 memset(&tmp, 0, sizeof(tmp));
708 memset(&tmp_conf, 0, sizeof(tmp_conf));
709 tmp.conf = &tmp_conf;
710 peer2_updgrp_copy(&tmp, paf);
3f9c7369 711
d62a17ae 712 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
713 updgrp_hash_alloc);
714 if (!updgrp)
715 return NULL;
716 update_group_checkin(updgrp);
3f9c7369 717
d62a17ae 718 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
719 zlog_debug("create update group %" PRIu64, updgrp->id);
3f9c7369 720
d62a17ae 721 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
3f9c7369 722
d62a17ae 723 conf_release(&tmp_conf, paf->afi, paf->safi);
724 return updgrp;
3f9c7369
DS
725}
726
d62a17ae 727static void update_group_delete(struct update_group *updgrp)
3f9c7369 728{
d62a17ae 729 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
730 zlog_debug("delete update group %" PRIu64, updgrp->id);
3f9c7369 731
d62a17ae 732 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
3f9c7369 733
d62a17ae 734 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
735 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
3d68677e 736
0a22ddfb 737 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
d62a17ae 738 updgrp->conf->host = NULL;
6e919709 739
0a22ddfb 740 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
6e919709 741
d62a17ae 742 XFREE(MTYPE_BGP_PEER, updgrp->conf);
743 XFREE(MTYPE_BGP_UPDGRP, updgrp);
3f9c7369
DS
744}
745
d62a17ae 746static void update_group_add_subgroup(struct update_group *updgrp,
747 struct update_subgroup *subgrp)
3f9c7369 748{
d62a17ae 749 if (!updgrp || !subgrp)
750 return;
3f9c7369 751
d62a17ae 752 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
753 subgrp->update_group = updgrp;
3f9c7369
DS
754}
755
d62a17ae 756static void update_group_remove_subgroup(struct update_group *updgrp,
757 struct update_subgroup *subgrp)
3f9c7369 758{
d62a17ae 759 if (!updgrp || !subgrp)
760 return;
3f9c7369 761
d62a17ae 762 LIST_REMOVE(subgrp, updgrp_train);
763 subgrp->update_group = NULL;
764 if (LIST_EMPTY(&(updgrp->subgrps)))
765 update_group_delete(updgrp);
3f9c7369
DS
766}
767
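/*
 * update_subgroup_create
 *
 * Allocate a new subgroup under the given update group: assign its id and
 * uptime, inherit the configured coalesce time, initialize the sync state
 * and packet queue, and link it onto the group's subgroup list.
 */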
768static struct update_subgroup *
d62a17ae 769update_subgroup_create(struct update_group *updgrp)
3f9c7369 770{
d62a17ae 771 struct update_subgroup *subgrp;
3f9c7369 772
d62a17ae 773 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
774 update_subgroup_checkin(subgrp, updgrp);
775 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
776 sync_init(subgrp);
777 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
778 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
779 TAILQ_INIT(&(subgrp->adjq));
780 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
781 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
782 subgrp->id);
3f9c7369 783
d62a17ae 784 update_group_add_subgroup(updgrp, subgrp);
3f9c7369 785
d62a17ae 786 UPDGRP_INCR_STAT(updgrp, subgrps_created);
3f9c7369 787
d62a17ae 788 return subgrp;
3f9c7369
DS
789}
790
d62a17ae 791static void update_subgroup_delete(struct update_subgroup *subgrp)
3f9c7369 792{
d62a17ae 793 if (!subgrp)
794 return;
3f9c7369 795
d62a17ae 796 if (subgrp->update_group)
797 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
3f9c7369 798
d62a17ae 799 if (subgrp->t_merge_check)
800 THREAD_OFF(subgrp->t_merge_check);
3f9c7369 801
d62a17ae 802 if (subgrp->t_coalesce)
803 THREAD_TIMER_OFF(subgrp->t_coalesce);
3f9c7369 804
d62a17ae 805 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
806 subgroup_clear_table(subgrp);
3f9c7369 807
d62a17ae 808 if (subgrp->t_coalesce)
809 THREAD_TIMER_OFF(subgrp->t_coalesce);
810 sync_delete(subgrp);
3f9c7369 811
4f9a63ad 812 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
d62a17ae 813 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
814 subgrp->update_group->id, subgrp->id);
3f9c7369 815
d62a17ae 816 update_group_remove_subgroup(subgrp->update_group, subgrp);
3f9c7369 817
d62a17ae 818 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
3f9c7369
DS
819}
820
d62a17ae 821void update_subgroup_inherit_info(struct update_subgroup *to,
822 struct update_subgroup *from)
3f9c7369 823{
d62a17ae 824 if (!to || !from)
825 return;
3f9c7369 826
d62a17ae 827 to->sflags = from->sflags;
3f9c7369
DS
828}
829
830/*
831 * update_subgroup_check_delete
832 *
833 * Delete a subgroup if it is ready to be deleted.
834 *
2951a7a4 835 * Returns true if the subgroup was deleted.
3f9c7369 836 */
d62a17ae 837static int update_subgroup_check_delete(struct update_subgroup *subgrp)
3f9c7369 838{
d62a17ae 839 if (!subgrp)
840 return 0;
3f9c7369 841
d62a17ae 842 if (!LIST_EMPTY(&(subgrp->peers)))
843 return 0;
3f9c7369 844
d62a17ae 845 update_subgroup_delete(subgrp);
3f9c7369 846
d62a17ae 847 return 1;
3f9c7369
DS
848}
849
850/*
851 * update_subgroup_add_peer
852 *
853 * @param send_enqueued_packets If true all currently enqueued packets will
854 * also be sent to the peer.
855 */
d62a17ae 856static void update_subgroup_add_peer(struct update_subgroup *subgrp,
857 struct peer_af *paf,
858 int send_enqueued_pkts)
3f9c7369 859{
d62a17ae 860 struct bpacket *pkt;
3f9c7369 861
d62a17ae 862 if (!subgrp || !paf)
863 return;
3f9c7369 864
d62a17ae 865 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
866 paf->subgroup = subgrp;
867 subgrp->peer_count++;
3f9c7369 868
d62a17ae 869 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
870 UPDGRP_PEER_DBG_EN(subgrp->update_group);
871 }
3f9c7369 872
d62a17ae 873 SUBGRP_INCR_STAT(subgrp, join_events);
3f9c7369 874
d62a17ae 875 if (send_enqueued_pkts) {
876 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
877 } else {
3f9c7369 878
d62a17ae 879 /*
880 * Hang the peer off of the last, placeholder, packet in the
881 * queue. This means it won't see any of the packets that are
882 * currently in the queue.
883 */
884 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
885 assert(pkt->buffer == NULL);
886 }
3f9c7369 887
d62a17ae 888 bpacket_add_peer(pkt, paf);
3f9c7369 889
d62a17ae 890 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
7bfdba54
S
891 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
892 zlog_debug("peer %s added to subgroup s%" PRIu64,
893 paf->peer->host, subgrp->id);
3f9c7369
DS
894}
895
896/*
897 * update_subgroup_remove_peer_internal
898 *
899 * Internal function that removes a peer from a subgroup, but does not
900 * delete the subgroup. A call to this function must almost always be
901 * followed by a call to update_subgroup_check_delete().
902 *
903 * @see update_subgroup_remove_peer
904 */
d62a17ae 905static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
906 struct peer_af *paf)
3f9c7369 907{
d3e51db0 908 assert(subgrp && paf && subgrp->update_group);
3f9c7369 909
d62a17ae 910 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
911 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
912 }
3f9c7369 913
d62a17ae 914 bpacket_queue_remove_peer(paf);
915 LIST_REMOVE(paf, subgrp_train);
916 paf->subgroup = NULL;
917 subgrp->peer_count--;
3f9c7369 918
7bfdba54
S
919 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
920 zlog_debug("peer %s deleted from subgroup s%"
4882d296 921 PRIu64 " peer cnt %d",
7bfdba54 922 paf->peer->host, subgrp->id, subgrp->peer_count);
d62a17ae 923 SUBGRP_INCR_STAT(subgrp, prune_events);
3f9c7369
DS
924}
925
926/*
927 * update_subgroup_remove_peer
928 */
d62a17ae 929void update_subgroup_remove_peer(struct update_subgroup *subgrp,
930 struct peer_af *paf)
3f9c7369 931{
d62a17ae 932 if (!subgrp || !paf)
933 return;
3f9c7369 934
d62a17ae 935 update_subgroup_remove_peer_internal(subgrp, paf);
3f9c7369 936
d62a17ae 937 if (update_subgroup_check_delete(subgrp))
938 return;
3f9c7369 939
d62a17ae 940 /*
941 * The deletion of the peer may have caused some packets to be
942 * deleted from the subgroup packet queue. Check if the subgroup can
943 * be merged now.
944 */
945 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
3f9c7369
DS
946}
947
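/*
 * update_subgroup_find
 *
 * Find a subgroup within the update group that a newly joining peer_af
 * can attach to: it must still be at version 0, must not be originating a
 * default route, and must not be pending an outbound refresh.
 */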
d62a17ae 948static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
949 struct peer_af *paf)
3f9c7369 950{
d62a17ae 951 struct update_subgroup *subgrp = NULL;
952 uint64_t version;
953
954 if (paf->subgroup) {
955 assert(0);
956 return NULL;
957 } else
958 version = 0;
959
960 if (!peer_established(PAF_PEER(paf)))
961 return NULL;
962
a2addae8 963 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 964 if (subgrp->version != version
965 || CHECK_FLAG(subgrp->sflags,
966 SUBGRP_STATUS_DEFAULT_ORIGINATE))
967 continue;
968
969 /*
970 * The version number is not meaningful on a subgroup that needs
971 * a refresh.
972 */
973 if (update_subgroup_needs_refresh(subgrp))
974 continue;
975
976 break;
977 }
978
979 return subgrp;
3f9c7369
DS
980}
981
982/*
983 * update_subgroup_ready_for_merge
984 *
2951a7a4 985 * Returns true if this subgroup is in a state that allows it to be
3f9c7369
DS
986 * merged into another subgroup.
987 */
d62a17ae 988static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
3f9c7369
DS
989{
990
d62a17ae 991 /*
992 * Not ready if there are any encoded packets waiting to be written
993 * out to peers.
994 */
995 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
996 return 0;
997
998 /*
999 * Not ready if there are enqueued updates waiting to be encoded.
1000 */
1001 if (!advertise_list_is_empty(subgrp))
1002 return 0;
1003
1004 /*
1005 * Don't attempt to merge a subgroup that needs a refresh. For one,
1006 * we can't determine if the adj_out of such a group matches that of
1007 * another group.
1008 */
1009 if (update_subgroup_needs_refresh(subgrp))
1010 return 0;
1011
1012 return 1;
3f9c7369
DS
1013}
1014
1015/*
1016 * update_subgrp_can_merge_into
1017 *
2951a7a4 1018 * Returns true if the first subgroup can merge into the second
3f9c7369
DS
1019 * subgroup.
1020 */
d62a17ae 1021static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1022 struct update_subgroup *target)
3f9c7369
DS
1023{
1024
d62a17ae 1025 if (subgrp == target)
1026 return 0;
3f9c7369 1027
d62a17ae 1028 /*
1029 * Both must have processed the BRIB to the same point in order to
1030 * be merged.
1031 */
1032 if (subgrp->version != target->version)
1033 return 0;
3f9c7369 1034
d62a17ae 1035 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1036 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1037 return 0;
f910ef58 1038
d62a17ae 1039 if (subgrp->adj_count != target->adj_count)
1040 return 0;
3f9c7369 1041
d62a17ae 1042 return update_subgroup_ready_for_merge(target);
3f9c7369
DS
1043}
1044
1045/*
1046 * update_subgroup_merge
1047 *
1048 * Merge the first subgroup into the second one.
1049 */
d62a17ae 1050static void update_subgroup_merge(struct update_subgroup *subgrp,
1051 struct update_subgroup *target,
1052 const char *reason)
3f9c7369 1053{
d62a17ae 1054 struct peer_af *paf;
1055 int result;
1056 int peer_count;
3f9c7369 1057
d62a17ae 1058 assert(subgrp->adj_count == target->adj_count);
3f9c7369 1059
d62a17ae 1060 peer_count = subgrp->peer_count;
3f9c7369 1061
d62a17ae 1062 while (1) {
1063 paf = LIST_FIRST(&subgrp->peers);
1064 if (!paf)
1065 break;
3f9c7369 1066
d62a17ae 1067 update_subgroup_remove_peer_internal(subgrp, paf);
3f9c7369 1068
d62a17ae 1069 /*
1070 * Add the peer to the target subgroup, while making sure that
1071 * any currently enqueued packets won't be sent to it. Enqueued
1072 * packets could, for example, result in an unnecessary withdraw
1073 * followed by an advertise.
1074 */
1075 update_subgroup_add_peer(target, paf, 0);
1076 }
3f9c7369 1077
d62a17ae 1078 SUBGRP_INCR_STAT(target, merge_events);
3f9c7369 1079
d62a17ae 1080 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1081 zlog_debug("u%" PRIu64 ":s%" PRIu64
1082 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1083 ", "
1084 "trigger: %s",
1085 subgrp->update_group->id, subgrp->id, peer_count,
1086 target->update_group->id, target->id,
1087 reason ? reason : "unknown");
3f9c7369 1088
d62a17ae 1089 result = update_subgroup_check_delete(subgrp);
1090 assert(result);
3f9c7369
DS
1091}
1092
1093/*
1094 * update_subgroup_check_merge
1095 *
1096 * Merge this subgroup into another subgroup if possible.
1097 *
2951a7a4 1098 * Returns true if the subgroup has been merged. The subgroup pointer
3f9c7369
DS
1099 * should not be accessed in this case.
1100 */
d62a17ae 1101int update_subgroup_check_merge(struct update_subgroup *subgrp,
1102 const char *reason)
3f9c7369 1103{
d62a17ae 1104 struct update_subgroup *target;
3f9c7369 1105
d62a17ae 1106 if (!update_subgroup_ready_for_merge(subgrp))
1107 return 0;
3f9c7369 1108
d62a17ae 1109 /*
1110 * Look for a subgroup to merge into.
1111 */
a2addae8 1112 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
d62a17ae 1113 if (update_subgroup_can_merge_into(subgrp, target))
1114 break;
1115 }
3f9c7369 1116
d62a17ae 1117 if (!target)
1118 return 0;
3f9c7369 1119
d62a17ae 1120 update_subgroup_merge(subgrp, target, reason);
1121 return 1;
3f9c7369
DS
1122}
1123
d62a17ae 1124/*
9d303b37
DL
1125 * update_subgroup_merge_check_thread_cb
1126 */
d62a17ae 1127static int update_subgroup_merge_check_thread_cb(struct thread *thread)
3f9c7369 1128{
d62a17ae 1129 struct update_subgroup *subgrp;
3f9c7369 1130
d62a17ae 1131 subgrp = THREAD_ARG(thread);
3f9c7369 1132
d62a17ae 1133 subgrp->t_merge_check = NULL;
3f9c7369 1134
d62a17ae 1135 update_subgroup_check_merge(subgrp, "triggered merge check");
1136 return 0;
3f9c7369
DS
1137}
1138
1139/*
1140 * update_subgroup_trigger_merge_check
1141 *
1142 * Triggers a call to update_subgroup_check_merge() on a clean context.
1143 *
1144 * @param force If true, the merge check will be triggered even if the
1145 * subgroup doesn't currently look ready for a merge.
1146 *
2951a7a4 1147 * Returns true if a merge check will be performed shortly.
3f9c7369 1148 */
d62a17ae 1149int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1150 int force)
3f9c7369 1151{
d62a17ae 1152 if (subgrp->t_merge_check)
1153 return 1;
3f9c7369 1154
d62a17ae 1155 if (!force && !update_subgroup_ready_for_merge(subgrp))
1156 return 0;
3f9c7369 1157
d62a17ae 1158 subgrp->t_merge_check = NULL;
1159 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1160 subgrp, 0, &subgrp->t_merge_check);
3f9c7369 1161
d62a17ae 1162 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
3f9c7369 1163
d62a17ae 1164 return 1;
3f9c7369
DS
1165}
1166
1167/*
1168 * update_subgroup_copy_adj_out
1169 *
1170 * Helper function that clones the adj out (state about advertised
1171 * routes) from one subgroup to another. It assumes that the adj out
1172 * of the target subgroup is empty.
1173 */
d62a17ae 1174static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1175 struct update_subgroup *dest)
3f9c7369 1176{
d62a17ae 1177 struct bgp_adj_out *aout, *aout_copy;
1178
a2addae8 1179 SUBGRP_FOREACH_ADJ (source, aout) {
d62a17ae 1180 /*
1181 * Copy the adj out.
1182 */
1183 aout_copy =
1184 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1185 aout_copy->attr =
7c87afac 1186 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
d62a17ae 1187 }
0ab7b206
AD
1188
1189 dest->scount = source->scount;
3f9c7369
DS
1190}
1191
1192/*
1193 * update_subgroup_copy_packets
1194 *
1195 * Copy packets after and including the given packet to the subgroup
1196 * 'dest'.
1197 *
1198 * Returns the number of packets copied.
1199 */
d62a17ae 1200static int update_subgroup_copy_packets(struct update_subgroup *dest,
1201 struct bpacket *pkt)
3f9c7369 1202{
d62a17ae 1203 int count;
1204
1205 count = 0;
1206 while (pkt && pkt->buffer) {
1207 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1208 &pkt->arr);
1209 count++;
1210 pkt = bpacket_next(pkt);
1211 }
3f9c7369 1212
d62a17ae 1213 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
3f9c7369 1214
d62a17ae 1215 return count;
3f9c7369
DS
1216}
1217
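/*
 * The updgrp_*_update() helpers below refresh an update group's cached
 * outbound policy pointers (prefix-list, filter-list, distribute-list,
 * route-map / unsuppress-map / default-originate route-map) when the
 * named policy object changes. Each returns nonzero if the group's
 * outbound policy references that name (a default-originate route-map
 * change is reported separately via def_rmap_changed).
 */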
d62a17ae 1218static int updgrp_prefix_list_update(struct update_group *updgrp,
1219 const char *name)
3f9c7369 1220{
d62a17ae 1221 struct peer *peer;
1222 struct bgp_filter *filter;
1223
1224 peer = UPDGRP_PEER(updgrp);
1225 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1226
1227 if (PREFIX_LIST_OUT_NAME(filter)
1228 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1229 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1230 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1231 return 1;
1232 }
1233 return 0;
3f9c7369
DS
1234}
1235
d62a17ae 1236static int updgrp_filter_list_update(struct update_group *updgrp,
1237 const char *name)
3f9c7369 1238{
d62a17ae 1239 struct peer *peer;
1240 struct bgp_filter *filter;
1241
1242 peer = UPDGRP_PEER(updgrp);
1243 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1244
1245 if (FILTER_LIST_OUT_NAME(filter)
1246 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1247 FILTER_LIST_OUT(filter) =
1248 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1249 return 1;
1250 }
1251 return 0;
3f9c7369
DS
1252}
1253
d62a17ae 1254static int updgrp_distribute_list_update(struct update_group *updgrp,
1255 const char *name)
3f9c7369 1256{
d62a17ae 1257 struct peer *peer;
1258 struct bgp_filter *filter;
1259
1260 peer = UPDGRP_PEER(updgrp);
1261 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1262
1263 if (DISTRIBUTE_OUT_NAME(filter)
1264 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1265 DISTRIBUTE_OUT(filter) = access_list_lookup(
1266 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1267 return 1;
1268 }
1269 return 0;
3f9c7369
DS
1270}
1271
d62a17ae 1272static int updgrp_route_map_update(struct update_group *updgrp,
1273 const char *name, int *def_rmap_changed)
3f9c7369 1274{
d62a17ae 1275 struct peer *peer;
1276 struct bgp_filter *filter;
1277 int changed = 0;
1278 afi_t afi;
1279 safi_t safi;
1280
1281 peer = UPDGRP_PEER(updgrp);
1282 afi = UPDGRP_AFI(updgrp);
1283 safi = UPDGRP_SAFI(updgrp);
1284 filter = &peer->filter[afi][safi];
1285
1286 if (ROUTE_MAP_OUT_NAME(filter)
1287 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1288 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1289
1290 changed = 1;
1291 }
1292
1293 if (UNSUPPRESS_MAP_NAME(filter)
1294 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1295 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1296 changed = 1;
1297 }
1298
1299 /* process default-originate route-map */
1300 if (peer->default_rmap[afi][safi].name
1301 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1302 peer->default_rmap[afi][safi].map =
1303 route_map_lookup_by_name(name);
1304 if (def_rmap_changed)
1305 *def_rmap_changed = 1;
1306 }
1307 return changed;
3f9c7369
DS
1308}
1309
1310/*
1311 * hash iteration callback function to process a policy change for an
1312 * update group. Check if the changed policy matches the updgrp's
1313 * outbound route-map or unsuppress-map or default-originate map or
1314 * filter-list or prefix-list or distribute-list.
1315 * Trigger update generation accordingly.
1316 */
d62a17ae 1317static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
3f9c7369 1318{
d62a17ae 1319 struct updwalk_context *ctx = arg;
1320 struct update_subgroup *subgrp;
1321 int changed = 0;
1322 int def_changed = 0;
1323
1324 if (!updgrp || !ctx || !ctx->policy_name)
1325 return UPDWALK_CONTINUE;
1326
1327 switch (ctx->policy_type) {
1328 case BGP_POLICY_ROUTE_MAP:
1329 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1330 &def_changed);
1331 break;
1332 case BGP_POLICY_FILTER_LIST:
1333 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1334 break;
1335 case BGP_POLICY_PREFIX_LIST:
1336 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1337 break;
1338 case BGP_POLICY_DISTRIBUTE_LIST:
1339 changed =
1340 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1341 break;
1342 default:
1343 break;
1344 }
1345
1346 /* If not doing route update, return after updating "config" */
1347 if (!ctx->policy_route_update)
1348 return UPDWALK_CONTINUE;
1349
1350 /* If nothing has changed, return after updating "config" */
1351 if (!changed && !def_changed)
1352 return UPDWALK_CONTINUE;
1353
1354 /*
1355 * If something has changed, at the beginning of a route-map
1356 * modification event, mark each subgroup's needs-refresh bit. For one,
1357 * it signals to whoever that the subgroup needs a refresh. Second, it
1358 * prevents premature merge of this subgroup with another before a
1359 * complete (outbound) refresh.
1362 */
1363 if (ctx->policy_event_start_flag) {
a2addae8 1364 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1365 update_subgroup_set_needs_refresh(subgrp, 1);
1366 }
1367 return UPDWALK_CONTINUE;
1368 }
1369
a2addae8 1370 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1371 if (changed) {
1372 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1373 zlog_debug(
1374 "u%" PRIu64 ":s%" PRIu64
1375 " announcing routes upon policy %s (type %d) change",
1376 updgrp->id, subgrp->id,
1377 ctx->policy_name, ctx->policy_type);
1378 subgroup_announce_route(subgrp);
1379 }
1380 if (def_changed) {
1381 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1382 zlog_debug(
1383 "u%" PRIu64 ":s%" PRIu64
1384 " announcing default upon default routemap %s change",
1385 updgrp->id, subgrp->id,
1386 ctx->policy_name);
1387 subgroup_default_originate(subgrp, 0);
1388 }
1389 update_subgroup_set_needs_refresh(subgrp, 0);
1390 }
1391 return UPDWALK_CONTINUE;
3f9c7369
DS
1392}
1393
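/*
 * update_group_walkcb
 *
 * Adapter between the generic hash walk and the updgrp walk callback:
 * unwrap the hash bucket and invoke the user-supplied callback with its
 * context.
 */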
e3b78da8 1394static int update_group_walkcb(struct hash_bucket *bucket, void *arg)
3f9c7369 1395{
e3b78da8 1396 struct update_group *updgrp = bucket->data;
d62a17ae 1397 struct updwalk_context *wctx = arg;
1398 int ret = (*wctx->cb)(updgrp, wctx->context);
1399 return ret;
3f9c7369
DS
1400}
1401
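/*
 * update_group_periodic_merge_walkcb
 *
 * Walk callback used by the periodic merge: attempt to merge each
 * subgroup of the update group into a peer subgroup.
 */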
d62a17ae 1402static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1403 void *arg)
3f9c7369 1404{
d62a17ae 1405 struct update_subgroup *subgrp;
1406 struct update_subgroup *tmp_subgrp;
1407 const char *reason = arg;
3f9c7369 1408
a2addae8
RW
1409 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1410 update_subgroup_check_merge(subgrp, reason);
d62a17ae 1411 return UPDWALK_CONTINUE;
3f9c7369
DS
1412}
1413
1414/********************
1415 * PUBLIC FUNCTIONS
1416 ********************/
1417
1418/*
1419 * trigger function when a policy (route-map/filter-list/prefix-list/
1420 * distribute-list etc.) content changes. Go through all the
1421 * update groups and process the change.
1422 *
1423 * bgp: the bgp instance
1424 * ptype: the type of policy that got modified, see bgpd.h
1425 * pname: name of the policy
1426 * route_update: flag to control if an automatic update generation should
1427 * occur
1428 * start_event: flag that indicates if it's the beginning of the change.
1429 * Esp. when the user is changing the content interactively
1430 * over multiple statements. Useful to set dirty flag on
1431 * update groups.
1432 */
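/*
 * Illustrative sketch (not from this file): a handler reacting to a
 * change of an outbound route-map named "RMAP-OUT" (hypothetical name)
 * could call
 *
 *   update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, "RMAP-OUT", 1, 0);
 *
 * to walk every update group and trigger re-announcement for the groups
 * whose outbound policy references that route-map.
 */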
d62a17ae 1433void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1434 const char *pname, int route_update,
1435 int start_event)
3f9c7369 1436{
d62a17ae 1437 struct updwalk_context ctx;
3f9c7369 1438
d62a17ae 1439 memset(&ctx, 0, sizeof(ctx));
1440 ctx.policy_type = ptype;
1441 ctx.policy_name = pname;
1442 ctx.policy_route_update = route_update;
1443 ctx.policy_event_start_flag = start_event;
1444 ctx.flags = 0;
3f9c7369 1445
d62a17ae 1446 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
3f9c7369
DS
1447}
1448
1449/*
1450 * update_subgroup_split_peer
1451 *
1452 * Ensure that the given peer is in a subgroup of its own in the
1453 * specified update group.
1454 */
d62a17ae 1455void update_subgroup_split_peer(struct peer_af *paf,
1456 struct update_group *updgrp)
3f9c7369 1457{
d62a17ae 1458 struct update_subgroup *old_subgrp, *subgrp;
1459 uint64_t old_id;
1460
1461
1462 old_subgrp = paf->subgroup;
1463
1464 if (!updgrp)
1465 updgrp = old_subgrp->update_group;
1466
1467 /*
1468 * If the peer is alone in its subgroup, reuse the existing
1469 * subgroup.
1470 */
1471 if (old_subgrp->peer_count == 1) {
1472 if (updgrp == old_subgrp->update_group)
1473 return;
1474
1475 subgrp = old_subgrp;
1476 old_id = old_subgrp->update_group->id;
1477
1478 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1479 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1480 }
1481
1482 update_group_remove_subgroup(old_subgrp->update_group,
1483 old_subgrp);
1484 update_group_add_subgroup(updgrp, subgrp);
1485
1486 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1487 UPDGRP_PEER_DBG_EN(updgrp);
1488 }
1489 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1490 zlog_debug("u%" PRIu64 ":s%" PRIu64
1491 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1492 old_id, subgrp->id, paf->peer->host,
1493 updgrp->id, subgrp->id);
1494
1495 /*
1496 * The state of the subgroup (adj_out, advs, packet queue etc)
1497 * is consistent internally, but may not be identical to other
1498 * subgroups in the new update group even if the version number
1499 * matches up. Make sure a full refresh is done before the
1500 * subgroup is merged with another.
1501 */
1502 update_subgroup_set_needs_refresh(subgrp, 1);
1503
1504 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1505 return;
1506 }
3f9c7369 1507
d62a17ae 1508 /*
1509 * Create a new subgroup under the specified update group, and copy
1510 * over relevant state to it.
1511 */
1512 subgrp = update_subgroup_create(updgrp);
1513 update_subgroup_inherit_info(subgrp, old_subgrp);
1514
1515 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1516 subgrp->split_from.subgroup_id = old_subgrp->id;
1517
1518 /*
1519 * Copy out relevant state from the old subgroup.
1520 */
1521 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1522 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1523
1524 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1525 zlog_debug("u%" PRIu64 ":s%" PRIu64
1526 " peer %s split and moved into u%" PRIu64
1527 ":s%" PRIu64,
1528 paf->subgroup->update_group->id, paf->subgroup->id,
1529 paf->peer->host, updgrp->id, subgrp->id);
1530
1531 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1532
1533 /*
1534 * Since queued advs were left behind, this new subgroup needs a
1535 * refresh.
1536 */
1537 update_subgroup_set_needs_refresh(subgrp, 1);
1538
1539 /*
1540 * Remove peer from old subgroup, and add it to the new one.
1541 */
1542 update_subgroup_remove_peer(paf->subgroup, paf);
1543
1544 update_subgroup_add_peer(subgrp, paf, 1);
3f9c7369
DS
1545}
1546
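/*
 * update_bgp_group_init
 *
 * Create the per-address-family update-group hash tables for a bgp
 * instance; update_bgp_group_free() below tears them down.
 */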
d62a17ae 1547void update_bgp_group_init(struct bgp *bgp)
3f9c7369 1548{
d62a17ae 1549 int afid;
3f9c7369 1550
a2addae8 1551 AF_FOREACH (afid)
3f65c5b1 1552 bgp->update_groups[afid] =
996c9314 1553 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
3f65c5b1 1554 "BGP Update Group Hash");
3f9c7369
DS
1555}
1556
d62a17ae 1557void update_bgp_group_free(struct bgp *bgp)
3d68677e 1558{
d62a17ae 1559 int afid;
1560
a2addae8 1561 AF_FOREACH (afid) {
d62a17ae 1562 if (bgp->update_groups[afid]) {
1563 hash_free(bgp->update_groups[afid]);
1564 bgp->update_groups[afid] = NULL;
1565 }
1566 }
3d68677e
DS
1567}
1568
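/*
 * update_group_show
 *
 * Display update groups for the given AFI/SAFI on the vty, optionally
 * limited to a single subgroup id, by walking them with
 * update_group_show_walkcb().
 */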
d62a17ae 1569void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1570 uint64_t subgrp_id)
3f9c7369 1571{
d62a17ae 1572 struct updwalk_context ctx;
1573 memset(&ctx, 0, sizeof(ctx));
1574 ctx.vty = vty;
1575 ctx.subgrp_id = subgrp_id;
8fe8a7f6 1576
d62a17ae 1577 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
3f9c7369
DS
1578}
1579
1580/*
1581 * update_group_show_stats
1582 *
1583 * Show global statistics about update groups.
1584 */
d62a17ae 1585void update_group_show_stats(struct bgp *bgp, struct vty *vty)
3f9c7369 1586{
d62a17ae 1587 vty_out(vty, "Update groups created: %u\n",
1588 bgp->update_group_stats.updgrps_created);
1589 vty_out(vty, "Update groups deleted: %u\n",
1590 bgp->update_group_stats.updgrps_deleted);
1591 vty_out(vty, "Update subgroups created: %u\n",
1592 bgp->update_group_stats.subgrps_created);
1593 vty_out(vty, "Update subgroups deleted: %u\n",
1594 bgp->update_group_stats.subgrps_deleted);
1595 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1596 vty_out(vty, "Prune events: %u\n",
1597 bgp->update_group_stats.prune_events);
1598 vty_out(vty, "Merge events: %u\n",
1599 bgp->update_group_stats.merge_events);
1600 vty_out(vty, "Split events: %u\n",
1601 bgp->update_group_stats.split_events);
1602 vty_out(vty, "Update group switch events: %u\n",
1603 bgp->update_group_stats.updgrp_switch_events);
1604 vty_out(vty, "Peer route refreshes combined: %u\n",
1605 bgp->update_group_stats.peer_refreshes_combined);
1606 vty_out(vty, "Merge checks triggered: %u\n",
1607 bgp->update_group_stats.merge_checks_triggered);
3f9c7369
DS
1608}
1609
1610/*
1611 * update_group_adjust_peer
1612 */
d62a17ae 1613void update_group_adjust_peer(struct peer_af *paf)
3f9c7369 1614{
d62a17ae 1615 struct update_group *updgrp;
1616 struct update_subgroup *subgrp, *old_subgrp;
1617 struct peer *peer;
1618
1619 if (!paf)
1620 return;
1621
1622 peer = PAF_PEER(paf);
1623 if (!peer_established(peer)) {
1624 return;
1625 }
1626
1627 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1628 return;
3f9c7369 1629 }
3f9c7369 1630
d62a17ae 1631 if (!peer->afc_nego[paf->afi][paf->safi]) {
1632 return;
1633 }
3f9c7369 1634
d62a17ae 1635 updgrp = update_group_find(paf);
1636 if (!updgrp) {
1637 updgrp = update_group_create(paf);
1638 if (!updgrp) {
e50f7cfd 1639 flog_err(EC_BGP_UPDGRP_CREATE,
1c50c1c0
QY
1640 "couldn't create update group for peer %s",
1641 paf->peer->host);
d62a17ae 1642 return;
1643 }
1644 }
3f9c7369 1645
d62a17ae 1646 old_subgrp = paf->subgroup;
3f9c7369 1647
d62a17ae 1648 if (old_subgrp) {
3f9c7369 1649
d62a17ae 1650 /*
1651 * If the update group of the peer is unchanged, the peer can stay
1652 * in its existing subgroup and we're done.
1654 */
1655 if (old_subgrp->update_group == updgrp)
1656 return;
1657
1658 /*
1659 * The peer is switching between update groups. Put it in its
1660 * own subgroup under the new update group.
1661 */
1662 update_subgroup_split_peer(paf, updgrp);
1663 return;
1664 }
1665
1666 subgrp = update_subgroup_find(updgrp, paf);
1667 if (!subgrp) {
1668 subgrp = update_subgroup_create(updgrp);
1669 if (!subgrp)
1670 return;
1671 }
3f9c7369 1672
d62a17ae 1673 update_subgroup_add_peer(subgrp, paf, 1);
1674 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1675 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1676 subgrp->id, paf->peer->host);
1677
1678 return;
3f9c7369
DS
1679}
1680
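/*
 * update_group_adjust_soloness
 *
 * Apply or clear the lonesoul setting on a peer, or on every member of
 * its peer-group, and re-announce routes to any established sessions so
 * the change takes effect.
 */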
d62a17ae 1681int update_group_adjust_soloness(struct peer *peer, int set)
3f9c7369 1682{
d62a17ae 1683 struct peer_group *group;
1684 struct listnode *node, *nnode;
1685
1686 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1687 peer_lonesoul_or_not(peer, set);
1688 if (peer->status == Established)
1689 bgp_announce_route_all(peer);
1690 } else {
1691 group = peer->group;
1692 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1693 peer_lonesoul_or_not(peer, set);
1694 if (peer->status == Established)
1695 bgp_announce_route_all(peer);
1696 }
1697 }
1698 return 0;
3f9c7369
DS
1699}
1700
1701/*
1702 * update_subgroup_rib
1703 */
d62a17ae 1704struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
3f9c7369 1705{
d62a17ae 1706 struct bgp *bgp;
3f9c7369 1707
d62a17ae 1708 bgp = SUBGRP_INST(subgrp);
1709 if (!bgp)
1710 return NULL;
3f9c7369 1711
d62a17ae 1712 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
3f9c7369
DS
1713}
1714
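/*
 * update_group_af_walk
 *
 * Invoke the given callback for every update group of the specified
 * AFI/SAFI; update_group_walk() below iterates over all AFI/SAFI
 * combinations.
 */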
d62a17ae 1715void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1716 updgrp_walkcb cb, void *ctx)
3f9c7369 1717{
d62a17ae 1718 struct updwalk_context wctx;
1719 int afid;
3f9c7369 1720
d62a17ae 1721 if (!bgp)
1722 return;
1723 afid = afindex(afi, safi);
1724 if (afid >= BGP_AF_MAX)
1725 return;
3f9c7369 1726
d62a17ae 1727 memset(&wctx, 0, sizeof(wctx));
1728 wctx.cb = cb;
1729 wctx.context = ctx;
0de4848d 1730
d62a17ae 1731 if (bgp->update_groups[afid])
1732 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
3f9c7369
DS
1733}
1734
d62a17ae 1735void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
3f9c7369 1736{
d62a17ae 1737 afi_t afi;
1738 safi_t safi;
3f9c7369 1739
a2addae8 1740 FOREACH_AFI_SAFI (afi, safi) {
d62a17ae 1741 update_group_af_walk(bgp, afi, safi, cb, ctx);
1742 }
3f9c7369
DS
1743}
1744
d62a17ae 1745void update_group_periodic_merge(struct bgp *bgp)
3f9c7369 1746{
d62a17ae 1747 char reason[] = "periodic merge check";
3f9c7369 1748
d62a17ae 1749 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1750 (void *)reason);
3f9c7369
DS
1751}
1752
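/*
 * Walk callback for update_group_refresh_default_originate_route_map():
 * re-evaluate default-originate for every subgroup whose peer has a
 * default-originate route-map configured.
 */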
0de4848d
DS
1753static int
1754update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
d62a17ae 1755 void *arg)
0de4848d 1756{
d62a17ae 1757 struct update_subgroup *subgrp;
1758 struct peer *peer;
1759 afi_t afi;
1760 safi_t safi;
1761
a2addae8 1762 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
d62a17ae 1763 peer = SUBGRP_PEER(subgrp);
1764 afi = SUBGRP_AFI(subgrp);
1765 safi = SUBGRP_SAFI(subgrp);
1766
1767 if (peer->default_rmap[afi][safi].name) {
1768 subgroup_default_originate(subgrp, 0);
1769 }
1770 }
1771
1772 return UPDWALK_CONTINUE;
0de4848d
DS
1773}
1774
d62a17ae 1775int update_group_refresh_default_originate_route_map(struct thread *thread)
0de4848d 1776{
d62a17ae 1777 struct bgp *bgp;
1778 char reason[] = "refresh default-originate route-map";
0de4848d 1779
d62a17ae 1780 bgp = THREAD_ARG(thread);
1781 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1782 reason);
1783 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1784 bgp_unlock(bgp);
ffd0c037 1785
d62a17ae 1786 return (0);
0de4848d
DS
1787}
1788
3f9c7369
DS
1789/*
1790 * peer_af_announce_route
1791 *
1792 * Refreshes routes out to a peer_af immediately.
1793 *
2951a7a4 1794 * If the combine parameter is true, then this function will try to
3f9c7369
DS
1795 * gather other peers in the subgroup for which a route announcement
1796 * is pending and efficiently announce routes to all of them.
1797 *
1798 * For now, the 'combine' option has an effect only if all peers in
1799 * the subgroup have a route announcement pending.
1800 */
d62a17ae 1801void peer_af_announce_route(struct peer_af *paf, int combine)
3f9c7369 1802{
d62a17ae 1803 struct update_subgroup *subgrp;
1804 struct peer_af *cur_paf;
1805 int all_pending;
1806
1807 subgrp = paf->subgroup;
1808 all_pending = 0;
1809
1810 if (combine) {
1811 /*
1812 * If there are other peers in the old subgroup that also need
1813 * routes to be announced, pull them into the peer's new
1814 * subgroup.
1815 * Combine route announcement with other peers if possible.
1816 *
1817 * For now, we combine only if all peers in the subgroup have an
1818 * announcement pending.
1819 */
1820 all_pending = 1;
1821
a2addae8 1822 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
d62a17ae 1823 if (cur_paf == paf)
1824 continue;
1825
1826 if (cur_paf->t_announce_route)
1827 continue;
1828
1829 all_pending = 0;
1830 break;
1831 }
1832 }
1833 /*
1834 * Announce to the peer alone if we were not asked to combine peers,
1835 * or if some peers don't have a route announcement pending.
1836 */
1837 if (!combine || !all_pending) {
1838 update_subgroup_split_peer(paf, NULL);
7bfdba54 1839 subgrp = paf->subgroup;
d62a17ae 1840
7bfdba54 1841 assert(subgrp && subgrp->update_group);
d62a17ae 1842 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1843 zlog_debug("u%" PRIu64 ":s%" PRIu64
1844 " %s announcing routes",
1845 subgrp->update_group->id, subgrp->id,
1846 paf->peer->host);
1847
1848 subgroup_announce_route(paf->subgroup);
1849 return;
3f9c7369 1850 }
3f9c7369 1851
d62a17ae 1852 /*
1853 * We will announce routes the entire subgroup.
1854 *
1855 * First stop refresh timers on all the other peers.
1856 */
a2addae8 1857 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
d62a17ae 1858 if (cur_paf == paf)
1859 continue;
3f9c7369 1860
d62a17ae 1861 bgp_stop_announce_route_timer(cur_paf);
1862 }
3f9c7369 1863
d62a17ae 1864 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1865 zlog_debug("u%" PRIu64 ":s%" PRIu64
1866 " announcing routes to %s, combined into %d peers",
1867 subgrp->update_group->id, subgrp->id,
1868 paf->peer->host, subgrp->peer_count);
3f9c7369 1869
d62a17ae 1870 subgroup_announce_route(subgrp);
3f9c7369 1871
d62a17ae 1872 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1873 subgrp->peer_count - 1);
3f9c7369
DS
1874}
1875
2fc102e1
QY
1876void subgroup_trigger_write(struct update_subgroup *subgrp)
1877{
1878 struct peer_af *paf;
1879
becedef6
QY
1880 /*
1881 * For each peer in the subgroup, schedule a job to pull packets from
1882 * the subgroup output queue into their own output queue. This action
1883 * will trigger a write job on the I/O thread.
1884 */
996c9314
LB
1885 SUBGRP_FOREACH_PEER (subgrp, paf)
1886 if (paf->peer->status == Established)
1887 thread_add_timer_msec(
1888 bm->master, bgp_generate_updgrp_packets,
1889 paf->peer, 0,
1890 &paf->peer->t_generate_updgrp_packets);
2fc102e1
QY
1891}
1892
d62a17ae 1893int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
3f9c7369 1894{
d62a17ae 1895 UPDGRP_PEER_DBG_OFF(updgrp);
1896 return UPDWALK_CONTINUE;
3f9c7369 1897}
adbac85e 1898
06370dac 1899/* Return true if we should addpath encode NLRI to this peer */
d62a17ae 1900int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
adbac85e 1901{
d62a17ae 1902 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1903 && CHECK_FLAG(peer->af_cap[afi][safi],
1904 PEER_CAP_ADDPATH_AF_RX_RCV));
adbac85e 1905}