1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_errors.h"
51 #include "bgpd/bgp_fsm.h"
52 #include "bgpd/bgp_addpath.h"
53 #include "bgpd/bgp_advertise.h"
54 #include "bgpd/bgp_packet.h"
55 #include "bgpd/bgp_updgrp.h"
56 #include "bgpd/bgp_route.h"
57 #include "bgpd/bgp_filter.h"
58 #include "bgpd/bgp_io.h"
59
60 /********************
61 * PRIVATE FUNCTIONS
62 ********************/
63
64 /**
65  * Assign a unique ID to an update group or subgroup. Mostly for display/
66  * debugging purposes. The ID space is 64 bits and is used freely, without
67  * worrying about wrapping or about filling gaps. While at it, timestamp
68  * the creation.
69 */
70 static void update_group_checkin(struct update_group *updgrp)
71 {
72 updgrp->id = ++bm->updgrp_idspace;
73 updgrp->uptime = monotime(NULL);
74 }
75
76 static void update_subgroup_checkin(struct update_subgroup *subgrp,
77 struct update_group *updgrp)
78 {
79 subgrp->id = ++bm->subgrp_idspace;
80 subgrp->uptime = monotime(NULL);
81 }
82
83 static void sync_init(struct update_subgroup *subgrp,
84 struct update_group *updgrp)
85 {
86 struct peer *peer = UPDGRP_PEER(updgrp);
87
88 subgrp->sync =
89 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
90 bgp_adv_fifo_init(&subgrp->sync->update);
91 bgp_adv_fifo_init(&subgrp->sync->withdraw);
92 bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
93 subgrp->hash =
94 hash_create(bgp_advertise_attr_hash_key,
95 bgp_advertise_attr_hash_cmp, "BGP SubGroup Hash");
96
97 	/* We use a larger buffer for subgrp->work in the event that:
98 	 * - We RX a BGP_UPDATE where the attributes alone are just
99 	 *   under 4096 or 65535 (if Extended Message capability negotiated).
100 	 * - The user configures an outbound route-map that does many as-path
101 	 *   prepends or adds many communities. At most they can have
102 	 *   CMD_ARGC_MAX args in a route-map, so there is a finite limit
103 	 *   on how large they can make the attributes.
104 	 *
105 	 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to
106 	 *   avoid bounds checking for every single attribute as we
107 	 *   construct an UPDATE.
108 	 */
111 subgrp->work = stream_new(peer->max_packet_size
112 + BGP_MAX_PACKET_SIZE_OVERFLOW);
113 subgrp->scratch = stream_new(peer->max_packet_size);
114 }
115
116 static void sync_delete(struct update_subgroup *subgrp)
117 {
118 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
119 if (subgrp->hash) {
120 hash_clean(subgrp->hash,
121 (void (*)(void *))bgp_advertise_attr_free);
122 hash_free(subgrp->hash);
123 }
124 subgrp->hash = NULL;
125 if (subgrp->work)
126 stream_free(subgrp->work);
127 subgrp->work = NULL;
128 if (subgrp->scratch)
129 stream_free(subgrp->scratch);
130 subgrp->scratch = NULL;
131 }
132
133 /**
134 * conf_copy
135 *
136  * Copy only those fields that are relevant to the update group match.
137 */
138 static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
139 safi_t safi)
140 {
141 struct bgp_filter *srcfilter;
142 struct bgp_filter *dstfilter;
143
144 srcfilter = &src->filter[afi][safi];
145 dstfilter = &dst->filter[afi][safi];
146
147 dst->bgp = src->bgp;
148 dst->sort = src->sort;
149 dst->as = src->as;
150 dst->v_routeadv = src->v_routeadv;
151 dst->flags = src->flags;
152 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
153 dst->pmax_out[afi][safi] = src->pmax_out[afi][safi];
154 dst->max_packet_size = src->max_packet_size;
155 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
156
157 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
158 dst->cap = src->cap;
159 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
160 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
161 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
162 dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
163 dst->local_as = src->local_as;
164 dst->change_local_as = src->change_local_as;
165 dst->shared_network = src->shared_network;
166 dst->local_role = src->local_role;
167
168 if (src->soo[afi][safi]) {
169 ecommunity_free(&dst->soo[afi][safi]);
170 dst->soo[afi][safi] = ecommunity_dup(src->soo[afi][safi]);
171 }
172
173 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
174
175 dst->group = src->group;
176
177 if (src->default_rmap[afi][safi].name) {
178 dst->default_rmap[afi][safi].name =
179 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
180 src->default_rmap[afi][safi].name);
181 dst->default_rmap[afi][safi].map =
182 src->default_rmap[afi][safi].map;
183 }
184
185 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
186 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
187 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
188 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
189 }
190
191 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
192 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
193 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
194 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
195 }
196
197 if (FILTER_LIST_OUT_NAME(srcfilter)) {
198 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
199 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
200 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
201 }
202
203 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
204 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
205 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
206 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
207 }
208
209 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
210 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
211 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
212 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
213 }
214
215 if (ADVERTISE_MAP_NAME(srcfilter)) {
216 ADVERTISE_MAP_NAME(dstfilter) = XSTRDUP(
217 MTYPE_BGP_FILTER_NAME, ADVERTISE_MAP_NAME(srcfilter));
218 ADVERTISE_MAP(dstfilter) = ADVERTISE_MAP(srcfilter);
219 ADVERTISE_CONDITION(dstfilter) = ADVERTISE_CONDITION(srcfilter);
220 }
221
222 if (CONDITION_MAP_NAME(srcfilter)) {
223 CONDITION_MAP_NAME(dstfilter) = XSTRDUP(
224 MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter));
225 CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter);
226 }
227
228 dstfilter->advmap.update_type = srcfilter->advmap.update_type;
229 }
230
231 /**
232  * Free the strings that conf_copy() duplicated with XSTRDUP.
233 */
234 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
235 {
236 struct bgp_filter *srcfilter;
237
238 srcfilter = &src->filter[afi][safi];
239
240 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
241
242 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
243
244 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
245
246 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
247
248 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
249
250 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
251
252 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.aname);
253
254 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);
255
256 XFREE(MTYPE_BGP_PEER_HOST, src->host);
257
258 ecommunity_free(&src->soo[afi][safi]);
259 }
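
/*
 * Illustration only (not compiled): conf_copy() and conf_release() are used
 * as a pair around a scratch "struct peer" that exists purely for hash
 * matching, mirroring what update_group_find()/update_group_create() do
 * further below. A minimal sketch of that pattern:
 *
 *	struct update_group tmp;
 *	struct peer tmp_conf;
 *
 *	memset(&tmp, 0, sizeof(tmp));
 *	memset(&tmp_conf, 0, sizeof(tmp_conf));
 *	tmp.conf = &tmp_conf;
 *	peer2_updgrp_copy(&tmp, paf);        - fills tmp_conf via conf_copy()
 *	... hash lookup or insert keyed on tmp ...
 *	conf_release(&tmp_conf, paf->afi, paf->safi);
 */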
260
261 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
262 {
263 struct peer *src;
264 struct peer *dst;
265
266 if (!updgrp || !paf)
267 return;
268
269 src = paf->peer;
270 dst = updgrp->conf;
271 if (!src || !dst)
272 return;
273
274 updgrp->afi = paf->afi;
275 updgrp->safi = paf->safi;
276 updgrp->afid = paf->afid;
277 updgrp->bgp = src->bgp;
278
279 conf_copy(dst, src, paf->afi, paf->safi);
280 }
281
282 /**
283 * auxiliary functions to maintain the hash table.
284 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
285 * - updgrp_hash_key_make - makes the key for update group search
286 * - updgrp_hash_cmp - compare two update groups.
287 */
288 static void *updgrp_hash_alloc(void *p)
289 {
290 struct update_group *updgrp;
291 const struct update_group *in;
292
293 in = (const struct update_group *)p;
294 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
295 memcpy(updgrp, in, sizeof(struct update_group));
296 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
297 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
298 return updgrp;
299 }
300
301 /**
302 * The hash value for a peer is computed from the following variables:
303 * v = f(
304 * 1. IBGP (1) or EBGP (2)
305 * 2. FLAGS based on configuration:
306 * LOCAL_AS_NO_PREPEND
307 * LOCAL_AS_REPLACE_AS
308 * 3. AF_FLAGS based on configuration:
309 * Refer to definition in bgp_updgrp.h
310 * 4. (AF-independent) Capability flags:
311 * AS4_RCV capability
312 * 5. (AF-dependent) Capability flags:
313 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
314 * 6. MRAI
315 * 7. peer-group name
316 * 8. Outbound route-map name (neighbor route-map <> out)
317 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
318 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
319 * 11. Outbound as-list name (neighbor filter-list <> out)
320 * 12. Unsuppress map name (neighbor unsuppress-map <>)
321 * 13. default rmap name (neighbor default-originate route-map <>)
322 * 14. encoding both global and link-local nexthop?
323 * 15. If peer is configured to be a lonesoul, peer ip address
324 * 16. Local-as should match, if configured.
325 * 17. maximum-prefix-out
326 * 18. Local-role should also match, if configured.
327 * )
328 */
329 static unsigned int updgrp_hash_key_make(const void *p)
330 {
331 const struct update_group *updgrp;
332 const struct peer *peer;
333 const struct bgp_filter *filter;
334 uint32_t flags;
335 uint32_t key;
336 afi_t afi;
337 safi_t safi;
338
339 #define SEED1 999331
340 #define SEED2 2147483647
341
342 updgrp = p;
343 peer = updgrp->conf;
344 afi = updgrp->afi;
345 safi = updgrp->safi;
346 flags = peer->af_flags[afi][safi];
347 filter = &peer->filter[afi][safi];
348
349 key = 0;
350
351 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
352 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
353 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
354 key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
355 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
356 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
357 key);
358 key = jhash_1word(peer->v_routeadv, key);
359 key = jhash_1word(peer->change_local_as, key);
360 key = jhash_1word(peer->max_packet_size, key);
361 key = jhash_1word(peer->pmax_out[afi][safi], key);
362
363 if (peer->group)
364 key = jhash_1word(jhash(peer->group->name,
365 strlen(peer->group->name), SEED1),
366 key);
367
368 if (filter->map[RMAP_OUT].name)
369 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
370 strlen(filter->map[RMAP_OUT].name),
371 SEED1),
372 key);
373
374 if (filter->dlist[FILTER_OUT].name)
375 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
376 strlen(filter->dlist[FILTER_OUT].name),
377 SEED1),
378 key);
379
380 if (filter->plist[FILTER_OUT].name)
381 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
382 strlen(filter->plist[FILTER_OUT].name),
383 SEED1),
384 key);
385
386 if (filter->aslist[FILTER_OUT].name)
387 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
388 strlen(filter->aslist[FILTER_OUT].name),
389 SEED1),
390 key);
391
392 if (filter->usmap.name)
393 key = jhash_1word(jhash(filter->usmap.name,
394 strlen(filter->usmap.name), SEED1),
395 key);
396
397 if (filter->advmap.aname)
398 key = jhash_1word(jhash(filter->advmap.aname,
399 strlen(filter->advmap.aname), SEED1),
400 key);
401
402 if (filter->advmap.update_type)
403 key = jhash_1word(filter->advmap.update_type, key);
404
405 if (peer->default_rmap[afi][safi].name)
406 key = jhash_1word(
407 jhash(peer->default_rmap[afi][safi].name,
408 strlen(peer->default_rmap[afi][safi].name),
409 SEED1),
410 key);
411
412 /* If peer is on a shared network and is exchanging IPv6 prefixes,
413 * it needs to include link-local address. That's different from
414 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
415 * bytes). We create different update groups to take care of that.
416 */
417 key = jhash_1word(
418 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
419 key);
420 /*
421 * There are certain peers that must get their own update-group:
422 * - lonesoul peers
423 * - peers that negotiated ORF
424 	 * - peers with maximum-prefix-out set
425 */
426 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
427 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
428 || CHECK_FLAG(peer->af_cap[afi][safi],
429 PEER_CAP_ORF_PREFIX_SM_OLD_RCV)
430 || CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT))
431 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
432 key);
433 /*
434 * Multiple sessions with the same neighbor should get their own
435 * update-group if they have different roles.
436 */
437 key = jhash_1word(peer->local_role, key);
438
439 /* Neighbors configured with the AIGP attribute are put in a separate
440 * update group from other neighbors.
441 */
442 key = jhash_1word((peer->flags & PEER_FLAG_AIGP), key);
443
444 if (peer->soo[afi][safi]) {
445 char *soo_str = ecommunity_str(peer->soo[afi][safi]);
446
447 key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key);
448 }
449
450 if (bgp_debug_neighbor_events(peer)) {
451 zlog_debug(
452 "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju",
453 peer, peer->sort,
454 (intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS),
455 (intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS));
456 zlog_debug(
457 "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
458 peer, (uint32_t)peer->addpath_type[afi][safi],
459 CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS),
460 CHECK_FLAG(peer->af_cap[afi][safi],
461 PEER_UPDGRP_AF_CAP_FLAGS),
462 peer->v_routeadv, peer->change_local_as);
463 zlog_debug(
464 "%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
465 peer, peer->max_packet_size, peer->pmax_out[afi][safi],
466 peer->group ? peer->group->name : "(NONE)",
467 ROUTE_MAP_OUT_NAME(filter) ? ROUTE_MAP_OUT_NAME(filter)
468 : "(NONE)");
469 zlog_debug(
470 "%pBP Update Group Hash: dlist out: %s plist out: %s aslist out: %s usmap out: %s advmap: %s",
471 peer,
472 DISTRIBUTE_OUT_NAME(filter)
473 ? DISTRIBUTE_OUT_NAME(filter)
474 : "(NONE)",
475 PREFIX_LIST_OUT_NAME(filter)
476 ? PREFIX_LIST_OUT_NAME(filter)
477 : "(NONE)",
478 FILTER_LIST_OUT_NAME(filter)
479 ? FILTER_LIST_OUT_NAME(filter)
480 : "(NONE)",
481 UNSUPPRESS_MAP_NAME(filter)
482 ? UNSUPPRESS_MAP_NAME(filter)
483 : "(NONE)",
484 ADVERTISE_MAP_NAME(filter) ? ADVERTISE_MAP_NAME(filter)
485 : "(NONE)");
486 zlog_debug(
487 "%pBP Update Group Hash: default rmap: %s shared network and afi active network: %d",
488 peer,
489 peer->default_rmap[afi][safi].name
490 ? peer->default_rmap[afi][safi].name
491 : "(NONE)",
492 peer->shared_network &&
493 peer_afi_active_nego(peer, AFI_IP6));
494 zlog_debug(
495 "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju",
496 peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
497 CHECK_FLAG(peer->af_cap[afi][safi],
498 PEER_CAP_ORF_PREFIX_SM_RCV),
499 CHECK_FLAG(peer->af_cap[afi][safi],
500 PEER_CAP_ORF_PREFIX_SM_OLD_RCV),
501 (intmax_t)CHECK_FLAG(peer->af_flags[afi][safi],
502 PEER_FLAG_MAX_PREFIX_OUT));
503 zlog_debug("%pBP Update Group Hash key: %u", peer, key);
504 }
505 return key;
506 }
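
/*
 * Worked example (illustration only, "OUT" is a hypothetical route-map
 * name): two established peers whose hashed inputs above are identical -
 * same sort, flags, MRAI, outbound policy names, and so on - produce the
 * same key and can therefore share an update group, while changing any one
 * of those inputs will in general split them apart:
 *
 *	key(peerA) == key(peerB)   while both use route-map "OUT" outbound
 *	key(peerA) != key(peerB)   after peerB's outbound route-map changes
 *
 * Equal keys are necessary but not sufficient; updgrp_hash_cmp() below is
 * the authoritative comparison when keys collide.
 */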
507
508 static bool updgrp_hash_cmp(const void *p1, const void *p2)
509 {
510 const struct update_group *grp1;
511 const struct update_group *grp2;
512 const struct peer *pe1;
513 const struct peer *pe2;
514 uint32_t flags1;
515 uint32_t flags2;
516 const struct bgp_filter *fl1;
517 const struct bgp_filter *fl2;
518 afi_t afi;
519 safi_t safi;
520
521 if (!p1 || !p2)
522 return false;
523
524 grp1 = p1;
525 grp2 = p2;
526 pe1 = grp1->conf;
527 pe2 = grp2->conf;
528 afi = grp1->afi;
529 safi = grp1->safi;
530 flags1 = pe1->af_flags[afi][safi];
531 flags2 = pe2->af_flags[afi][safi];
532 fl1 = &pe1->filter[afi][safi];
533 fl2 = &pe2->filter[afi][safi];
534
535 /* put EBGP and IBGP peers in different update groups */
536 if (pe1->sort != pe2->sort)
537 return false;
538
539 /* check peer flags */
540 if ((pe1->flags & PEER_UPDGRP_FLAGS)
541 != (pe2->flags & PEER_UPDGRP_FLAGS))
542 return false;
543
544 /* If there is 'local-as' configured, it should match. */
545 if (pe1->change_local_as != pe2->change_local_as)
546 return false;
547
548 if (pe1->pmax_out[afi][safi] != pe2->pmax_out[afi][safi])
549 return false;
550
551 /* flags like route reflector client */
552 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
553 return false;
554
555 if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
556 return false;
557
558 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
559 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
560 return false;
561
562 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
563 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
564 return false;
565
566 if (pe1->v_routeadv != pe2->v_routeadv)
567 return false;
568
569 if (pe1->group != pe2->group)
570 return false;
571
572 /* Roles can affect filtering */
573 if (pe1->local_role != pe2->local_role)
574 return false;
575
576 /* route-map names should be the same */
577 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
578 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
579 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
580 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
581 return false;
582
583 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
584 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
585 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
586 && strcmp(fl1->dlist[FILTER_OUT].name,
587 fl2->dlist[FILTER_OUT].name)))
588 return false;
589
590 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
591 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
592 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
593 && strcmp(fl1->plist[FILTER_OUT].name,
594 fl2->plist[FILTER_OUT].name)))
595 return false;
596
597 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
598 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
599 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
600 && strcmp(fl1->aslist[FILTER_OUT].name,
601 fl2->aslist[FILTER_OUT].name)))
602 return false;
603
604 if ((fl1->usmap.name && !fl2->usmap.name)
605 || (!fl1->usmap.name && fl2->usmap.name)
606 || (fl1->usmap.name && fl2->usmap.name
607 && strcmp(fl1->usmap.name, fl2->usmap.name)))
608 return false;
609
610 if ((fl1->advmap.aname && !fl2->advmap.aname)
611 || (!fl1->advmap.aname && fl2->advmap.aname)
612 || (fl1->advmap.aname && fl2->advmap.aname
613 && strcmp(fl1->advmap.aname, fl2->advmap.aname)))
614 return false;
615
616 if (fl1->advmap.update_type != fl2->advmap.update_type)
617 return false;
618
619 if ((pe1->default_rmap[afi][safi].name
620 && !pe2->default_rmap[afi][safi].name)
621 || (!pe1->default_rmap[afi][safi].name
622 && pe2->default_rmap[afi][safi].name)
623 || (pe1->default_rmap[afi][safi].name
624 && pe2->default_rmap[afi][safi].name
625 && strcmp(pe1->default_rmap[afi][safi].name,
626 pe2->default_rmap[afi][safi].name)))
627 return false;
628
629 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
630 return false;
631
632 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
633 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
634 || CHECK_FLAG(pe1->af_cap[afi][safi],
635 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
636 && !sockunion_same(&pe1->su, &pe2->su))
637 return false;
638
639 return true;
640 }
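
/*
 * Illustration only: updgrp_hash_key_make() and updgrp_hash_cmp() are the
 * two callbacks wired into the per-AF update-group hash table; see
 * update_bgp_group_init() at the bottom of this file, which does roughly:
 *
 *	bgp->update_groups[afid] =
 *		hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
 *			    "BGP Update Group Hash");
 */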
641
642 static void peer_lonesoul_or_not(struct peer *peer, int set)
643 {
644 /* no change in status? */
645 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
646 return;
647
648 if (set)
649 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
650 else
651 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
652
653 update_group_adjust_peer_afs(peer);
654 }
655
656 /*
657 * subgroup_total_packets_enqueued
658 *
659 * Returns the total number of packets enqueued to a subgroup.
660 */
661 static unsigned int
662 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
663 {
664 struct bpacket *pkt;
665
666 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
667
668 return pkt->ver - 1;
669 }
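
/*
 * Example (illustration only): the total is derived from the trailing
 * placeholder packet, since the count returned is pkt->ver - 1. A
 * placeholder whose ver is 5 therefore corresponds to four packets having
 * been enqueued over the subgroup's lifetime.
 */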
670
671 static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
672 {
673 struct updwalk_context *ctx = arg;
674 struct vty *vty;
675 struct update_subgroup *subgrp;
676 struct peer_af *paf;
677 struct bgp_filter *filter;
678 struct peer *peer = UPDGRP_PEER(updgrp);
679 int match = 0;
680 json_object *json_updgrp = NULL;
681 json_object *json_subgrps = NULL;
682 json_object *json_subgrp = NULL;
683 json_object *json_time = NULL;
684 json_object *json_subgrp_time = NULL;
685 json_object *json_subgrp_event = NULL;
686 json_object *json_peers = NULL;
687 json_object *json_pkt_info = NULL;
688 time_t epoch_tbuf, tbuf;
689
690 if (!ctx)
691 return CMD_SUCCESS;
692
693 if (ctx->subgrp_id) {
694 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
695 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
696 continue;
697 else {
698 match = 1;
699 break;
700 }
701 }
702 } else {
703 match = 1;
704 }
705
706 if (!match) {
707 		/* Since this routine is invoked from a walk, we cannot signal
708 		 * any error here; we can only return.
709 		 */
710 return CMD_SUCCESS;
711 }
712
713 vty = ctx->vty;
714
715 if (ctx->uj) {
716 json_updgrp = json_object_new_object();
717 /* Display json o/p */
718 tbuf = monotime(NULL);
719 tbuf -= updgrp->uptime;
720 epoch_tbuf = time(NULL) - tbuf;
721 json_time = json_object_new_object();
722 json_object_int_add(json_time, "epoch", epoch_tbuf);
723 json_object_string_add(json_time, "epochString",
724 ctime(&epoch_tbuf));
725 json_object_object_add(json_updgrp, "groupCreateTime",
726 json_time);
727 json_object_string_add(json_updgrp, "afi",
728 afi2str(updgrp->afi));
729 json_object_string_add(json_updgrp, "safi",
730 safi2str(updgrp->safi));
731 } else {
732 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
733 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
734 }
735
736 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
737 if (filter->map[RMAP_OUT].name) {
738 if (ctx->uj)
739 json_object_string_add(json_updgrp, "outRouteMap",
740 filter->map[RMAP_OUT].name);
741 else
742 vty_out(vty, " Outgoing route map: %s\n",
743 filter->map[RMAP_OUT].name);
744 }
745
746 if (ctx->uj)
747 json_object_int_add(json_updgrp, "minRouteAdvInt",
748 updgrp->conf->v_routeadv);
749 else
750 vty_out(vty, " MRAI value (seconds): %d\n",
751 updgrp->conf->v_routeadv);
752
753 if (updgrp->conf->change_local_as) {
754 if (ctx->uj) {
755 json_object_int_add(json_updgrp, "localAs",
756 updgrp->conf->change_local_as);
757 json_object_boolean_add(
758 json_updgrp, "noPrepend",
759 CHECK_FLAG(updgrp->conf->flags,
760 PEER_FLAG_LOCAL_AS_NO_PREPEND));
761 json_object_boolean_add(
762 json_updgrp, "replaceLocalAs",
763 CHECK_FLAG(updgrp->conf->flags,
764 PEER_FLAG_LOCAL_AS_REPLACE_AS));
765 } else {
766 vty_out(vty, " Local AS %u%s%s\n",
767 updgrp->conf->change_local_as,
768 CHECK_FLAG(updgrp->conf->flags,
769 PEER_FLAG_LOCAL_AS_NO_PREPEND)
770 ? " no-prepend"
771 : "",
772 CHECK_FLAG(updgrp->conf->flags,
773 PEER_FLAG_LOCAL_AS_REPLACE_AS)
774 ? " replace-as"
775 : "");
776 }
777 }
778 json_subgrps = json_object_new_array();
779 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
780 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
781 continue;
782 if (ctx->uj) {
783 json_subgrp = json_object_new_object();
784 json_object_int_add(json_subgrp, "subGroupId",
785 subgrp->id);
786 tbuf = monotime(NULL);
787 tbuf -= subgrp->uptime;
788 epoch_tbuf = time(NULL) - tbuf;
789 json_subgrp_time = json_object_new_object();
790 json_object_int_add(json_subgrp_time, "epoch",
791 epoch_tbuf);
792 json_object_string_add(json_subgrp_time, "epochString",
793 ctime(&epoch_tbuf));
794 json_object_object_add(json_subgrp, "groupCreateTime",
795 json_subgrp_time);
796 } else {
797 vty_out(vty, "\n");
798 vty_out(vty, " Update-subgroup %" PRIu64 ":\n",
799 subgrp->id);
800 vty_out(vty, " Created: %s",
801 timestamp_string(subgrp->uptime));
802 }
803
804 if (subgrp->split_from.update_group_id
805 || subgrp->split_from.subgroup_id) {
806 if (ctx->uj) {
807 json_object_int_add(
808 json_subgrp, "splitGroupId",
809 subgrp->split_from.update_group_id);
810 json_object_int_add(
811 json_subgrp, "splitSubGroupId",
812 subgrp->split_from.subgroup_id);
813 } else {
814 vty_out(vty,
815 " Split from group id: %" PRIu64
816 "\n",
817 subgrp->split_from.update_group_id);
818 vty_out(vty,
819 " Split from subgroup id: %" PRIu64
820 "\n",
821 subgrp->split_from.subgroup_id);
822 }
823 }
824
825 if (ctx->uj) {
826 json_subgrp_event = json_object_new_object();
827 json_object_int_add(json_subgrp_event, "joinEvents",
828 subgrp->join_events);
829 json_object_int_add(json_subgrp_event, "pruneEvents",
830 subgrp->prune_events);
831 json_object_int_add(json_subgrp_event, "mergeEvents",
832 subgrp->merge_events);
833 json_object_int_add(json_subgrp_event, "splitEvents",
834 subgrp->split_events);
835 json_object_int_add(json_subgrp_event, "switchEvents",
836 subgrp->updgrp_switch_events);
837 json_object_int_add(json_subgrp_event,
838 "peerRefreshEvents",
839 subgrp->peer_refreshes_combined);
840 json_object_int_add(json_subgrp_event,
841 "mergeCheckEvents",
842 subgrp->merge_checks_triggered);
843 json_object_object_add(json_subgrp, "statistics",
844 json_subgrp_event);
845 json_object_int_add(json_subgrp, "coalesceTime",
846 (UPDGRP_INST(subgrp->update_group))
847 ->coalesce_time);
848 json_object_int_add(json_subgrp, "version",
849 subgrp->version);
850 json_pkt_info = json_object_new_object();
851 json_object_int_add(
852 json_pkt_info, "qeueueLen",
853 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
854 json_object_int_add(
855 json_pkt_info, "queuedTotal",
856 subgroup_total_packets_enqueued(subgrp));
857 json_object_int_add(
858 json_pkt_info, "queueHwmLen",
859 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
860 json_object_int_add(
861 json_pkt_info, "totalEnqueued",
862 subgroup_total_packets_enqueued(subgrp));
863 json_object_object_add(json_subgrp, "packetQueueInfo",
864 json_pkt_info);
865 json_object_int_add(json_subgrp, "adjListCount",
866 subgrp->adj_count);
867 json_object_boolean_add(
868 json_subgrp, "needsRefresh",
869 CHECK_FLAG(subgrp->flags,
870 SUBGRP_FLAG_NEEDS_REFRESH));
871 } else {
872 vty_out(vty, " Join events: %u\n",
873 subgrp->join_events);
874 vty_out(vty, " Prune events: %u\n",
875 subgrp->prune_events);
876 vty_out(vty, " Merge events: %u\n",
877 subgrp->merge_events);
878 vty_out(vty, " Split events: %u\n",
879 subgrp->split_events);
880 vty_out(vty, " Update group switch events: %u\n",
881 subgrp->updgrp_switch_events);
882 vty_out(vty, " Peer refreshes combined: %u\n",
883 subgrp->peer_refreshes_combined);
884 vty_out(vty, " Merge checks triggered: %u\n",
885 subgrp->merge_checks_triggered);
886 vty_out(vty, " Coalesce Time: %u%s\n",
887 (UPDGRP_INST(subgrp->update_group))
888 ->coalesce_time,
889 subgrp->t_coalesce ? "(Running)" : "");
890 vty_out(vty, " Version: %" PRIu64 "\n",
891 subgrp->version);
892 vty_out(vty, " Packet queue length: %d\n",
893 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
894 vty_out(vty, " Total packets enqueued: %u\n",
895 subgroup_total_packets_enqueued(subgrp));
896 vty_out(vty, " Packet queue high watermark: %d\n",
897 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
898 vty_out(vty, " Adj-out list count: %u\n",
899 subgrp->adj_count);
900 vty_out(vty, " Advertise list: %s\n",
901 advertise_list_is_empty(subgrp) ? "empty"
902 : "not empty");
903 vty_out(vty, " Flags: %s\n",
904 CHECK_FLAG(subgrp->flags,
905 SUBGRP_FLAG_NEEDS_REFRESH)
906 ? "R"
907 : "");
908 if (peer)
909 vty_out(vty, " Max packet size: %d\n",
910 peer->max_packet_size);
911 }
912 if (subgrp->peer_count > 0) {
913 if (ctx->uj) {
914 json_peers = json_object_new_array();
915 SUBGRP_FOREACH_PEER (subgrp, paf) {
916 json_object *peer =
917 json_object_new_string(
918 paf->peer->host);
919 json_object_array_add(json_peers, peer);
920 }
921 json_object_object_add(json_subgrp, "peers",
922 json_peers);
923 } else {
924 vty_out(vty, " Peers:\n");
925 SUBGRP_FOREACH_PEER (subgrp, paf)
926 vty_out(vty, " - %s\n",
927 paf->peer->host);
928 }
929 }
930
931 if (ctx->uj)
932 json_object_array_add(json_subgrps, json_subgrp);
933 }
934
935 if (ctx->uj) {
936 json_object_object_add(json_updgrp, "subGroup", json_subgrps);
937 json_object_object_addf(ctx->json_updategrps, json_updgrp,
938 "%" PRIu64, updgrp->id);
939 }
940
941 return UPDWALK_CONTINUE;
942 }
943
944 /*
945  * Helper function to show the packet queue for each subgroup of an update
946  * group. Will be constrained to a particular subgroup id if id != 0.
947 */
948 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
949 void *arg)
950 {
951 struct updwalk_context *ctx = arg;
952 struct update_subgroup *subgrp;
953 struct vty *vty;
954
955 vty = ctx->vty;
956 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
957 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
958 continue;
959 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
960 updgrp->id, subgrp->id);
961 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
962 }
963 return UPDWALK_CONTINUE;
964 }
965
966 /*
967  * Show the packet queue for each subgroup of an update group. Will be
968  * constrained to a particular subgroup id if id != 0.
969 */
970 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
971 struct vty *vty, uint64_t id)
972 {
973 struct updwalk_context ctx;
974
975 memset(&ctx, 0, sizeof(ctx));
976 ctx.vty = vty;
977 ctx.subgrp_id = id;
978 ctx.flags = 0;
979 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
980 &ctx);
981 }
982
983 static struct update_group *update_group_find(struct peer_af *paf)
984 {
985 struct update_group *updgrp;
986 struct update_group tmp;
987 struct peer tmp_conf;
988
989 if (!peer_established(PAF_PEER(paf)))
990 return NULL;
991
992 memset(&tmp, 0, sizeof(tmp));
993 memset(&tmp_conf, 0, sizeof(tmp_conf));
994 tmp.conf = &tmp_conf;
995 peer2_updgrp_copy(&tmp, paf);
996
997 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
998 conf_release(&tmp_conf, paf->afi, paf->safi);
999 return updgrp;
1000 }
1001
1002 static struct update_group *update_group_create(struct peer_af *paf)
1003 {
1004 struct update_group *updgrp;
1005 struct update_group tmp;
1006 struct peer tmp_conf;
1007
1008 memset(&tmp, 0, sizeof(tmp));
1009 memset(&tmp_conf, 0, sizeof(tmp_conf));
1010 tmp.conf = &tmp_conf;
1011 peer2_updgrp_copy(&tmp, paf);
1012
1013 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
1014 updgrp_hash_alloc);
1015 update_group_checkin(updgrp);
1016
1017 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1018 zlog_debug("create update group %" PRIu64, updgrp->id);
1019
1020 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
1021
1022 conf_release(&tmp_conf, paf->afi, paf->safi);
1023 return updgrp;
1024 }
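
/*
 * Illustration only (not compiled): update_group_find() and
 * update_group_create() are normally used back to back, roughly as
 * update_group_adjust_peer() does near the end of this file:
 *
 *	updgrp = update_group_find(paf);
 *	if (!updgrp)
 *		updgrp = update_group_create(paf);
 */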
1025
1026 static void update_group_delete(struct update_group *updgrp)
1027 {
1028 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1029 zlog_debug("delete update group %" PRIu64, updgrp->id);
1030
1031 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
1032
1033 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
1034 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
1035
1036 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
1037
1038 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
1039
1040 XFREE(MTYPE_BGP_PEER, updgrp->conf);
1041 XFREE(MTYPE_BGP_UPDGRP, updgrp);
1042 }
1043
1044 static void update_group_add_subgroup(struct update_group *updgrp,
1045 struct update_subgroup *subgrp)
1046 {
1047 if (!updgrp || !subgrp)
1048 return;
1049
1050 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
1051 subgrp->update_group = updgrp;
1052 }
1053
1054 static void update_group_remove_subgroup(struct update_group *updgrp,
1055 struct update_subgroup *subgrp)
1056 {
1057 if (!updgrp || !subgrp)
1058 return;
1059
1060 LIST_REMOVE(subgrp, updgrp_train);
1061 subgrp->update_group = NULL;
1062 if (LIST_EMPTY(&(updgrp->subgrps)))
1063 update_group_delete(updgrp);
1064 }
1065
1066 static struct update_subgroup *
1067 update_subgroup_create(struct update_group *updgrp)
1068 {
1069 struct update_subgroup *subgrp;
1070
1071 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
1072 update_subgroup_checkin(subgrp, updgrp);
1073 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
1074 sync_init(subgrp, updgrp);
1075 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
1076 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
1077 TAILQ_INIT(&(subgrp->adjq));
1078 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1079 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
1080 subgrp->id);
1081
1082 update_group_add_subgroup(updgrp, subgrp);
1083
1084 UPDGRP_INCR_STAT(updgrp, subgrps_created);
1085
1086 return subgrp;
1087 }
1088
1089 static void update_subgroup_delete(struct update_subgroup *subgrp)
1090 {
1091 if (!subgrp)
1092 return;
1093
1094 if (subgrp->update_group)
1095 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
1096
1097 THREAD_OFF(subgrp->t_merge_check);
1098 THREAD_OFF(subgrp->t_coalesce);
1099
1100 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
1101 subgroup_clear_table(subgrp);
1102
1103 sync_delete(subgrp);
1104
1105 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
1106 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
1107 subgrp->update_group->id, subgrp->id);
1108
1109 update_group_remove_subgroup(subgrp->update_group, subgrp);
1110
1111 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
1112 }
1113
1114 void update_subgroup_inherit_info(struct update_subgroup *to,
1115 struct update_subgroup *from)
1116 {
1117 if (!to || !from)
1118 return;
1119
1120 to->sflags = from->sflags;
1121 }
1122
1123 /*
1124 * update_subgroup_check_delete
1125 *
1126 * Delete a subgroup if it is ready to be deleted.
1127 *
1128 * Returns true if the subgroup was deleted.
1129 */
1130 static bool update_subgroup_check_delete(struct update_subgroup *subgrp)
1131 {
1132 if (!subgrp)
1133 return false;
1134
1135 if (!LIST_EMPTY(&(subgrp->peers)))
1136 return false;
1137
1138 update_subgroup_delete(subgrp);
1139
1140 return true;
1141 }
1142
1143 /*
1144 * update_subgroup_add_peer
1145 *
1146  * @param send_enqueued_pkts If true, all currently enqueued packets will
1147  *                           also be sent to the peer.
1148 */
1149 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
1150 struct peer_af *paf,
1151 int send_enqueued_pkts)
1152 {
1153 struct bpacket *pkt;
1154
1155 if (!subgrp || !paf)
1156 return;
1157
1158 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
1159 paf->subgroup = subgrp;
1160 subgrp->peer_count++;
1161
1162 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1163 UPDGRP_PEER_DBG_EN(subgrp->update_group);
1164 }
1165
1166 SUBGRP_INCR_STAT(subgrp, join_events);
1167
1168 if (send_enqueued_pkts) {
1169 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
1170 } else {
1171
1172 /*
1173 * Hang the peer off of the last, placeholder, packet in the
1174 * queue. This means it won't see any of the packets that are
1175 		 * currently in the queue.
1176 */
1177 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
1178 assert(pkt->buffer == NULL);
1179 }
1180
1181 bpacket_add_peer(pkt, paf);
1182
1183 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1184 zlog_debug("peer %s added to subgroup s%" PRIu64,
1185 paf->peer->host, subgrp->id);
1186 }
1187
1188 /*
1189 * update_subgroup_remove_peer_internal
1190 *
1191 * Internal function that removes a peer from a subgroup, but does not
1192 * delete the subgroup. A call to this function must almost always be
1193 * followed by a call to update_subgroup_check_delete().
1194 *
1195 * @see update_subgroup_remove_peer
1196 */
1197 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
1198 struct peer_af *paf)
1199 {
1200 assert(subgrp && paf && subgrp->update_group);
1201
1202 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1203 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
1204 }
1205
1206 bpacket_queue_remove_peer(paf);
1207 LIST_REMOVE(paf, subgrp_train);
1208 paf->subgroup = NULL;
1209 subgrp->peer_count--;
1210
1211 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1212 zlog_debug("peer %s deleted from subgroup s%"
1213 PRIu64 " peer cnt %d",
1214 paf->peer->host, subgrp->id, subgrp->peer_count);
1215 SUBGRP_INCR_STAT(subgrp, prune_events);
1216 }
1217
1218 /*
1219 * update_subgroup_remove_peer
1220 */
1221 void update_subgroup_remove_peer(struct update_subgroup *subgrp,
1222 struct peer_af *paf)
1223 {
1224 if (!subgrp || !paf)
1225 return;
1226
1227 update_subgroup_remove_peer_internal(subgrp, paf);
1228
1229 if (update_subgroup_check_delete(subgrp))
1230 return;
1231
1232 /*
1233 * The deletion of the peer may have caused some packets to be
1234 * deleted from the subgroup packet queue. Check if the subgroup can
1235 * be merged now.
1236 */
1237 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
1238 }
1239
1240 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
1241 struct peer_af *paf)
1242 {
1243 struct update_subgroup *subgrp = NULL;
1244 uint64_t version;
1245
1246 if (paf->subgroup) {
1247 assert(0);
1248 return NULL;
1249 } else
1250 version = 0;
1251
1252 if (!peer_established(PAF_PEER(paf)))
1253 return NULL;
1254
1255 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1256 if (subgrp->version != version
1257 || CHECK_FLAG(subgrp->sflags,
1258 SUBGRP_STATUS_DEFAULT_ORIGINATE))
1259 continue;
1260
1261 /*
1262 * The version number is not meaningful on a subgroup that needs
1263 * a refresh.
1264 */
1265 if (update_subgroup_needs_refresh(subgrp))
1266 continue;
1267
1268 break;
1269 }
1270
1271 return subgrp;
1272 }
1273
1274 /*
1275 * update_subgroup_ready_for_merge
1276 *
1277 * Returns true if this subgroup is in a state that allows it to be
1278 * merged into another subgroup.
1279 */
1280 static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
1281 {
1282
1283 /*
1284 * Not ready if there are any encoded packets waiting to be written
1285 * out to peers.
1286 */
1287 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
1288 return false;
1289
1290 /*
1291 	 * Not ready if there are enqueued updates waiting to be encoded.
1292 */
1293 if (!advertise_list_is_empty(subgrp))
1294 return false;
1295
1296 /*
1297 * Don't attempt to merge a subgroup that needs a refresh. For one,
1298 * we can't determine if the adj_out of such a group matches that of
1299 * another group.
1300 */
1301 if (update_subgroup_needs_refresh(subgrp))
1302 return false;
1303
1304 return true;
1305 }
1306
1307 /*
1308  * update_subgroup_can_merge_into
1309 *
1310 * Returns true if the first subgroup can merge into the second
1311 * subgroup.
1312 */
1313 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1314 struct update_subgroup *target)
1315 {
1316
1317 if (subgrp == target)
1318 return 0;
1319
1320 /*
1321 * Both must have processed the BRIB to the same point in order to
1322 * be merged.
1323 */
1324 if (subgrp->version != target->version)
1325 return 0;
1326
1327 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1328 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1329 return 0;
1330
1331 if (subgrp->adj_count != target->adj_count)
1332 return 0;
1333
1334 return update_subgroup_ready_for_merge(target);
1335 }
1336
1337 /*
1338 * update_subgroup_merge
1339 *
1340 * Merge the first subgroup into the second one.
1341 */
1342 static void update_subgroup_merge(struct update_subgroup *subgrp,
1343 struct update_subgroup *target,
1344 const char *reason)
1345 {
1346 struct peer_af *paf;
1347 int result;
1348 int peer_count;
1349
1350 assert(subgrp->adj_count == target->adj_count);
1351
1352 peer_count = subgrp->peer_count;
1353
1354 while (1) {
1355 paf = LIST_FIRST(&subgrp->peers);
1356 if (!paf)
1357 break;
1358
1359 update_subgroup_remove_peer_internal(subgrp, paf);
1360
1361 /*
1362 * Add the peer to the target subgroup, while making sure that
1363 * any currently enqueued packets won't be sent to it. Enqueued
1364 * packets could, for example, result in an unnecessary withdraw
1365 * followed by an advertise.
1366 */
1367 update_subgroup_add_peer(target, paf, 0);
1368 }
1369
1370 SUBGRP_INCR_STAT(target, merge_events);
1371
1372 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1373 zlog_debug("u%" PRIu64 ":s%" PRIu64" (%d peers) merged into u%" PRIu64 ":s%" PRIu64", trigger: %s",
1374 subgrp->update_group->id, subgrp->id, peer_count,
1375 target->update_group->id, target->id,
1376 reason ? reason : "unknown");
1377
1378 result = update_subgroup_check_delete(subgrp);
1379 assert(result);
1380 }
1381
1382 /*
1383 * update_subgroup_check_merge
1384 *
1385 * Merge this subgroup into another subgroup if possible.
1386 *
1387 * Returns true if the subgroup has been merged. The subgroup pointer
1388 * should not be accessed in this case.
1389 */
1390 bool update_subgroup_check_merge(struct update_subgroup *subgrp,
1391 const char *reason)
1392 {
1393 struct update_subgroup *target;
1394
1395 if (!update_subgroup_ready_for_merge(subgrp))
1396 return false;
1397
1398 /*
1399 * Look for a subgroup to merge into.
1400 */
1401 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
1402 if (update_subgroup_can_merge_into(subgrp, target))
1403 break;
1404 }
1405
1406 if (!target)
1407 return false;
1408
1409 update_subgroup_merge(subgrp, target, reason);
1410 return true;
1411 }
1412
1413 /*
1414 * update_subgroup_merge_check_thread_cb
1415 */
1416 static void update_subgroup_merge_check_thread_cb(struct thread *thread)
1417 {
1418 struct update_subgroup *subgrp;
1419
1420 subgrp = THREAD_ARG(thread);
1421
1422 subgrp->t_merge_check = NULL;
1423
1424 update_subgroup_check_merge(subgrp, "triggered merge check");
1425 }
1426
1427 /*
1428 * update_subgroup_trigger_merge_check
1429 *
1430 * Triggers a call to update_subgroup_check_merge() on a clean context.
1431 *
1432 * @param force If true, the merge check will be triggered even if the
1433 * subgroup doesn't currently look ready for a merge.
1434 *
1435 * Returns true if a merge check will be performed shortly.
1436 */
1437 bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1438 int force)
1439 {
1440 if (subgrp->t_merge_check)
1441 return true;
1442
1443 if (!force && !update_subgroup_ready_for_merge(subgrp))
1444 return false;
1445
1446 subgrp->t_merge_check = NULL;
1447 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1448 subgrp, 0, &subgrp->t_merge_check);
1449
1450 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1451
1452 return true;
1453 }
1454
1455 /*
1456 * update_subgroup_copy_adj_out
1457 *
1458 * Helper function that clones the adj out (state about advertised
1459 * routes) from one subgroup to another. It assumes that the adj out
1460 * of the target subgroup is empty.
1461 */
1462 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1463 struct update_subgroup *dest)
1464 {
1465 struct bgp_adj_out *aout, *aout_copy;
1466
1467 SUBGRP_FOREACH_ADJ (source, aout) {
1468 /*
1469 * Copy the adj out.
1470 */
1471 aout_copy = bgp_adj_out_alloc(dest, aout->dest,
1472 aout->addpath_tx_id);
1473 aout_copy->attr =
1474 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
1475 }
1476
1477 dest->scount = source->scount;
1478 }
1479
1480 /*
1481 * update_subgroup_copy_packets
1482 *
1483 * Copy packets after and including the given packet to the subgroup
1484 * 'dest'.
1485 *
1486 * Returns the number of packets copied.
1487 */
1488 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1489 struct bpacket *pkt)
1490 {
1491 int count;
1492
1493 count = 0;
1494 while (pkt && pkt->buffer) {
1495 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1496 &pkt->arr);
1497 count++;
1498 pkt = bpacket_next(pkt);
1499 }
1500
1501 return count;
1502 }
1503
1504 static bool updgrp_prefix_list_update(struct update_group *updgrp,
1505 const char *name)
1506 {
1507 struct peer *peer;
1508 struct bgp_filter *filter;
1509
1510 peer = UPDGRP_PEER(updgrp);
1511 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1512
1513 if (PREFIX_LIST_OUT_NAME(filter)
1514 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1515 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1516 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1517 return true;
1518 }
1519 return false;
1520 }
1521
1522 static bool updgrp_filter_list_update(struct update_group *updgrp,
1523 const char *name)
1524 {
1525 struct peer *peer;
1526 struct bgp_filter *filter;
1527
1528 peer = UPDGRP_PEER(updgrp);
1529 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1530
1531 if (FILTER_LIST_OUT_NAME(filter)
1532 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1533 FILTER_LIST_OUT(filter) =
1534 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1535 return true;
1536 }
1537 return false;
1538 }
1539
1540 static bool updgrp_distribute_list_update(struct update_group *updgrp,
1541 const char *name)
1542 {
1543 struct peer *peer;
1544 struct bgp_filter *filter;
1545
1546 peer = UPDGRP_PEER(updgrp);
1547 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1548
1549 if (DISTRIBUTE_OUT_NAME(filter)
1550 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1551 DISTRIBUTE_OUT(filter) = access_list_lookup(
1552 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1553 return true;
1554 }
1555 return false;
1556 }
1557
1558 static int updgrp_route_map_update(struct update_group *updgrp,
1559 const char *name, int *def_rmap_changed)
1560 {
1561 struct peer *peer;
1562 struct bgp_filter *filter;
1563 int changed = 0;
1564 afi_t afi;
1565 safi_t safi;
1566
1567 peer = UPDGRP_PEER(updgrp);
1568 afi = UPDGRP_AFI(updgrp);
1569 safi = UPDGRP_SAFI(updgrp);
1570 filter = &peer->filter[afi][safi];
1571
1572 if (ROUTE_MAP_OUT_NAME(filter)
1573 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1574 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1575
1576 changed = 1;
1577 }
1578
1579 if (UNSUPPRESS_MAP_NAME(filter)
1580 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1581 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1582 changed = 1;
1583 }
1584
1585 /* process default-originate route-map */
1586 if (peer->default_rmap[afi][safi].name
1587 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1588 peer->default_rmap[afi][safi].map =
1589 route_map_lookup_by_name(name);
1590 if (def_rmap_changed)
1591 *def_rmap_changed = 1;
1592 }
1593 return changed;
1594 }
1595
1596 /*
1597 * hash iteration callback function to process a policy change for an
1598 * update group. Check if the changed policy matches the updgrp's
1599 * outbound route-map or unsuppress-map or default-originate map or
1600 * filter-list or prefix-list or distribute-list.
1601 * Trigger update generation accordingly.
1602 */
1603 static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
1604 {
1605 struct updwalk_context *ctx = arg;
1606 struct update_subgroup *subgrp;
1607 int changed = 0;
1608 int def_changed = 0;
1609
1610 if (!updgrp || !ctx || !ctx->policy_name)
1611 return UPDWALK_CONTINUE;
1612
1613 switch (ctx->policy_type) {
1614 case BGP_POLICY_ROUTE_MAP:
1615 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1616 &def_changed);
1617 break;
1618 case BGP_POLICY_FILTER_LIST:
1619 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1620 break;
1621 case BGP_POLICY_PREFIX_LIST:
1622 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1623 break;
1624 case BGP_POLICY_DISTRIBUTE_LIST:
1625 changed =
1626 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1627 break;
1628 default:
1629 break;
1630 }
1631
1632 /* If not doing route update, return after updating "config" */
1633 if (!ctx->policy_route_update)
1634 return UPDWALK_CONTINUE;
1635
1636 /* If nothing has changed, return after updating "config" */
1637 if (!changed && !def_changed)
1638 return UPDWALK_CONTINUE;
1639
1640 	/*
1641 	 * If something has changed, then at the beginning of a route-map
1642 	 * modification event, mark each subgroup's needs-refresh bit. For
1643 	 * one, it signals that the subgroup needs a refresh. Second, it
1644 	 * prevents a premature merge of this subgroup with another before
1645 	 * a complete (outbound) refresh.
1646 	 */
1649 if (ctx->policy_event_start_flag) {
1650 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1651 update_subgroup_set_needs_refresh(subgrp, 1);
1652 }
1653 return UPDWALK_CONTINUE;
1654 }
1655
1656 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1657 		/* Avoid suppressing duplicate routes later
1658 * when processing in subgroup_announce_table().
1659 */
1660 SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES);
1661
1662 if (changed) {
1663 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1664 zlog_debug(
1665 "u%" PRIu64 ":s%" PRIu64" announcing routes upon policy %s (type %d) change",
1666 updgrp->id, subgrp->id,
1667 ctx->policy_name, ctx->policy_type);
1668 subgroup_announce_route(subgrp);
1669 }
1670 if (def_changed) {
1671 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1672 zlog_debug(
1673 "u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change",
1674 updgrp->id, subgrp->id,
1675 ctx->policy_name);
1676 if (route_map_lookup_by_name(ctx->policy_name)) {
1677 				/*
1678 				 * When there is a change in the route-map,
1679 				 * this flow is triggered. The route-map is
1680 				 * still present in the library, hence this is
1681 				 * an update flow and the flag needs to be unset.
1682 				 */
1683 UNSET_FLAG(subgrp->sflags,
1684 SUBGRP_STATUS_DEFAULT_ORIGINATE);
1685 subgroup_default_originate(subgrp, 0);
1686 } else {
1687 				/*
1688 				 * This is an explicit withdraw, since the
1689 				 * route-map is no longer present in the
1690 				 * route-map library; pass 1 for the withdraw arg.
1691 				 */
1692 subgroup_default_originate(subgrp, 1);
1693 }
1694 }
1695 update_subgroup_set_needs_refresh(subgrp, 0);
1696 }
1697 return UPDWALK_CONTINUE;
1698 }
1699
1700 static int update_group_walkcb(struct hash_bucket *bucket, void *arg)
1701 {
1702 struct update_group *updgrp = bucket->data;
1703 struct updwalk_context *wctx = arg;
1704 int ret = (*wctx->cb)(updgrp, wctx->context);
1705 return ret;
1706 }
1707
1708 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1709 void *arg)
1710 {
1711 struct update_subgroup *subgrp;
1712 struct update_subgroup *tmp_subgrp;
1713 const char *reason = arg;
1714
1715 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1716 update_subgroup_check_merge(subgrp, reason);
1717 return UPDWALK_CONTINUE;
1718 }
1719
1720 /********************
1721 * PUBLIC FUNCTIONS
1722 ********************/
1723
1724 /*
1725 * trigger function when a policy (route-map/filter-list/prefix-list/
1726 * distribute-list etc.) content changes. Go through all the
1727 * update groups and process the change.
1728 *
1729 * bgp: the bgp instance
1730 * ptype: the type of policy that got modified, see bgpd.h
1731 * pname: name of the policy
1732 * route_update: flag to control if an automatic update generation should
1733 * occur
1734  * start_event: flag that indicates if this is the beginning of the change,
1735  *              e.g. when the user is changing the content interactively
1736  *              over multiple statements. Useful to set the dirty flag on
1737  *              update groups.
1738 */
1739 void update_group_policy_update(struct bgp *bgp, enum bgp_policy_type ptype,
1740 const char *pname, bool route_update,
1741 int start_event)
1742 {
1743 struct updwalk_context ctx;
1744
1745 memset(&ctx, 0, sizeof(ctx));
1746 ctx.policy_type = ptype;
1747 ctx.policy_name = pname;
1748 ctx.policy_route_update = route_update;
1749 ctx.policy_event_start_flag = start_event;
1750 ctx.flags = 0;
1751
1752 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1753 }
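
/*
 * Illustration only (hypothetical caller, not part of this file): a policy
 * change handler that lets the user edit a route-map over several
 * statements might bracket the edit like so, so that affected subgroups are
 * first marked dirty and only refreshed once at the end ("name" is a
 * placeholder for the policy name being edited):
 *
 *	update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, name,
 *				   true, 1);	- start of the change
 *	... user keeps editing the route-map ...
 *	update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, name,
 *				   true, 0);	- change complete, refresh
 */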
1754
1755 /*
1756 * update_subgroup_split_peer
1757 *
1758 * Ensure that the given peer is in a subgroup of its own in the
1759 * specified update group.
1760 */
1761 void update_subgroup_split_peer(struct peer_af *paf,
1762 struct update_group *updgrp)
1763 {
1764 struct update_subgroup *old_subgrp, *subgrp;
1765 uint64_t old_id;
1766
1767
1768 old_subgrp = paf->subgroup;
1769
1770 if (!updgrp)
1771 updgrp = old_subgrp->update_group;
1772
1773 /*
1774 * If the peer is alone in its subgroup, reuse the existing
1775 * subgroup.
1776 */
1777 if (old_subgrp->peer_count == 1) {
1778 if (updgrp == old_subgrp->update_group)
1779 return;
1780
1781 subgrp = old_subgrp;
1782 old_id = old_subgrp->update_group->id;
1783
1784 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1785 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1786 }
1787
1788 update_group_remove_subgroup(old_subgrp->update_group,
1789 old_subgrp);
1790 update_group_add_subgroup(updgrp, subgrp);
1791
1792 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1793 UPDGRP_PEER_DBG_EN(updgrp);
1794 }
1795 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1796 zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s moved to u%" PRIu64 ":s%" PRIu64,
1797 old_id, subgrp->id, paf->peer->host,
1798 updgrp->id, subgrp->id);
1799
1800 /*
1801 * The state of the subgroup (adj_out, advs, packet queue etc)
1802 * is consistent internally, but may not be identical to other
1803 * subgroups in the new update group even if the version number
1804 * matches up. Make sure a full refresh is done before the
1805 * subgroup is merged with another.
1806 */
1807 update_subgroup_set_needs_refresh(subgrp, 1);
1808
1809 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1810 return;
1811 }
1812
1813 /*
1814 * Create a new subgroup under the specified update group, and copy
1815 * over relevant state to it.
1816 */
1817 subgrp = update_subgroup_create(updgrp);
1818 update_subgroup_inherit_info(subgrp, old_subgrp);
1819
1820 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1821 subgrp->split_from.subgroup_id = old_subgrp->id;
1822
1823 /*
1824 * Copy out relevant state from the old subgroup.
1825 */
1826 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1827 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1828
1829 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1830 zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s split and moved into u%" PRIu64":s%" PRIu64,
1831 paf->subgroup->update_group->id, paf->subgroup->id,
1832 paf->peer->host, updgrp->id, subgrp->id);
1833
1834 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1835
1836 /*
1837 * Since queued advs were left behind, this new subgroup needs a
1838 * refresh.
1839 */
1840 update_subgroup_set_needs_refresh(subgrp, 1);
1841
1842 /*
1843 * Remove peer from old subgroup, and add it to the new one.
1844 */
1845 update_subgroup_remove_peer(paf->subgroup, paf);
1846
1847 update_subgroup_add_peer(subgrp, paf, 1);
1848 }
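
/*
 * Usage sketch: passing NULL as the update group splits the peer off
 * within its current update group, e.g. right before announcing routes
 * to just that peer (see peer_af_announce_route() below):
 *
 *   update_subgroup_split_peer(paf, NULL);
 */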
1849
1850 void update_bgp_group_init(struct bgp *bgp)
1851 {
1852 int afid;
1853
1854 AF_FOREACH (afid)
1855 bgp->update_groups[afid] =
1856 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
1857 "BGP Update Group Hash");
1858 }
1859
1860 void update_bgp_group_free(struct bgp *bgp)
1861 {
1862 int afid;
1863
1864 AF_FOREACH (afid) {
1865 if (bgp->update_groups[afid]) {
1866 hash_free(bgp->update_groups[afid]);
1867 bgp->update_groups[afid] = NULL;
1868 }
1869 }
1870 }
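
/*
 * Lifecycle sketch: the init/free pair above is expected to bracket the
 * lifetime of a bgp instance, giving each AF index its own hash of
 * update groups in between:
 *
 *   update_bgp_group_init(bgp);
 *   ...per-AF update groups are created/merged/pruned here...
 *   update_bgp_group_free(bgp);
 */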
1871
1872 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1873 uint64_t subgrp_id, bool uj)
1874 {
1875 struct updwalk_context ctx;
1876 json_object *json_vrf_obj = NULL;
1877
1878 memset(&ctx, 0, sizeof(ctx));
1879 ctx.vty = vty;
1880 ctx.subgrp_id = subgrp_id;
1881 ctx.uj = uj;
1882
1883 if (uj) {
1884 ctx.json_updategrps = json_object_new_object();
1885 json_vrf_obj = json_object_new_object();
1886 }
1887
1888 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1889
1890 if (uj) {
1891 const char *vname;
1892
1893 if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)
1894 vname = VRF_DEFAULT_NAME;
1895 else
1896 vname = bgp->name;
1897 json_object_object_add(json_vrf_obj, vname,
1898 ctx.json_updategrps);
1899 vty_json(vty, json_vrf_obj);
1900 }
1901 }
1902
1903 /*
1904 * update_group_show_stats
1905 *
1906 * Show global statistics about update groups.
1907 */
1908 void update_group_show_stats(struct bgp *bgp, struct vty *vty)
1909 {
1910 vty_out(vty, "Update groups created: %u\n",
1911 bgp->update_group_stats.updgrps_created);
1912 vty_out(vty, "Update groups deleted: %u\n",
1913 bgp->update_group_stats.updgrps_deleted);
1914 vty_out(vty, "Update subgroups created: %u\n",
1915 bgp->update_group_stats.subgrps_created);
1916 vty_out(vty, "Update subgroups deleted: %u\n",
1917 bgp->update_group_stats.subgrps_deleted);
1918 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1919 vty_out(vty, "Prune events: %u\n",
1920 bgp->update_group_stats.prune_events);
1921 vty_out(vty, "Merge events: %u\n",
1922 bgp->update_group_stats.merge_events);
1923 vty_out(vty, "Split events: %u\n",
1924 bgp->update_group_stats.split_events);
1925 vty_out(vty, "Update group switch events: %u\n",
1926 bgp->update_group_stats.updgrp_switch_events);
1927 vty_out(vty, "Peer route refreshes combined: %u\n",
1928 bgp->update_group_stats.peer_refreshes_combined);
1929 vty_out(vty, "Merge checks triggered: %u\n",
1930 bgp->update_group_stats.merge_checks_triggered);
1931 }
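
/*
 * Illustrative output of the function above (counter values are
 * hypothetical):
 *
 *   Update groups created: 4
 *   Update groups deleted: 1
 *   Update subgroups created: 6
 *   Update subgroups deleted: 2
 *   Join events: 12
 *   Split events: 3
 *   ...
 */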
1932
1933 /*
1934 * update_group_adjust_peer
1935 */
1936 void update_group_adjust_peer(struct peer_af *paf)
1937 {
1938 struct update_group *updgrp;
1939 struct update_subgroup *subgrp, *old_subgrp;
1940 struct peer *peer;
1941
1942 if (!paf)
1943 return;
1944
1945 peer = PAF_PEER(paf);
1946 if (!peer_established(peer)) {
1947 return;
1948 }
1949
1950 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1951 return;
1952 }
1953
1954 if (!peer->afc_nego[paf->afi][paf->safi]) {
1955 return;
1956 }
1957
1958 updgrp = update_group_find(paf);
1959 if (!updgrp) {
1960 updgrp = update_group_create(paf);
1961 if (!updgrp) {
1962 flog_err(EC_BGP_UPDGRP_CREATE,
1963 "couldn't create update group for peer %s",
1964 paf->peer->host);
1965 return;
1966 }
1967 }
1968
1969 old_subgrp = paf->subgroup;
1970
1971 if (old_subgrp) {
1972
1973 /*
1974 * If the update group of the peer is unchanged, the peer can stay
1975 * in its existing subgroup and we're done.
1977 */
1978 if (old_subgrp->update_group == updgrp)
1979 return;
1980
1981 /*
1982 * The peer is switching between update groups. Put it in its
1983 * own subgroup under the new update group.
1984 */
1985 update_subgroup_split_peer(paf, updgrp);
1986 return;
1987 }
1988
1989 subgrp = update_subgroup_find(updgrp, paf);
1990 if (!subgrp) {
1991 subgrp = update_subgroup_create(updgrp);
1992 if (!subgrp)
1993 return;
1994 }
1995
1996 update_subgroup_add_peer(subgrp, paf, 1);
1997 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1998 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1999 subgrp->id, paf->peer->host);
2000
2001 return;
2002 }
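
/*
 * Usage sketch: callers generally resolve the peer_af for an address
 * family first and then let this function place it in the right update
 * group and subgroup (peer_af_find() is the lookup used elsewhere in
 * bgpd):
 *
 *   struct peer_af *paf = peer_af_find(peer, afi, safi);
 *
 *   if (paf)
 *           update_group_adjust_peer(paf);
 */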
2003
2004 int update_group_adjust_soloness(struct peer *peer, int set)
2005 {
2006 struct peer_group *group;
2007 struct listnode *node, *nnode;
2008
2009 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
2010 peer_lonesoul_or_not(peer, set);
2011 if (peer_established(peer))
2012 bgp_announce_route_all(peer);
2013 } else {
2014 group = peer->group;
2015 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
2016 peer_lonesoul_or_not(peer, set);
2017 if (peer_established(peer))
2018 bgp_announce_route_all(peer);
2019 }
2020 }
2021 return 0;
2022 }
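
/*
 * Usage sketch: this is intended to back a per-neighbor "solo" style
 * setting (assumption about the CLI wiring); applied to a peer-group it
 * is pushed to every member:
 *
 *   update_group_adjust_soloness(peer, 1);    (mark solo)
 *   update_group_adjust_soloness(peer, 0);    (clear it)
 */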
2023
2024 /*
2025 * update_subgroup_rib
2026 */
2027 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
2028 {
2029 struct bgp *bgp;
2030
2031 bgp = SUBGRP_INST(subgrp);
2032 if (!bgp)
2033 return NULL;
2034
2035 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
2036 }
2037
2038 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
2039 updgrp_walkcb cb, void *ctx)
2040 {
2041 struct updwalk_context wctx;
2042 int afid;
2043
2044 if (!bgp)
2045 return;
2046 afid = afindex(afi, safi);
2047 if (afid >= BGP_AF_MAX)
2048 return;
2049
2050 memset(&wctx, 0, sizeof(wctx));
2051 wctx.cb = cb;
2052 wctx.context = ctx;
2053
2054 if (bgp->update_groups[afid])
2055 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
2056 }
2057
2058 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
2059 {
2060 afi_t afi;
2061 safi_t safi;
2062
2063 FOREACH_AFI_SAFI (afi, safi) {
2064 update_group_af_walk(bgp, afi, safi, cb, ctx);
2065 }
2066 }
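
/*
 * A minimal walk callback, modelled on the callbacks below: it is
 * invoked once per update group with the opaque context passed to the
 * walk, and returns UPDWALK_CONTINUE to keep iterating (the callback
 * name is hypothetical):
 *
 *   static int my_walkcb(struct update_group *updgrp, void *arg)
 *   {
 *           zlog_debug("visiting update group u%" PRIu64, updgrp->id);
 *           return UPDWALK_CONTINUE;
 *   }
 *
 *   update_group_walk(bgp, my_walkcb, NULL);
 */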
2067
2068 void update_group_periodic_merge(struct bgp *bgp)
2069 {
2070 char reason[] = "periodic merge check";
2071
2072 update_group_walk(bgp, update_group_periodic_merge_walkcb,
2073 (void *)reason);
2074 }
2075
2076 static int
2077 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
2078 void *arg)
2079 {
2080 struct update_subgroup *subgrp;
2081 struct peer *peer;
2082 afi_t afi;
2083 safi_t safi;
2084
2085 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
2086 peer = SUBGRP_PEER(subgrp);
2087 afi = SUBGRP_AFI(subgrp);
2088 safi = SUBGRP_SAFI(subgrp);
2089
2090 if (peer->default_rmap[afi][safi].name) {
2091 /*
2092 * When there is a change in the route-map, this flow is
2093 * triggered. We need to unset the flag to ensure the
2094 * update flow gets triggered.
2095 */
2096 UNSET_FLAG(subgrp->sflags,
2097 SUBGRP_STATUS_DEFAULT_ORIGINATE);
2098 subgroup_default_originate(subgrp, 0);
2099 }
2100 }
2101
2102 return UPDWALK_CONTINUE;
2103 }
2104
2105 void update_group_refresh_default_originate_route_map(struct thread *thread)
2106 {
2107 struct bgp *bgp;
2108 char reason[] = "refresh default-originate route-map";
2109
2110 bgp = THREAD_ARG(thread);
2111 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
2112 reason);
2113 THREAD_OFF(bgp->t_rmap_def_originate_eval);
2114 bgp_unlock(bgp);
2115 }
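
/*
 * Scheduling sketch: the handler above expects the caller to hold a
 * reference on bgp and to have armed t_rmap_def_originate_eval, which
 * it then turns off and releases.  Something along these lines, with
 * "delay" standing in for the actual evaluation delay:
 *
 *   bgp_lock(bgp);
 *   thread_add_timer(bm->master,
 *                    update_group_refresh_default_originate_route_map,
 *                    bgp, delay, &bgp->t_rmap_def_originate_eval);
 */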
2116
2117 /*
2118 * peer_af_announce_route
2119 *
2120 * Refreshes routes out to a peer_af immediately.
2121 *
2122 * If the combine parameter is true, then this function will try to
2123 * gather other peers in the subgroup for which a route announcement
2124 * is pending and efficiently announce routes to all of them.
2125 *
2126 * For now, the 'combine' option has an effect only if all peers in
2127 * the subgroup have a route announcement pending.
2128 */
2129 void peer_af_announce_route(struct peer_af *paf, int combine)
2130 {
2131 struct update_subgroup *subgrp;
2132 struct peer_af *cur_paf;
2133 int all_pending;
2134
2135 subgrp = paf->subgroup;
2136 all_pending = 0;
2137
2138 if (combine) {
2139 /*
2140 * If other peers in the subgroup also have routes to be
2141 * announced, combine the announcement with them instead of
2142 * splitting this peer off into its own subgroup.
2144 *
2145 * For now, we combine only if all peers in the subgroup have an
2146 * announcement pending.
2147 */
2148 all_pending = 1;
2149
2150 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
2151 if (cur_paf == paf)
2152 continue;
2153
2154 if (cur_paf->t_announce_route)
2155 continue;
2156
2157 all_pending = 0;
2158 break;
2159 }
2160 }
2161 /*
2162 * Announce to the peer alone if we were not asked to combine peers,
2163 * or if some peers don't have a route announcement pending.
2164 */
2165 if (!combine || !all_pending) {
2166 update_subgroup_split_peer(paf, NULL);
2167 subgrp = paf->subgroup;
2168
2169 assert(subgrp && subgrp->update_group);
2170 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
2171 zlog_debug("u%" PRIu64 ":s%" PRIu64" %s announcing routes",
2172 subgrp->update_group->id, subgrp->id,
2173 paf->peer->host);
2174
2175 subgroup_announce_route(paf->subgroup);
2176 return;
2177 }
2178
2179 /*
2180 * We will announce routes to the entire subgroup.
2181 *
2182 * First stop refresh timers on all the other peers.
2183 */
2184 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
2185 if (cur_paf == paf)
2186 continue;
2187
2188 bgp_stop_announce_route_timer(cur_paf);
2189 }
2190
2191 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
2192 zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes to %s, combined into %d peers",
2193 subgrp->update_group->id, subgrp->id,
2194 paf->peer->host, subgrp->peer_count);
2195
2196 subgroup_announce_route(subgrp);
2197
2198 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
2199 subgrp->peer_count - 1);
2200 }
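
/*
 * Usage sketch: the per-peer announce-route timer expiry path is the
 * typical caller and asks for combining, so peers whose timers fire
 * around the same time can share a single table walk (assumption about
 * the call site):
 *
 *   peer_af_announce_route(paf, 1);
 */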
2201
2202 void subgroup_trigger_write(struct update_subgroup *subgrp)
2203 {
2204 struct peer_af *paf;
2205
2206 /*
2207 * For each peer in the subgroup, schedule a job to pull packets from
2208 * the subgroup output queue into their own output queue. This action
2209 * will trigger a write job on the I/O thread.
2210 */
2211 SUBGRP_FOREACH_PEER (subgrp, paf)
2212 if (peer_established(paf->peer))
2213 thread_add_timer_msec(
2214 bm->master, bgp_generate_updgrp_packets,
2215 paf->peer, 0,
2216 &paf->peer->t_generate_updgrp_packets);
2217 }
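
/*
 * Note: the zero-millisecond timer above effectively schedules the
 * packet-generation job to run on the next pass of the main thread,
 * i.e. "as soon as possible" rather than after a real delay.
 */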
2218
2219 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
2220 {
2221 UPDGRP_PEER_DBG_OFF(updgrp);
2222 return UPDWALK_CONTINUE;
2223 }
2224
2225 /* Return true if we should addpath encode NLRI to this peer */
2226 bool bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
2227 {
2228 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
2229 && CHECK_FLAG(peer->af_cap[afi][safi],
2230 PEER_CAP_ADDPATH_AF_RX_RCV));
2231 }
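
/*
 * In other words: addpath is encoded towards a peer only when our
 * transmit capability for the AF was advertised and the peer's receive
 * capability was seen, i.e. both ends agreed on addpath in our TX
 * direction.
 */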
2232
2233 bool bgp_check_selected(struct bgp_path_info *bpi, struct peer *peer,
2234 bool addpath_capable, afi_t afi, safi_t safi)
2235 {
2236 return (CHECK_FLAG(bpi->flags, BGP_PATH_SELECTED) ||
2237 (addpath_capable &&
2238 bgp_addpath_tx_path(peer->addpath_type[afi][safi], bpi)));
2239 }