]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_updgrp.c
Merge pull request #3142 from donaldsharp/bgp_peerhash
[mirror_frr.git] / bgpd / bgp_updgrp.c
1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_errors.h"
51 #include "bgpd/bgp_fsm.h"
52 #include "bgpd/bgp_advertise.h"
53 #include "bgpd/bgp_packet.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_route.h"
56 #include "bgpd/bgp_filter.h"
57 #include "bgpd/bgp_io.h"
58
59 /********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63 /**
64 * assign a unique ID to update group and subgroup. Mostly for display/
65 * debugging purposes. It's a 64-bit space - used leisurely without a
66 * worry about its wrapping and about filling gaps. While at it, timestamp
67 * the creation.
68 */
69 static void update_group_checkin(struct update_group *updgrp)
70 {
71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock();
73 }
74
75 static void update_subgroup_checkin(struct update_subgroup *subgrp,
76 struct update_group *updgrp)
77 {
78 subgrp->id = ++bm->subgrp_idspace;
79 subgrp->uptime = bgp_clock();
80 }
81
82 static void sync_init(struct update_subgroup *subgrp)
83 {
84 subgrp->sync =
85 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
86 BGP_ADV_FIFO_INIT(&subgrp->sync->update);
87 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw);
88 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw_low);
89 subgrp->hash =
90 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
91
92 /* We use a larger buffer for subgrp->work in the event that:
93 * - We RX a BGP_UPDATE where the attributes alone are just
94 * under BGP_MAX_PACKET_SIZE
95 * - The user configures an outbound route-map that does many as-path
96 * prepends or adds many communities. At most they can have
97 * CMD_ARGC_MAX
98 * args in a route-map so there is a finite limit on how large they
99 * can
100 * make the attributes.
101 *
102 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid
103 * bounds
104 * checking for every single attribute as we construct an UPDATE.
105 */
106 subgrp->work =
107 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
108 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
109 }
110
111 static void sync_delete(struct update_subgroup *subgrp)
112 {
113 if (subgrp->sync)
114 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
115 subgrp->sync = NULL;
116 if (subgrp->hash)
117 hash_free(subgrp->hash);
118 subgrp->hash = NULL;
119 if (subgrp->work)
120 stream_free(subgrp->work);
121 subgrp->work = NULL;
122 if (subgrp->scratch)
123 stream_free(subgrp->scratch);
124 subgrp->scratch = NULL;
125 }
126
127 /**
128 * conf_copy
129 *
130 * copy only those fields that are relevant to update group match
131 */
static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
		      safi_t safi)
{
	struct bgp_filter *srcfilter;
	struct bgp_filter *dstfilter;

	srcfilter = &src->filter[afi][safi];
	dstfilter = &dst->filter[afi][safi];

	/* Scalar fields that influence update-group membership. */
	dst->bgp = src->bgp;
	dst->sort = src->sort;
	dst->as = src->as;
	dst->v_routeadv = src->v_routeadv;
	dst->flags = src->flags;
	dst->af_flags[afi][safi] = src->af_flags[afi][safi];
	/* Drop any host string from a previous copy so it isn't leaked. */
	if (dst->host)
		XFREE(MTYPE_BGP_PEER_HOST, dst->host);

	dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
	dst->cap = src->cap;
	dst->af_cap[afi][safi] = src->af_cap[afi][safi];
	dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
	dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
	dst->local_as = src->local_as;
	dst->change_local_as = src->change_local_as;
	dst->shared_network = src->shared_network;
	memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));

	dst->group = src->group;

	/* Name strings are duplicated (and must later be released via
	 * conf_release()); the compiled map/list pointers are shared.
	 */
	if (src->default_rmap[afi][safi].name) {
		dst->default_rmap[afi][safi].name =
			XSTRDUP(MTYPE_ROUTE_MAP_NAME,
				src->default_rmap[afi][safi].name);
		dst->default_rmap[afi][safi].map =
			src->default_rmap[afi][safi].map;
	}

	if (DISTRIBUTE_OUT_NAME(srcfilter)) {
		DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
		DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
	}

	if (PREFIX_LIST_OUT_NAME(srcfilter)) {
		PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
		PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
	}

	if (FILTER_LIST_OUT_NAME(srcfilter)) {
		FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
		FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
	}

	if (ROUTE_MAP_OUT_NAME(srcfilter)) {
		ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
		ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
	}

	if (UNSUPPRESS_MAP_NAME(srcfilter)) {
		UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
		UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
	}
}
200
201 /**
202 * since we did a bunch of XSTRDUP's in conf_copy, time to free them up
203 */
204 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
205 {
206 struct bgp_filter *srcfilter;
207
208 srcfilter = &src->filter[afi][safi];
209
210 if (src->default_rmap[afi][safi].name)
211 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
212
213 if (srcfilter->dlist[FILTER_OUT].name)
214 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
215
216 if (srcfilter->plist[FILTER_OUT].name)
217 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
218
219 if (srcfilter->aslist[FILTER_OUT].name)
220 XFREE(MTYPE_BGP_FILTER_NAME,
221 srcfilter->aslist[FILTER_OUT].name);
222
223 if (srcfilter->map[RMAP_OUT].name)
224 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
225
226 if (srcfilter->usmap.name)
227 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
228
229 if (src->host)
230 XFREE(MTYPE_BGP_PEER_HOST, src->host);
231 src->host = NULL;
232 }
233
234 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
235 {
236 struct peer *src;
237 struct peer *dst;
238
239 if (!updgrp || !paf)
240 return;
241
242 src = paf->peer;
243 dst = updgrp->conf;
244 if (!src || !dst)
245 return;
246
247 updgrp->afi = paf->afi;
248 updgrp->safi = paf->safi;
249 updgrp->afid = paf->afid;
250 updgrp->bgp = src->bgp;
251
252 conf_copy(dst, src, paf->afi, paf->safi);
253 }
254
255 /**
256 * auxiliary functions to maintain the hash table.
257 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
258 * - updgrp_hash_key_make - makes the key for update group search
259 * - updgrp_hash_cmp - compare two update groups.
260 */
261 static void *updgrp_hash_alloc(void *p)
262 {
263 struct update_group *updgrp;
264 const struct update_group *in;
265
266 in = (const struct update_group *)p;
267 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
268 memcpy(updgrp, in, sizeof(struct update_group));
269 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
270 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
271 return updgrp;
272 }
273
274 /**
275 * The hash value for a peer is computed from the following variables:
276 * v = f(
277 * 1. IBGP (1) or EBGP (2)
278 * 2. FLAGS based on configuration:
279 * LOCAL_AS_NO_PREPEND
280 * LOCAL_AS_REPLACE_AS
281 * 3. AF_FLAGS based on configuration:
282 * Refer to definition in bgp_updgrp.h
283 * 4. (AF-independent) Capability flags:
284 * AS4_RCV capability
285 * 5. (AF-dependent) Capability flags:
286 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
287 * 6. MRAI
288 * 7. peer-group name
289 * 8. Outbound route-map name (neighbor route-map <> out)
290 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
291 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
292 * 11. Outbound as-list name (neighbor filter-list <> out)
293 * 12. Unsuppress map name (neighbor unsuppress-map <>)
294 * 13. default rmap name (neighbor default-originate route-map <>)
295 * 14. encoding both global and link-local nexthop?
296 * 15. If peer is configured to be a lonesoul, peer ip address
297 * 16. Local-as should match, if configured.
298 * )
299 */
static unsigned int updgrp_hash_key_make(void *p)
{
	const struct update_group *updgrp;
	const struct peer *peer;
	const struct bgp_filter *filter;
	uint32_t flags;
	uint32_t key;
	afi_t afi;
	safi_t safi;

#define SEED1 999331
#define SEED2 2147483647

	updgrp = p;
	peer = updgrp->conf;
	afi = updgrp->afi;
	safi = updgrp->safi;
	flags = peer->af_flags[afi][safi];
	filter = &peer->filter[afi][safi];

	key = 0;

	/* NOTE: the fold order below is part of the hash value; any
	 * reordering changes every key.  Each masked word corresponds to
	 * one item of the hash-input list documented above.
	 */
	key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
	key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
	key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
	key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
	key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
			  key);
	key = jhash_1word(peer->v_routeadv, key); /* MRAI */
	key = jhash_1word(peer->change_local_as, key);

	/* Names are hashed with SEED1 first, then folded into the key. */
	if (peer->group)
		key = jhash_1word(jhash(peer->group->name,
					strlen(peer->group->name), SEED1),
				  key);

	if (filter->map[RMAP_OUT].name)
		key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
					strlen(filter->map[RMAP_OUT].name),
					SEED1),
				  key);

	if (filter->dlist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
					strlen(filter->dlist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->plist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
					strlen(filter->plist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->aslist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
					strlen(filter->aslist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->usmap.name)
		key = jhash_1word(jhash(filter->usmap.name,
					strlen(filter->usmap.name), SEED1),
				  key);

	if (peer->default_rmap[afi][safi].name)
		key = jhash_1word(
			jhash(peer->default_rmap[afi][safi].name,
			      strlen(peer->default_rmap[afi][safi].name),
			      SEED1),
			key);

	/* If peer is on a shared network and is exchanging IPv6 prefixes,
	 * it needs to include link-local address. That's different from
	 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
	 * bytes). We create different update groups to take care of that.
	 */
	key = jhash_1word(
		(peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
		key);

	/*
	 * There are certain peers that must get their own update-group:
	 * - lonesoul peers
	 * - peers that negotiated ORF
	 * Hashing the peer's own host string (with SEED2) isolates them.
	 */
	if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
	    || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
	    || CHECK_FLAG(peer->af_cap[afi][safi],
			  PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
		key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
				  key);

	return key;
}
395
396 static int updgrp_hash_cmp(const void *p1, const void *p2)
397 {
398 const struct update_group *grp1;
399 const struct update_group *grp2;
400 const struct peer *pe1;
401 const struct peer *pe2;
402 uint32_t flags1;
403 uint32_t flags2;
404 const struct bgp_filter *fl1;
405 const struct bgp_filter *fl2;
406 afi_t afi;
407 safi_t safi;
408
409 if (!p1 || !p2)
410 return 0;
411
412 grp1 = p1;
413 grp2 = p2;
414 pe1 = grp1->conf;
415 pe2 = grp2->conf;
416 afi = grp1->afi;
417 safi = grp1->safi;
418 flags1 = pe1->af_flags[afi][safi];
419 flags2 = pe2->af_flags[afi][safi];
420 fl1 = &pe1->filter[afi][safi];
421 fl2 = &pe2->filter[afi][safi];
422
423 /* put EBGP and IBGP peers in different update groups */
424 if (pe1->sort != pe2->sort)
425 return 0;
426
427 /* check peer flags */
428 if ((pe1->flags & PEER_UPDGRP_FLAGS)
429 != (pe2->flags & PEER_UPDGRP_FLAGS))
430 return 0;
431
432 /* If there is 'local-as' configured, it should match. */
433 if (pe1->change_local_as != pe2->change_local_as)
434 return 0;
435
436 /* flags like route reflector client */
437 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
438 return 0;
439
440 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
441 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
442 return 0;
443
444 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
445 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
446 return 0;
447
448 if (pe1->v_routeadv != pe2->v_routeadv)
449 return 0;
450
451 if (pe1->group != pe2->group)
452 return 0;
453
454 /* route-map names should be the same */
455 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
456 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
457 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
458 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
459 return 0;
460
461 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
462 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
463 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
464 && strcmp(fl1->dlist[FILTER_OUT].name,
465 fl2->dlist[FILTER_OUT].name)))
466 return 0;
467
468 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
469 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
470 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
471 && strcmp(fl1->plist[FILTER_OUT].name,
472 fl2->plist[FILTER_OUT].name)))
473 return 0;
474
475 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
476 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
477 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
478 && strcmp(fl1->aslist[FILTER_OUT].name,
479 fl2->aslist[FILTER_OUT].name)))
480 return 0;
481
482 if ((fl1->usmap.name && !fl2->usmap.name)
483 || (!fl1->usmap.name && fl2->usmap.name)
484 || (fl1->usmap.name && fl2->usmap.name
485 && strcmp(fl1->usmap.name, fl2->usmap.name)))
486 return 0;
487
488 if ((pe1->default_rmap[afi][safi].name
489 && !pe2->default_rmap[afi][safi].name)
490 || (!pe1->default_rmap[afi][safi].name
491 && pe2->default_rmap[afi][safi].name)
492 || (pe1->default_rmap[afi][safi].name
493 && pe2->default_rmap[afi][safi].name
494 && strcmp(pe1->default_rmap[afi][safi].name,
495 pe2->default_rmap[afi][safi].name)))
496 return 0;
497
498 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
499 return 0;
500
501 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
502 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
503 || CHECK_FLAG(pe1->af_cap[afi][safi],
504 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
505 && !sockunion_same(&pe1->su, &pe2->su))
506 return 0;
507
508 return 1;
509 }
510
511 static void peer_lonesoul_or_not(struct peer *peer, int set)
512 {
513 /* no change in status? */
514 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
515 return;
516
517 if (set)
518 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
519 else
520 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
521
522 update_group_adjust_peer_afs(peer);
523 }
524
525 /*
526 * subgroup_total_packets_enqueued
527 *
528 * Returns the total number of packets enqueued to a subgroup.
529 */
530 static unsigned int
531 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
532 {
533 struct bpacket *pkt;
534
535 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
536
537 return pkt->ver - 1;
538 }
539
/* Walk callback for "show bgp update-groups": dump one update group
 * (and its subgroups) to the vty.  When ctx->subgrp_id is nonzero the
 * output is restricted to the group containing that subgroup.
 */
static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct vty *vty;
	struct update_subgroup *subgrp;
	struct peer_af *paf;
	struct bgp_filter *filter;
	int match = 0;

	if (!ctx)
		return CMD_SUCCESS;

	/* When filtering by subgroup id, only show the owning group. */
	if (ctx->subgrp_id) {
		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
			if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
				continue;
			else {
				match = 1;
				break;
			}
		}
	} else {
		match = 1;
	}

	if (!match) {
		/* Since this routine is invoked from a walk, we cannot signal
		 * any */
		/* error here, can only return. */
		return CMD_SUCCESS;
	}

	vty = ctx->vty;

	vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
	vty_out(vty, "  Created: %s", timestamp_string(updgrp->uptime));
	filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
	if (filter->map[RMAP_OUT].name)
		vty_out(vty, "  Outgoing route map: %s%s\n",
			filter->map[RMAP_OUT].map ? "X" : "",
			filter->map[RMAP_OUT].name);
	vty_out(vty, "  MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
	if (updgrp->conf->change_local_as)
		vty_out(vty, "  Local AS %u%s%s\n",
			updgrp->conf->change_local_as,
			CHECK_FLAG(updgrp->conf->flags,
				   PEER_FLAG_LOCAL_AS_NO_PREPEND)
				? " no-prepend"
				: "",
			CHECK_FLAG(updgrp->conf->flags,
				   PEER_FLAG_LOCAL_AS_REPLACE_AS)
				? " replace-as"
				: "");

	/* Per-subgroup detail: lineage, event counters, queue state. */
	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
			continue;
		vty_out(vty, "\n");
		vty_out(vty, "  Update-subgroup %" PRIu64 ":\n", subgrp->id);
		vty_out(vty, "    Created: %s",
			timestamp_string(subgrp->uptime));

		if (subgrp->split_from.update_group_id
		    || subgrp->split_from.subgroup_id) {
			vty_out(vty, "    Split from group id: %" PRIu64 "\n",
				subgrp->split_from.update_group_id);
			vty_out(vty,
				"    Split from subgroup id: %" PRIu64 "\n",
				subgrp->split_from.subgroup_id);
		}

		vty_out(vty, "    Join events: %u\n", subgrp->join_events);
		vty_out(vty, "    Prune events: %u\n", subgrp->prune_events);
		vty_out(vty, "    Merge events: %u\n", subgrp->merge_events);
		vty_out(vty, "    Split events: %u\n", subgrp->split_events);
		vty_out(vty, "    Update group switch events: %u\n",
			subgrp->updgrp_switch_events);
		vty_out(vty, "    Peer refreshes combined: %u\n",
			subgrp->peer_refreshes_combined);
		vty_out(vty, "    Merge checks triggered: %u\n",
			subgrp->merge_checks_triggered);
		vty_out(vty, "    Version: %" PRIu64 "\n", subgrp->version);
		vty_out(vty, "    Packet queue length: %d\n",
			bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
		vty_out(vty, "    Total packets enqueued: %u\n",
			subgroup_total_packets_enqueued(subgrp));
		vty_out(vty, "    Packet queue high watermark: %d\n",
			bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
		vty_out(vty, "    Adj-out list count: %u\n", subgrp->adj_count);
		vty_out(vty, "    Advertise list: %s\n",
			advertise_list_is_empty(subgrp) ? "empty"
							: "not empty");
		vty_out(vty, "    Flags: %s\n",
			CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
				? "R"
				: "");
		if (subgrp->peer_count > 0) {
			vty_out(vty, "    Peers:\n");
			SUBGRP_FOREACH_PEER (subgrp, paf)
				vty_out(vty, "      - %s\n", paf->peer->host);
		}
	}
	return UPDWALK_CONTINUE;
}
644
645 /*
646 * Helper function to show the packet queue for each subgroup of update group.
647 * Will be constrained to a particular subgroup id if id !=0
648 */
649 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
650 void *arg)
651 {
652 struct updwalk_context *ctx = arg;
653 struct update_subgroup *subgrp;
654 struct vty *vty;
655
656 vty = ctx->vty;
657 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
658 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
659 continue;
660 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
661 updgrp->id, subgrp->id);
662 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
663 }
664 return UPDWALK_CONTINUE;
665 }
666
667 /*
668 * Show the packet queue for each subgroup of update group. Will be
669 * constrained to a particular subgroup id if id !=0
670 */
671 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
672 struct vty *vty, uint64_t id)
673 {
674 struct updwalk_context ctx;
675
676 memset(&ctx, 0, sizeof(ctx));
677 ctx.vty = vty;
678 ctx.subgrp_id = id;
679 ctx.flags = 0;
680 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
681 &ctx);
682 }
683
684 static struct update_group *update_group_find(struct peer_af *paf)
685 {
686 struct update_group *updgrp;
687 struct update_group tmp;
688 struct peer tmp_conf;
689
690 if (!peer_established(PAF_PEER(paf)))
691 return NULL;
692
693 memset(&tmp, 0, sizeof(tmp));
694 memset(&tmp_conf, 0, sizeof(tmp_conf));
695 tmp.conf = &tmp_conf;
696 peer2_updgrp_copy(&tmp, paf);
697
698 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
699 conf_release(&tmp_conf, paf->afi, paf->safi);
700 return updgrp;
701 }
702
703 static struct update_group *update_group_create(struct peer_af *paf)
704 {
705 struct update_group *updgrp;
706 struct update_group tmp;
707 struct peer tmp_conf;
708
709 memset(&tmp, 0, sizeof(tmp));
710 memset(&tmp_conf, 0, sizeof(tmp_conf));
711 tmp.conf = &tmp_conf;
712 peer2_updgrp_copy(&tmp, paf);
713
714 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
715 updgrp_hash_alloc);
716 if (!updgrp)
717 return NULL;
718 update_group_checkin(updgrp);
719
720 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
721 zlog_debug("create update group %" PRIu64, updgrp->id);
722
723 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
724
725 conf_release(&tmp_conf, paf->afi, paf->safi);
726 return updgrp;
727 }
728
729 static void update_group_delete(struct update_group *updgrp)
730 {
731 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
732 zlog_debug("delete update group %" PRIu64, updgrp->id);
733
734 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
735
736 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
737 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
738
739 if (updgrp->conf->host)
740 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
741 updgrp->conf->host = NULL;
742
743 if (updgrp->conf->ifname)
744 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
745
746 XFREE(MTYPE_BGP_PEER, updgrp->conf);
747 XFREE(MTYPE_BGP_UPDGRP, updgrp);
748 }
749
750 static void update_group_add_subgroup(struct update_group *updgrp,
751 struct update_subgroup *subgrp)
752 {
753 if (!updgrp || !subgrp)
754 return;
755
756 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
757 subgrp->update_group = updgrp;
758 }
759
760 static void update_group_remove_subgroup(struct update_group *updgrp,
761 struct update_subgroup *subgrp)
762 {
763 if (!updgrp || !subgrp)
764 return;
765
766 LIST_REMOVE(subgrp, updgrp_train);
767 subgrp->update_group = NULL;
768 if (LIST_EMPTY(&(updgrp->subgrps)))
769 update_group_delete(updgrp);
770 }
771
772 static struct update_subgroup *
773 update_subgroup_create(struct update_group *updgrp)
774 {
775 struct update_subgroup *subgrp;
776
777 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
778 update_subgroup_checkin(subgrp, updgrp);
779 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
780 sync_init(subgrp);
781 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
782 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
783 TAILQ_INIT(&(subgrp->adjq));
784 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
785 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
786 subgrp->id);
787
788 update_group_add_subgroup(updgrp, subgrp);
789
790 UPDGRP_INCR_STAT(updgrp, subgrps_created);
791
792 return subgrp;
793 }
794
795 static void update_subgroup_delete(struct update_subgroup *subgrp)
796 {
797 if (!subgrp)
798 return;
799
800 if (subgrp->update_group)
801 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
802
803 if (subgrp->t_merge_check)
804 THREAD_OFF(subgrp->t_merge_check);
805
806 if (subgrp->t_coalesce)
807 THREAD_TIMER_OFF(subgrp->t_coalesce);
808
809 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
810 subgroup_clear_table(subgrp);
811
812 if (subgrp->t_coalesce)
813 THREAD_TIMER_OFF(subgrp->t_coalesce);
814 sync_delete(subgrp);
815
816 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
817 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
818 subgrp->update_group->id, subgrp->id);
819
820 update_group_remove_subgroup(subgrp->update_group, subgrp);
821
822 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
823 }
824
825 void update_subgroup_inherit_info(struct update_subgroup *to,
826 struct update_subgroup *from)
827 {
828 if (!to || !from)
829 return;
830
831 to->sflags = from->sflags;
832 }
833
834 /*
835 * update_subgroup_check_delete
836 *
837 * Delete a subgroup if it is ready to be deleted.
838 *
839 * Returns TRUE if the subgroup was deleted.
840 */
841 static int update_subgroup_check_delete(struct update_subgroup *subgrp)
842 {
843 if (!subgrp)
844 return 0;
845
846 if (!LIST_EMPTY(&(subgrp->peers)))
847 return 0;
848
849 update_subgroup_delete(subgrp);
850
851 return 1;
852 }
853
854 /*
855 * update_subgroup_add_peer
856 *
857 * @param send_enqueued_packets If true all currently enqueued packets will
858 * also be sent to the peer.
859 */
860 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
861 struct peer_af *paf,
862 int send_enqueued_pkts)
863 {
864 struct bpacket *pkt;
865
866 if (!subgrp || !paf)
867 return;
868
869 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
870 paf->subgroup = subgrp;
871 subgrp->peer_count++;
872
873 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
874 UPDGRP_PEER_DBG_EN(subgrp->update_group);
875 }
876
877 SUBGRP_INCR_STAT(subgrp, join_events);
878
879 if (send_enqueued_pkts) {
880 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
881 } else {
882
883 /*
884 * Hang the peer off of the last, placeholder, packet in the
885 * queue. This means it won't see any of the packets that are
886 * currently the queue.
887 */
888 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
889 assert(pkt->buffer == NULL);
890 }
891
892 bpacket_add_peer(pkt, paf);
893
894 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
895 }
896
897 /*
898 * update_subgroup_remove_peer_internal
899 *
900 * Internal function that removes a peer from a subgroup, but does not
901 * delete the subgroup. A call to this function must almost always be
902 * followed by a call to update_subgroup_check_delete().
903 *
904 * @see update_subgroup_remove_peer
905 */
906 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
907 struct peer_af *paf)
908 {
909 assert(subgrp && paf && subgrp->update_group);
910
911 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
912 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
913 }
914
915 bpacket_queue_remove_peer(paf);
916 LIST_REMOVE(paf, subgrp_train);
917 paf->subgroup = NULL;
918 subgrp->peer_count--;
919
920 SUBGRP_INCR_STAT(subgrp, prune_events);
921 }
922
923 /*
924 * update_subgroup_remove_peer
925 */
/*
 * update_subgroup_remove_peer
 *
 * Public removal entry point: detach the peer, reap the subgroup if it
 * became empty, otherwise see whether it can now merge with a sibling.
 */
void update_subgroup_remove_peer(struct update_subgroup *subgrp,
				 struct peer_af *paf)
{
	if (!subgrp || !paf)
		return;

	update_subgroup_remove_peer_internal(subgrp, paf);

	if (update_subgroup_check_delete(subgrp))
		return;

	/* Removing the peer may have emptied the packet queue, which can
	 * make this subgroup mergeable.
	 */
	update_subgroup_check_merge(subgrp, "removed peer from subgroup");
}
944
945 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
946 struct peer_af *paf)
947 {
948 struct update_subgroup *subgrp = NULL;
949 uint64_t version;
950
951 if (paf->subgroup) {
952 assert(0);
953 return NULL;
954 } else
955 version = 0;
956
957 if (!peer_established(PAF_PEER(paf)))
958 return NULL;
959
960 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
961 if (subgrp->version != version
962 || CHECK_FLAG(subgrp->sflags,
963 SUBGRP_STATUS_DEFAULT_ORIGINATE))
964 continue;
965
966 /*
967 * The version number is not meaningful on a subgroup that needs
968 * a refresh.
969 */
970 if (update_subgroup_needs_refresh(subgrp))
971 continue;
972
973 break;
974 }
975
976 return subgrp;
977 }
978
979 /*
980 * update_subgroup_ready_for_merge
981 *
982 * Returns TRUE if this subgroup is in a state that allows it to be
983 * merged into another subgroup.
984 */
/*
 * update_subgroup_ready_for_merge
 *
 * A subgroup may be merged only when nothing is in flight: no encoded
 * packets waiting to be written, no enqueued-but-unencoded updates,
 * and no pending refresh (whose adj-out cannot be compared reliably).
 */
static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
{
	return bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp))
	       && advertise_list_is_empty(subgrp)
	       && !update_subgroup_needs_refresh(subgrp);
}
1011
1012 /*
1013 * update_subgrp_can_merge_into
1014 *
1015 * Returns TRUE if the first subgroup can merge into the second
1016 * subgroup.
1017 */
1018 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1019 struct update_subgroup *target)
1020 {
1021
1022 if (subgrp == target)
1023 return 0;
1024
1025 /*
1026 * Both must have processed the BRIB to the same point in order to
1027 * be merged.
1028 */
1029 if (subgrp->version != target->version)
1030 return 0;
1031
1032 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1033 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1034 return 0;
1035
1036 if (subgrp->adj_count != target->adj_count)
1037 return 0;
1038
1039 return update_subgroup_ready_for_merge(target);
1040 }
1041
1042 /*
1043 * update_subgroup_merge
1044 *
1045 * Merge the first subgroup into the second one.
1046 */
1047 static void update_subgroup_merge(struct update_subgroup *subgrp,
1048 struct update_subgroup *target,
1049 const char *reason)
1050 {
1051 struct peer_af *paf;
1052 int result;
1053 int peer_count;
1054
1055 assert(subgrp->adj_count == target->adj_count);
1056
1057 peer_count = subgrp->peer_count;
1058
1059 while (1) {
1060 paf = LIST_FIRST(&subgrp->peers);
1061 if (!paf)
1062 break;
1063
1064 update_subgroup_remove_peer_internal(subgrp, paf);
1065
1066 /*
1067 * Add the peer to the target subgroup, while making sure that
1068 * any currently enqueued packets won't be sent to it. Enqueued
1069 * packets could, for example, result in an unnecessary withdraw
1070 * followed by an advertise.
1071 */
1072 update_subgroup_add_peer(target, paf, 0);
1073 }
1074
1075 SUBGRP_INCR_STAT(target, merge_events);
1076
1077 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1078 zlog_debug("u%" PRIu64 ":s%" PRIu64
1079 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1080 ", "
1081 "trigger: %s",
1082 subgrp->update_group->id, subgrp->id, peer_count,
1083 target->update_group->id, target->id,
1084 reason ? reason : "unknown");
1085
1086 result = update_subgroup_check_delete(subgrp);
1087 assert(result);
1088 }
1089
1090 /*
1091 * update_subgroup_check_merge
1092 *
1093 * Merge this subgroup into another subgroup if possible.
1094 *
1095 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1096 * should not be accessed in this case.
1097 */
1098 int update_subgroup_check_merge(struct update_subgroup *subgrp,
1099 const char *reason)
1100 {
1101 struct update_subgroup *target;
1102
1103 if (!update_subgroup_ready_for_merge(subgrp))
1104 return 0;
1105
1106 /*
1107 * Look for a subgroup to merge into.
1108 */
1109 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
1110 if (update_subgroup_can_merge_into(subgrp, target))
1111 break;
1112 }
1113
1114 if (!target)
1115 return 0;
1116
1117 update_subgroup_merge(subgrp, target, reason);
1118 return 1;
1119 }
1120
1121 /*
1122 * update_subgroup_merge_check_thread_cb
1123 */
1124 static int update_subgroup_merge_check_thread_cb(struct thread *thread)
1125 {
1126 struct update_subgroup *subgrp;
1127
1128 subgrp = THREAD_ARG(thread);
1129
1130 subgrp->t_merge_check = NULL;
1131
1132 update_subgroup_check_merge(subgrp, "triggered merge check");
1133 return 0;
1134 }
1135
1136 /*
1137 * update_subgroup_trigger_merge_check
1138 *
1139 * Triggers a call to update_subgroup_check_merge() on a clean context.
1140 *
1141 * @param force If true, the merge check will be triggered even if the
1142 * subgroup doesn't currently look ready for a merge.
1143 *
1144 * Returns TRUE if a merge check will be performed shortly.
1145 */
1146 int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1147 int force)
1148 {
1149 if (subgrp->t_merge_check)
1150 return 1;
1151
1152 if (!force && !update_subgroup_ready_for_merge(subgrp))
1153 return 0;
1154
1155 subgrp->t_merge_check = NULL;
1156 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1157 subgrp, 0, &subgrp->t_merge_check);
1158
1159 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1160
1161 return 1;
1162 }
1163
1164 /*
1165 * update_subgroup_copy_adj_out
1166 *
1167 * Helper function that clones the adj out (state about advertised
1168 * routes) from one subgroup to another. It assumes that the adj out
1169 * of the target subgroup is empty.
1170 */
1171 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1172 struct update_subgroup *dest)
1173 {
1174 struct bgp_adj_out *aout, *aout_copy;
1175
1176 SUBGRP_FOREACH_ADJ (source, aout) {
1177 /*
1178 * Copy the adj out.
1179 */
1180 aout_copy =
1181 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1182 aout_copy->attr =
1183 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
1184 }
1185
1186 dest->scount = source->scount;
1187 }
1188
1189 /*
1190 * update_subgroup_copy_packets
1191 *
1192 * Copy packets after and including the given packet to the subgroup
1193 * 'dest'.
1194 *
1195 * Returns the number of packets copied.
1196 */
1197 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1198 struct bpacket *pkt)
1199 {
1200 int count;
1201
1202 count = 0;
1203 while (pkt && pkt->buffer) {
1204 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1205 &pkt->arr);
1206 count++;
1207 pkt = bpacket_next(pkt);
1208 }
1209
1210 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
1211
1212 return count;
1213 }
1214
1215 static int updgrp_prefix_list_update(struct update_group *updgrp,
1216 const char *name)
1217 {
1218 struct peer *peer;
1219 struct bgp_filter *filter;
1220
1221 peer = UPDGRP_PEER(updgrp);
1222 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1223
1224 if (PREFIX_LIST_OUT_NAME(filter)
1225 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1226 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1227 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1228 return 1;
1229 }
1230 return 0;
1231 }
1232
1233 static int updgrp_filter_list_update(struct update_group *updgrp,
1234 const char *name)
1235 {
1236 struct peer *peer;
1237 struct bgp_filter *filter;
1238
1239 peer = UPDGRP_PEER(updgrp);
1240 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1241
1242 if (FILTER_LIST_OUT_NAME(filter)
1243 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1244 FILTER_LIST_OUT(filter) =
1245 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1246 return 1;
1247 }
1248 return 0;
1249 }
1250
1251 static int updgrp_distribute_list_update(struct update_group *updgrp,
1252 const char *name)
1253 {
1254 struct peer *peer;
1255 struct bgp_filter *filter;
1256
1257 peer = UPDGRP_PEER(updgrp);
1258 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1259
1260 if (DISTRIBUTE_OUT_NAME(filter)
1261 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1262 DISTRIBUTE_OUT(filter) = access_list_lookup(
1263 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1264 return 1;
1265 }
1266 return 0;
1267 }
1268
1269 static int updgrp_route_map_update(struct update_group *updgrp,
1270 const char *name, int *def_rmap_changed)
1271 {
1272 struct peer *peer;
1273 struct bgp_filter *filter;
1274 int changed = 0;
1275 afi_t afi;
1276 safi_t safi;
1277
1278 peer = UPDGRP_PEER(updgrp);
1279 afi = UPDGRP_AFI(updgrp);
1280 safi = UPDGRP_SAFI(updgrp);
1281 filter = &peer->filter[afi][safi];
1282
1283 if (ROUTE_MAP_OUT_NAME(filter)
1284 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1285 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1286
1287 changed = 1;
1288 }
1289
1290 if (UNSUPPRESS_MAP_NAME(filter)
1291 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1292 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1293 changed = 1;
1294 }
1295
1296 /* process default-originate route-map */
1297 if (peer->default_rmap[afi][safi].name
1298 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1299 peer->default_rmap[afi][safi].map =
1300 route_map_lookup_by_name(name);
1301 if (def_rmap_changed)
1302 *def_rmap_changed = 1;
1303 }
1304 return changed;
1305 }
1306
1307 /*
1308 * hash iteration callback function to process a policy change for an
1309 * update group. Check if the changed policy matches the updgrp's
1310 * outbound route-map or unsuppress-map or default-originate map or
1311 * filter-list or prefix-list or distribute-list.
1312 * Trigger update generation accordingly.
1313 */
static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct update_subgroup *subgrp;
	int changed = 0;
	int def_changed = 0;

	/* Nothing to do without a group, a context and a policy name. */
	if (!updgrp || !ctx || !ctx->policy_name)
		return UPDWALK_CONTINUE;

	/*
	 * Re-resolve the group's cached policy pointers. 'changed' is set
	 * when the group's outbound policy matched the modified one;
	 * 'def_changed' when the default-originate route-map matched.
	 */
	switch (ctx->policy_type) {
	case BGP_POLICY_ROUTE_MAP:
		changed = updgrp_route_map_update(updgrp, ctx->policy_name,
						  &def_changed);
		break;
	case BGP_POLICY_FILTER_LIST:
		changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
		break;
	case BGP_POLICY_PREFIX_LIST:
		changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
		break;
	case BGP_POLICY_DISTRIBUTE_LIST:
		changed =
			updgrp_distribute_list_update(updgrp, ctx->policy_name);
		break;
	default:
		break;
	}

	/* If not doing route update, return after updating "config" */
	if (!ctx->policy_route_update)
		return UPDWALK_CONTINUE;

	/* If nothing has changed, return after updating "config" */
	if (!changed && !def_changed)
		return UPDWALK_CONTINUE;

	/*
	 * If something has changed, at the beginning of a route-map
	 * modification
	 * event, mark each subgroup's needs-refresh bit. For one, it signals to
	 * whoever that the subgroup needs a refresh. Second, it prevents
	 * premature
	 * merge of this subgroup with another before a complete (outbound)
	 * refresh.
	 */
	if (ctx->policy_event_start_flag) {
		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
			update_subgroup_set_needs_refresh(subgrp, 1);
		}
		return UPDWALK_CONTINUE;
	}

	/*
	 * End of the modification event: re-announce affected subgroups
	 * and clear the needs-refresh bit set at the start of the event.
	 */
	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		if (changed) {
			if (bgp_debug_update(NULL, NULL, updgrp, 0))
				zlog_debug(
					"u%" PRIu64 ":s%" PRIu64
					" announcing routes upon policy %s (type %d) change",
					updgrp->id, subgrp->id,
					ctx->policy_name, ctx->policy_type);
			subgroup_announce_route(subgrp);
		}
		if (def_changed) {
			if (bgp_debug_update(NULL, NULL, updgrp, 0))
				zlog_debug(
					"u%" PRIu64 ":s%" PRIu64
					" announcing default upon default routemap %s change",
					updgrp->id, subgrp->id,
					ctx->policy_name);
			subgroup_default_originate(subgrp, 0);
		}
		update_subgroup_set_needs_refresh(subgrp, 0);
	}
	return UPDWALK_CONTINUE;
}
1390
1391 static int update_group_walkcb(struct hash_backet *backet, void *arg)
1392 {
1393 struct update_group *updgrp = backet->data;
1394 struct updwalk_context *wctx = arg;
1395 int ret = (*wctx->cb)(updgrp, wctx->context);
1396 return ret;
1397 }
1398
1399 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1400 void *arg)
1401 {
1402 struct update_subgroup *subgrp;
1403 struct update_subgroup *tmp_subgrp;
1404 const char *reason = arg;
1405
1406 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1407 update_subgroup_check_merge(subgrp, reason);
1408 return UPDWALK_CONTINUE;
1409 }
1410
1411 /********************
1412 * PUBLIC FUNCTIONS
1413 ********************/
1414
1415 /*
1416 * trigger function when a policy (route-map/filter-list/prefix-list/
1417 * distribute-list etc.) content changes. Go through all the
1418 * update groups and process the change.
1419 *
1420 * bgp: the bgp instance
1421 * ptype: the type of policy that got modified, see bgpd.h
1422 * pname: name of the policy
1423 * route_update: flag to control if an automatic update generation should
1424 * occur
1425 * start_event: flag that indicates if it's the beginning of the change.
1426 * Esp. when the user is changing the content interactively
1427 * over multiple statements. Useful to set dirty flag on
1428 * update groups.
1429 */
1430 void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1431 const char *pname, int route_update,
1432 int start_event)
1433 {
1434 struct updwalk_context ctx;
1435
1436 memset(&ctx, 0, sizeof(ctx));
1437 ctx.policy_type = ptype;
1438 ctx.policy_name = pname;
1439 ctx.policy_route_update = route_update;
1440 ctx.policy_event_start_flag = start_event;
1441 ctx.flags = 0;
1442
1443 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1444 }
1445
1446 /*
1447 * update_subgroup_split_peer
1448 *
1449 * Ensure that the given peer is in a subgroup of its own in the
1450 * specified update group.
1451 */
void update_subgroup_split_peer(struct peer_af *paf,
				struct update_group *updgrp)
{
	struct update_subgroup *old_subgrp, *subgrp;
	uint64_t old_id;


	old_subgrp = paf->subgroup;

	/* A NULL update group means "stay within the current group". */
	if (!updgrp)
		updgrp = old_subgrp->update_group;

	/*
	 * If the peer is alone in its subgroup, reuse the existing
	 * subgroup.
	 */
	if (old_subgrp->peer_count == 1) {
		/* Already alone in a subgroup of the right update group. */
		if (updgrp == old_subgrp->update_group)
			return;

		subgrp = old_subgrp;
		old_id = old_subgrp->update_group->id;

		if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
			UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
		}

		/* Move the whole subgroup across to the new update group. */
		update_group_remove_subgroup(old_subgrp->update_group,
					     old_subgrp);
		update_group_add_subgroup(updgrp, subgrp);

		if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
			UPDGRP_PEER_DBG_EN(updgrp);
		}
		if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " peer %s moved to u%" PRIu64 ":s%" PRIu64,
				   old_id, subgrp->id, paf->peer->host,
				   updgrp->id, subgrp->id);

		/*
		 * The state of the subgroup (adj_out, advs, packet queue etc)
		 * is consistent internally, but may not be identical to other
		 * subgroups in the new update group even if the version number
		 * matches up. Make sure a full refresh is done before the
		 * subgroup is merged with another.
		 */
		update_subgroup_set_needs_refresh(subgrp, 1);

		SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
		return;
	}

	/*
	 * Create a new subgroup under the specified update group, and copy
	 * over relevant state to it.
	 */
	subgrp = update_subgroup_create(updgrp);
	update_subgroup_inherit_info(subgrp, old_subgrp);

	/* Record which subgroup this one was split off from. */
	subgrp->split_from.update_group_id = old_subgrp->update_group->id;
	subgrp->split_from.subgroup_id = old_subgrp->id;

	/*
	 * Copy out relevant state from the old subgroup: the advertised
	 * state (adj_out) and any packets not yet sent to this peer.
	 */
	update_subgroup_copy_adj_out(paf->subgroup, subgrp);
	update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);

	if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
		zlog_debug("u%" PRIu64 ":s%" PRIu64
			   " peer %s split and moved into u%" PRIu64
			   ":s%" PRIu64,
			   paf->subgroup->update_group->id, paf->subgroup->id,
			   paf->peer->host, updgrp->id, subgrp->id);

	SUBGRP_INCR_STAT(paf->subgroup, split_events);

	/*
	 * Since queued advs were left behind, this new subgroup needs a
	 * refresh.
	 */
	update_subgroup_set_needs_refresh(subgrp, 1);

	/*
	 * Remove peer from old subgroup, and add it to the new one.
	 */
	update_subgroup_remove_peer(paf->subgroup, paf);

	update_subgroup_add_peer(subgrp, paf, 1);
}
1543
1544 void update_bgp_group_init(struct bgp *bgp)
1545 {
1546 int afid;
1547
1548 AF_FOREACH (afid)
1549 bgp->update_groups[afid] =
1550 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
1551 "BGP Update Group Hash");
1552 }
1553
1554 void update_bgp_group_free(struct bgp *bgp)
1555 {
1556 int afid;
1557
1558 AF_FOREACH (afid) {
1559 if (bgp->update_groups[afid]) {
1560 hash_free(bgp->update_groups[afid]);
1561 bgp->update_groups[afid] = NULL;
1562 }
1563 }
1564 }
1565
1566 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1567 uint64_t subgrp_id)
1568 {
1569 struct updwalk_context ctx;
1570 memset(&ctx, 0, sizeof(ctx));
1571 ctx.vty = vty;
1572 ctx.subgrp_id = subgrp_id;
1573
1574 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1575 }
1576
1577 /*
1578 * update_group_show_stats
1579 *
1580 * Show global statistics about update groups.
1581 */
void update_group_show_stats(struct bgp *bgp, struct vty *vty)
{
	/* Dump each global update-group counter to the vty. */
	vty_out(vty, "Update groups created: %u\n",
		bgp->update_group_stats.updgrps_created);
	vty_out(vty, "Update groups deleted: %u\n",
		bgp->update_group_stats.updgrps_deleted);
	vty_out(vty, "Update subgroups created: %u\n",
		bgp->update_group_stats.subgrps_created);
	vty_out(vty, "Update subgroups deleted: %u\n",
		bgp->update_group_stats.subgrps_deleted);
	vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
	vty_out(vty, "Prune events: %u\n",
		bgp->update_group_stats.prune_events);
	vty_out(vty, "Merge events: %u\n",
		bgp->update_group_stats.merge_events);
	vty_out(vty, "Split events: %u\n",
		bgp->update_group_stats.split_events);
	vty_out(vty, "Update group switch events: %u\n",
		bgp->update_group_stats.updgrp_switch_events);
	vty_out(vty, "Peer route refreshes combined: %u\n",
		bgp->update_group_stats.peer_refreshes_combined);
	vty_out(vty, "Merge checks triggered: %u\n",
		bgp->update_group_stats.merge_checks_triggered);
}
1606
1607 /*
1608 * update_group_adjust_peer
1609 */
1610 void update_group_adjust_peer(struct peer_af *paf)
1611 {
1612 struct update_group *updgrp;
1613 struct update_subgroup *subgrp, *old_subgrp;
1614 struct peer *peer;
1615
1616 if (!paf)
1617 return;
1618
1619 peer = PAF_PEER(paf);
1620 if (!peer_established(peer)) {
1621 return;
1622 }
1623
1624 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1625 return;
1626 }
1627
1628 if (!peer->afc_nego[paf->afi][paf->safi]) {
1629 return;
1630 }
1631
1632 updgrp = update_group_find(paf);
1633 if (!updgrp) {
1634 updgrp = update_group_create(paf);
1635 if (!updgrp) {
1636 flog_err(EC_BGP_UPDGRP_CREATE,
1637 "couldn't create update group for peer %s",
1638 paf->peer->host);
1639 return;
1640 }
1641 }
1642
1643 old_subgrp = paf->subgroup;
1644
1645 if (old_subgrp) {
1646
1647 /*
1648 * If the update group of the peer is unchanged, the peer can
1649 * stay
1650 * in its existing subgroup and we're done.
1651 */
1652 if (old_subgrp->update_group == updgrp)
1653 return;
1654
1655 /*
1656 * The peer is switching between update groups. Put it in its
1657 * own subgroup under the new update group.
1658 */
1659 update_subgroup_split_peer(paf, updgrp);
1660 return;
1661 }
1662
1663 subgrp = update_subgroup_find(updgrp, paf);
1664 if (!subgrp) {
1665 subgrp = update_subgroup_create(updgrp);
1666 if (!subgrp)
1667 return;
1668 }
1669
1670 update_subgroup_add_peer(subgrp, paf, 1);
1671 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1672 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1673 subgrp->id, paf->peer->host);
1674
1675 return;
1676 }
1677
1678 int update_group_adjust_soloness(struct peer *peer, int set)
1679 {
1680 struct peer_group *group;
1681 struct listnode *node, *nnode;
1682
1683 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1684 peer_lonesoul_or_not(peer, set);
1685 if (peer->status == Established)
1686 bgp_announce_route_all(peer);
1687 } else {
1688 group = peer->group;
1689 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1690 peer_lonesoul_or_not(peer, set);
1691 if (peer->status == Established)
1692 bgp_announce_route_all(peer);
1693 }
1694 }
1695 return 0;
1696 }
1697
1698 /*
1699 * update_subgroup_rib
1700 */
1701 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
1702 {
1703 struct bgp *bgp;
1704
1705 bgp = SUBGRP_INST(subgrp);
1706 if (!bgp)
1707 return NULL;
1708
1709 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1710 }
1711
1712 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1713 updgrp_walkcb cb, void *ctx)
1714 {
1715 struct updwalk_context wctx;
1716 int afid;
1717
1718 if (!bgp)
1719 return;
1720 afid = afindex(afi, safi);
1721 if (afid >= BGP_AF_MAX)
1722 return;
1723
1724 memset(&wctx, 0, sizeof(wctx));
1725 wctx.cb = cb;
1726 wctx.context = ctx;
1727
1728 if (bgp->update_groups[afid])
1729 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1730 }
1731
1732 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1733 {
1734 afi_t afi;
1735 safi_t safi;
1736
1737 FOREACH_AFI_SAFI (afi, safi) {
1738 update_group_af_walk(bgp, afi, safi, cb, ctx);
1739 }
1740 }
1741
1742 void update_group_periodic_merge(struct bgp *bgp)
1743 {
1744 char reason[] = "periodic merge check";
1745
1746 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1747 (void *)reason);
1748 }
1749
1750 static int
1751 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1752 void *arg)
1753 {
1754 struct update_subgroup *subgrp;
1755 struct peer *peer;
1756 afi_t afi;
1757 safi_t safi;
1758
1759 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1760 peer = SUBGRP_PEER(subgrp);
1761 afi = SUBGRP_AFI(subgrp);
1762 safi = SUBGRP_SAFI(subgrp);
1763
1764 if (peer->default_rmap[afi][safi].name) {
1765 subgroup_default_originate(subgrp, 0);
1766 }
1767 }
1768
1769 return UPDWALK_CONTINUE;
1770 }
1771
1772 int update_group_refresh_default_originate_route_map(struct thread *thread)
1773 {
1774 struct bgp *bgp;
1775 char reason[] = "refresh default-originate route-map";
1776
1777 bgp = THREAD_ARG(thread);
1778 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1779 reason);
1780 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1781 bgp_unlock(bgp);
1782
1783 return (0);
1784 }
1785
1786 /*
1787 * peer_af_announce_route
1788 *
1789 * Refreshes routes out to a peer_af immediately.
1790 *
1791 * If the combine parameter is TRUE, then this function will try to
1792 * gather other peers in the subgroup for which a route announcement
1793 * is pending and efficently announce routes to all of them.
1794 *
1795 * For now, the 'combine' option has an effect only if all peers in
1796 * the subgroup have a route announcement pending.
1797 */
void peer_af_announce_route(struct peer_af *paf, int combine)
{
	struct update_subgroup *subgrp;
	struct peer_af *cur_paf;
	int all_pending;

	subgrp = paf->subgroup;
	all_pending = 0;

	if (combine) {
		/*
		 * If there are other peers in the old subgroup that also need
		 * routes to be announced, pull them into the peer's new
		 * subgroup.
		 * Combine route announcement with other peers if possible.
		 *
		 * For now, we combine only if all peers in the subgroup have an
		 * announcement pending.
		 */
		all_pending = 1;

		SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
			if (cur_paf == paf)
				continue;

			/* A running announce timer counts as "pending". */
			if (cur_paf->t_announce_route)
				continue;

			all_pending = 0;
			break;
		}
	}
	/*
	 * Announce to the peer alone if we were not asked to combine peers,
	 * or if some peers don't have a route annoucement pending.
	 */
	if (!combine || !all_pending) {
		/*
		 * Isolate the peer into its own subgroup; note that
		 * paf->subgroup may change as a result of the split.
		 */
		update_subgroup_split_peer(paf, NULL);
		if (!paf->subgroup)
			return;

		if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " %s announcing routes",
				   subgrp->update_group->id, subgrp->id,
				   paf->peer->host);

		subgroup_announce_route(paf->subgroup);
		return;
	}

	/*
	 * We will announce routes the entire subgroup.
	 *
	 * First stop refresh timers on all the other peers.
	 */
	SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
		if (cur_paf == paf)
			continue;

		bgp_stop_announce_route_timer(cur_paf);
	}

	if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
		zlog_debug("u%" PRIu64 ":s%" PRIu64
			   " announcing routes to %s, combined into %d peers",
			   subgrp->update_group->id, subgrp->id,
			   paf->peer->host, subgrp->peer_count);

	subgroup_announce_route(subgrp);

	/* Count the other peers whose refresh was folded into this one. */
	SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
			    subgrp->peer_count - 1);
}
1872
1873 void subgroup_trigger_write(struct update_subgroup *subgrp)
1874 {
1875 struct peer_af *paf;
1876
1877 /*
1878 * For each peer in the subgroup, schedule a job to pull packets from
1879 * the subgroup output queue into their own output queue. This action
1880 * will trigger a write job on the I/O thread.
1881 */
1882 SUBGRP_FOREACH_PEER (subgrp, paf)
1883 if (paf->peer->status == Established)
1884 thread_add_timer_msec(
1885 bm->master, bgp_generate_updgrp_packets,
1886 paf->peer, 0,
1887 &paf->peer->t_generate_updgrp_packets);
1888 }
1889
1890 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
1891 {
1892 UPDGRP_PEER_DBG_OFF(updgrp);
1893 return UPDWALK_CONTINUE;
1894 }
1895
1896 /* Return true if we should addpath encode NLRI to this peer */
1897 int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
1898 {
1899 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1900 && CHECK_FLAG(peer->af_cap[afi][safi],
1901 PEER_CAP_ADDPATH_AF_RX_RCV));
1902 }
1903
1904 /*
1905 * Return true if this is a path we should advertise due to a
1906 * configured addpath-tx knob
1907 */
1908 int bgp_addpath_tx_path(struct peer *peer, afi_t afi, safi_t safi,
1909 struct bgp_path_info *pi)
1910 {
1911 if (CHECK_FLAG(peer->af_flags[afi][safi],
1912 PEER_FLAG_ADDPATH_TX_ALL_PATHS))
1913 return 1;
1914
1915 if (CHECK_FLAG(peer->af_flags[afi][safi],
1916 PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)
1917 && CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
1918 return 1;
1919
1920 return 0;
1921 }