1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_errors.h"
51 #include "bgpd/bgp_fsm.h"
52 #include "bgpd/bgp_advertise.h"
53 #include "bgpd/bgp_packet.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_route.h"
56 #include "bgpd/bgp_filter.h"
57 #include "bgpd/bgp_io.h"
58
59 /********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63 /**
64 * Assign a unique ID to an update group or subgroup, mostly for
65 * display/debugging purposes. The ID space is 64 bits, so there is no
66 * need to worry about wrapping or about filling gaps. While at it,
67 * timestamp the creation.
68 */
69 static void update_group_checkin(struct update_group *updgrp)
70 {
71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock();
73 }
74
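/*
 * update_subgroup_checkin
 *
 * Same as update_group_checkin(), but for a subgroup: assign the next
 * ID from the subgroup ID space and record the creation time.
 */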
75 static void update_subgroup_checkin(struct update_subgroup *subgrp,
76 struct update_group *updgrp)
77 {
78 subgrp->id = ++bm->subgrp_idspace;
79 subgrp->uptime = bgp_clock();
80 }
81
82 static void sync_init(struct update_subgroup *subgrp)
83 {
84 subgrp->sync =
85 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
86 bgp_adv_fifo_init(&subgrp->sync->update);
87 bgp_adv_fifo_init(&subgrp->sync->withdraw);
88 bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
89 subgrp->hash =
90 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
91
92 /* We use a larger buffer for subgrp->work in the event that:
93 *
94 * - We RX a BGP_UPDATE where the attributes alone are just
95 *   under BGP_MAX_PACKET_SIZE.
96 *
97 * - The user configures an outbound route-map that does many
98 *   as-path prepends or adds many communities. At most they can
99 *   have CMD_ARGC_MAX args in a route-map, so there is a finite
100 *   limit on how large they can make the attributes.
101 *
102 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to
103 * avoid bounds checking for every single attribute as we
104 * construct an UPDATE.
105 */
106 subgrp->work =
107 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
108 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
109 }
110
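/*
 * sync_delete
 *
 * Tear down the state created by sync_init(): the sync structure
 * (with its advertisement FIFOs), the attribute hash and the
 * work/scratch streams.
 */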
111 static void sync_delete(struct update_subgroup *subgrp)
112 {
113 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
114 if (subgrp->hash)
115 hash_free(subgrp->hash);
116 subgrp->hash = NULL;
117 if (subgrp->work)
118 stream_free(subgrp->work);
119 subgrp->work = NULL;
120 if (subgrp->scratch)
121 stream_free(subgrp->scratch);
122 subgrp->scratch = NULL;
123 }
124
125 /**
126 * conf_copy
127 *
128 * copy only those fields that are relevant to update group match
129 */
130 static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
131 safi_t safi)
132 {
133 struct bgp_filter *srcfilter;
134 struct bgp_filter *dstfilter;
135
136 srcfilter = &src->filter[afi][safi];
137 dstfilter = &dst->filter[afi][safi];
138
139 dst->bgp = src->bgp;
140 dst->sort = src->sort;
141 dst->as = src->as;
142 dst->v_routeadv = src->v_routeadv;
143 dst->flags = src->flags;
144 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
145 dst->pmax_out[afi][safi] = src->pmax_out[afi][safi];
146 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
147
148 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
149 dst->cap = src->cap;
150 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
151 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
152 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
153 dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
154 dst->local_as = src->local_as;
155 dst->change_local_as = src->change_local_as;
156 dst->shared_network = src->shared_network;
157 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
158
159 dst->group = src->group;
160
161 if (src->default_rmap[afi][safi].name) {
162 dst->default_rmap[afi][safi].name =
163 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
164 src->default_rmap[afi][safi].name);
165 dst->default_rmap[afi][safi].map =
166 src->default_rmap[afi][safi].map;
167 }
168
169 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
170 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
171 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
172 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
173 }
174
175 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
176 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
177 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
178 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
179 }
180
181 if (FILTER_LIST_OUT_NAME(srcfilter)) {
182 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
183 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
184 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
185 }
186
187 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
188 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
189 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
190 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
191 }
192
193 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
194 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
195 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
196 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
197 }
198
199 if (ADVERTISE_MAP_NAME(srcfilter)) {
200 ADVERTISE_MAP_NAME(dstfilter) = XSTRDUP(
201 MTYPE_BGP_FILTER_NAME, ADVERTISE_MAP_NAME(srcfilter));
202 ADVERTISE_MAP(dstfilter) = ADVERTISE_MAP(srcfilter);
203 ADVERTISE_CONDITION(dstfilter) = ADVERTISE_CONDITION(srcfilter);
204 }
205
206 if (CONDITION_MAP_NAME(srcfilter)) {
207 CONDITION_MAP_NAME(dstfilter) = XSTRDUP(
208 MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter));
209 CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter);
210 }
211 }
212
213 /**
214 * Since we did a bunch of XSTRDUPs in conf_copy, it is time to free them up.
215 */
216 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
217 {
218 struct bgp_filter *srcfilter;
219
220 srcfilter = &src->filter[afi][safi];
221
222 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
223
224 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
225
226 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
227
228 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
229
230 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
231
232 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
233
234 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.aname);
235
236 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);
237
238 XFREE(MTYPE_BGP_PEER_HOST, src->host);
239 }
240
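/*
 * peer2_updgrp_copy
 *
 * Stamp an update group with the AFI/SAFI, AF index and bgp instance
 * of the given peer_af, and copy the match-relevant peer configuration
 * into the group's conf peer via conf_copy().
 */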
241 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
242 {
243 struct peer *src;
244 struct peer *dst;
245
246 if (!updgrp || !paf)
247 return;
248
249 src = paf->peer;
250 dst = updgrp->conf;
251 if (!src || !dst)
252 return;
253
254 updgrp->afi = paf->afi;
255 updgrp->safi = paf->safi;
256 updgrp->afid = paf->afid;
257 updgrp->bgp = src->bgp;
258
259 conf_copy(dst, src, paf->afi, paf->safi);
260 }
261
262 /**
263 * auxiliary functions to maintain the hash table.
264 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
265 * - updgrp_hash_key_make - makes the key for update group search
266 * - updgrp_hash_cmp - compare two update groups.
267 */
268 static void *updgrp_hash_alloc(void *p)
269 {
270 struct update_group *updgrp;
271 const struct update_group *in;
272
273 in = (const struct update_group *)p;
274 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
275 memcpy(updgrp, in, sizeof(struct update_group));
276 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
277 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
278 return updgrp;
279 }
280
281 /**
282 * The hash value for a peer is computed from the following variables:
283 * v = f(
284 * 1. IBGP (1) or EBGP (2)
285 * 2. FLAGS based on configuration:
286 * LOCAL_AS_NO_PREPEND
287 * LOCAL_AS_REPLACE_AS
288 * 3. AF_FLAGS based on configuration:
289 * Refer to definition in bgp_updgrp.h
290 * 4. (AF-independent) Capability flags:
291 * AS4_RCV capability
292 * 5. (AF-dependent) Capability flags:
293 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
294 * 6. MRAI
295 * 7. peer-group name
296 * 8. Outbound route-map name (neighbor route-map <> out)
297 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
298 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
299 * 11. Outbound as-list name (neighbor filter-list <> out)
300 * 12. Unsuppress map name (neighbor unsuppress-map <>)
301 * 13. default rmap name (neighbor default-originate route-map <>)
302 * 14. encoding both global and link-local nexthop?
303 * 15. If peer is configured to be a lonesoul, peer ip address
304 * 16. Local-as should match, if configured.
305 * )
306 */
307 static unsigned int updgrp_hash_key_make(const void *p)
308 {
309 const struct update_group *updgrp;
310 const struct peer *peer;
311 const struct bgp_filter *filter;
312 uint32_t flags;
313 uint32_t key;
314 afi_t afi;
315 safi_t safi;
316
317 #define SEED1 999331
318 #define SEED2 2147483647
319
320 updgrp = p;
321 peer = updgrp->conf;
322 afi = updgrp->afi;
323 safi = updgrp->safi;
324 flags = peer->af_flags[afi][safi];
325 filter = &peer->filter[afi][safi];
326
327 key = 0;
328
329 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
330 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
331 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
332 key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
333 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
334 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
335 key);
336 key = jhash_1word(peer->v_routeadv, key);
337 key = jhash_1word(peer->change_local_as, key);
338
339 if (peer->group)
340 key = jhash_1word(jhash(peer->group->name,
341 strlen(peer->group->name), SEED1),
342 key);
343
344 if (filter->map[RMAP_OUT].name)
345 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
346 strlen(filter->map[RMAP_OUT].name),
347 SEED1),
348 key);
349
350 if (filter->dlist[FILTER_OUT].name)
351 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
352 strlen(filter->dlist[FILTER_OUT].name),
353 SEED1),
354 key);
355
356 if (filter->plist[FILTER_OUT].name)
357 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
358 strlen(filter->plist[FILTER_OUT].name),
359 SEED1),
360 key);
361
362 if (filter->aslist[FILTER_OUT].name)
363 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
364 strlen(filter->aslist[FILTER_OUT].name),
365 SEED1),
366 key);
367
368 if (filter->usmap.name)
369 key = jhash_1word(jhash(filter->usmap.name,
370 strlen(filter->usmap.name), SEED1),
371 key);
372
373 if (filter->advmap.aname)
374 key = jhash_1word(jhash(filter->advmap.aname,
375 strlen(filter->advmap.aname), SEED1),
376 key);
377
378 if (peer->default_rmap[afi][safi].name)
379 key = jhash_1word(
380 jhash(peer->default_rmap[afi][safi].name,
381 strlen(peer->default_rmap[afi][safi].name),
382 SEED1),
383 key);
384
385 /* If the peer is on a shared network and is exchanging IPv6 prefixes,
386 * it needs to include the link-local address. That's different from
387 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
388 * bytes). We create different update groups to take care of that.
389 */
390 key = jhash_1word(
391 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
392 key);
393
394 /*
395 * There are certain peers that must get their own update-group:
396 * - lonesoul peers
397 * - peers that negotiated ORF
398 * - maximum-prefix-out is set
399 */
400 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
401 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
402 || CHECK_FLAG(peer->af_cap[afi][safi],
403 PEER_CAP_ORF_PREFIX_SM_OLD_RCV)
404 || CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT))
405 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
406 key);
407
408 return key;
409 }
410
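/*
 * updgrp_hash_cmp
 *
 * Return true if the two update groups can share outbound state, i.e.
 * their conf peers match on essentially the same attributes that feed
 * updgrp_hash_key_make() above.
 */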
411 static bool updgrp_hash_cmp(const void *p1, const void *p2)
412 {
413 const struct update_group *grp1;
414 const struct update_group *grp2;
415 const struct peer *pe1;
416 const struct peer *pe2;
417 uint32_t flags1;
418 uint32_t flags2;
419 const struct bgp_filter *fl1;
420 const struct bgp_filter *fl2;
421 afi_t afi;
422 safi_t safi;
423
424 if (!p1 || !p2)
425 return false;
426
427 grp1 = p1;
428 grp2 = p2;
429 pe1 = grp1->conf;
430 pe2 = grp2->conf;
431 afi = grp1->afi;
432 safi = grp1->safi;
433 flags1 = pe1->af_flags[afi][safi];
434 flags2 = pe2->af_flags[afi][safi];
435 fl1 = &pe1->filter[afi][safi];
436 fl2 = &pe2->filter[afi][safi];
437
438 /* put EBGP and IBGP peers in different update groups */
439 if (pe1->sort != pe2->sort)
440 return false;
441
442 /* check peer flags */
443 if ((pe1->flags & PEER_UPDGRP_FLAGS)
444 != (pe2->flags & PEER_UPDGRP_FLAGS))
445 return false;
446
447 /* If there is 'local-as' configured, it should match. */
448 if (pe1->change_local_as != pe2->change_local_as)
449 return false;
450
451 /* flags like route reflector client */
452 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
453 return false;
454
455 if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
456 return false;
457
458 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
459 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
460 return false;
461
462 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
463 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
464 return false;
465
466 if (pe1->v_routeadv != pe2->v_routeadv)
467 return false;
468
469 if (pe1->group != pe2->group)
470 return false;
471
472 /* route-map names should be the same */
473 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
474 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
475 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
476 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
477 return false;
478
479 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
480 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
481 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
482 && strcmp(fl1->dlist[FILTER_OUT].name,
483 fl2->dlist[FILTER_OUT].name)))
484 return false;
485
486 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
487 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
488 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
489 && strcmp(fl1->plist[FILTER_OUT].name,
490 fl2->plist[FILTER_OUT].name)))
491 return false;
492
493 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
494 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
495 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
496 && strcmp(fl1->aslist[FILTER_OUT].name,
497 fl2->aslist[FILTER_OUT].name)))
498 return false;
499
500 if ((fl1->usmap.name && !fl2->usmap.name)
501 || (!fl1->usmap.name && fl2->usmap.name)
502 || (fl1->usmap.name && fl2->usmap.name
503 && strcmp(fl1->usmap.name, fl2->usmap.name)))
504 return false;
505
506 if ((fl1->advmap.aname && !fl2->advmap.aname)
507 || (!fl1->advmap.aname && fl2->advmap.aname)
508 || (fl1->advmap.aname && fl2->advmap.aname
509 && strcmp(fl1->advmap.aname, fl2->advmap.aname)))
510 return false;
511
512 if ((pe1->default_rmap[afi][safi].name
513 && !pe2->default_rmap[afi][safi].name)
514 || (!pe1->default_rmap[afi][safi].name
515 && pe2->default_rmap[afi][safi].name)
516 || (pe1->default_rmap[afi][safi].name
517 && pe2->default_rmap[afi][safi].name
518 && strcmp(pe1->default_rmap[afi][safi].name,
519 pe2->default_rmap[afi][safi].name)))
520 return false;
521
522 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
523 return false;
524
525 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
526 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
527 || CHECK_FLAG(pe1->af_cap[afi][safi],
528 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
529 && !sockunion_same(&pe1->su, &pe2->su))
530 return false;
531
532 return true;
533 }
534
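/*
 * peer_lonesoul_or_not
 *
 * Set or clear PEER_FLAG_LONESOUL on the peer and, if the flag
 * actually changed, re-evaluate the peer's update-group membership
 * for all address families.
 */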
535 static void peer_lonesoul_or_not(struct peer *peer, int set)
536 {
537 /* no change in status? */
538 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
539 return;
540
541 if (set)
542 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
543 else
544 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
545
546 update_group_adjust_peer_afs(peer);
547 }
548
549 /*
550 * subgroup_total_packets_enqueued
551 *
552 * Returns the total number of packets enqueued to a subgroup.
553 */
554 static unsigned int
555 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
556 {
557 struct bpacket *pkt;
558
559 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
560
561 return pkt->ver - 1;
562 }
563
564 static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
565 {
566 struct updwalk_context *ctx = arg;
567 struct vty *vty;
568 struct update_subgroup *subgrp;
569 struct peer_af *paf;
570 struct bgp_filter *filter;
571 int match = 0;
572
573 if (!ctx)
574 return CMD_SUCCESS;
575
576 if (ctx->subgrp_id) {
577 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
578 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
579 continue;
580 else {
581 match = 1;
582 break;
583 }
584 }
585 } else {
586 match = 1;
587 }
588
589 if (!match) {
590 /* Since this routine is invoked from a walk, we cannot signal
591 * any error here; we can only return.
592 */
593 return CMD_SUCCESS;
594 }
595
596 vty = ctx->vty;
597
598 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
599 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
600 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
601 if (filter->map[RMAP_OUT].name)
602 vty_out(vty, " Outgoing route map: %s\n",
603 filter->map[RMAP_OUT].name);
604 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
605 if (updgrp->conf->change_local_as)
606 vty_out(vty, " Local AS %u%s%s\n",
607 updgrp->conf->change_local_as,
608 CHECK_FLAG(updgrp->conf->flags,
609 PEER_FLAG_LOCAL_AS_NO_PREPEND)
610 ? " no-prepend"
611 : "",
612 CHECK_FLAG(updgrp->conf->flags,
613 PEER_FLAG_LOCAL_AS_REPLACE_AS)
614 ? " replace-as"
615 : "");
616
617 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
618 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
619 continue;
620 vty_out(vty, "\n");
621 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
622 vty_out(vty, " Created: %s",
623 timestamp_string(subgrp->uptime));
624
625 if (subgrp->split_from.update_group_id
626 || subgrp->split_from.subgroup_id) {
627 vty_out(vty, " Split from group id: %" PRIu64 "\n",
628 subgrp->split_from.update_group_id);
629 vty_out(vty,
630 " Split from subgroup id: %" PRIu64 "\n",
631 subgrp->split_from.subgroup_id);
632 }
633
634 vty_out(vty, " Join events: %u\n", subgrp->join_events);
635 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
636 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
637 vty_out(vty, " Split events: %u\n", subgrp->split_events);
638 vty_out(vty, " Update group switch events: %u\n",
639 subgrp->updgrp_switch_events);
640 vty_out(vty, " Peer refreshes combined: %u\n",
641 subgrp->peer_refreshes_combined);
642 vty_out(vty, " Merge checks triggered: %u\n",
643 subgrp->merge_checks_triggered);
644 vty_out(vty, " Coalesce Time: %u%s\n",
645 (UPDGRP_INST(subgrp->update_group))->coalesce_time,
646 subgrp->t_coalesce ? "(Running)" : "");
647 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
648 vty_out(vty, " Packet queue length: %d\n",
649 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
650 vty_out(vty, " Total packets enqueued: %u\n",
651 subgroup_total_packets_enqueued(subgrp));
652 vty_out(vty, " Packet queue high watermark: %d\n",
653 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
654 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
655 vty_out(vty, " Advertise list: %s\n",
656 advertise_list_is_empty(subgrp) ? "empty"
657 : "not empty");
658 vty_out(vty, " Flags: %s\n",
659 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
660 ? "R"
661 : "");
662 if (subgrp->peer_count > 0) {
663 vty_out(vty, " Peers:\n");
664 SUBGRP_FOREACH_PEER (subgrp, paf)
665 vty_out(vty, " - %s\n", paf->peer->host);
666 }
667 }
668 return UPDWALK_CONTINUE;
669 }
670
671 /*
672 * Helper function to show the packet queue for each subgroup of update group.
673 * Will be constrained to a particular subgroup id if id !=0
674 */
675 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
676 void *arg)
677 {
678 struct updwalk_context *ctx = arg;
679 struct update_subgroup *subgrp;
680 struct vty *vty;
681
682 vty = ctx->vty;
683 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
684 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
685 continue;
686 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
687 updgrp->id, subgrp->id);
688 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
689 }
690 return UPDWALK_CONTINUE;
691 }
692
693 /*
694 * Show the packet queue for each subgroup of update group. Will be
695 * constrained to a particular subgroup id if id !=0
696 */
697 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
698 struct vty *vty, uint64_t id)
699 {
700 struct updwalk_context ctx;
701
702 memset(&ctx, 0, sizeof(ctx));
703 ctx.vty = vty;
704 ctx.subgrp_id = id;
705 ctx.flags = 0;
706 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
707 &ctx);
708 }
709
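/*
 * update_group_find
 *
 * Look up the update group that the given peer_af hashes into, using
 * a temporary conf peer built from the peer's current configuration.
 * Returns NULL if the peer is not established.
 */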
710 static struct update_group *update_group_find(struct peer_af *paf)
711 {
712 struct update_group *updgrp;
713 struct update_group tmp;
714 struct peer tmp_conf;
715
716 if (!peer_established(PAF_PEER(paf)))
717 return NULL;
718
719 memset(&tmp, 0, sizeof(tmp));
720 memset(&tmp_conf, 0, sizeof(tmp_conf));
721 tmp.conf = &tmp_conf;
722 peer2_updgrp_copy(&tmp, paf);
723
724 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
725 conf_release(&tmp_conf, paf->afi, paf->safi);
726 return updgrp;
727 }
728
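/*
 * update_group_create
 *
 * Create (via hash_get() with updgrp_hash_alloc) the update group
 * that the given peer_af hashes into, assign it an ID and timestamp,
 * and bump the creation statistics.
 */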
729 static struct update_group *update_group_create(struct peer_af *paf)
730 {
731 struct update_group *updgrp;
732 struct update_group tmp;
733 struct peer tmp_conf;
734
735 memset(&tmp, 0, sizeof(tmp));
736 memset(&tmp_conf, 0, sizeof(tmp_conf));
737 tmp.conf = &tmp_conf;
738 peer2_updgrp_copy(&tmp, paf);
739
740 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
741 updgrp_hash_alloc);
742 if (!updgrp)
743 return NULL;
744 update_group_checkin(updgrp);
745
746 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
747 zlog_debug("create update group %" PRIu64, updgrp->id);
748
749 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
750
751 conf_release(&tmp_conf, paf->afi, paf->safi);
752 return updgrp;
753 }
754
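/*
 * update_group_delete
 *
 * Remove an update group from the per-AF hash table and free its
 * conf peer and the group itself.
 */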
755 static void update_group_delete(struct update_group *updgrp)
756 {
757 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
758 zlog_debug("delete update group %" PRIu64, updgrp->id);
759
760 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
761
762 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
763 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
764
765 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
766
767 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
768
769 XFREE(MTYPE_BGP_PEER, updgrp->conf);
770 XFREE(MTYPE_BGP_UPDGRP, updgrp);
771 }
772
773 static void update_group_add_subgroup(struct update_group *updgrp,
774 struct update_subgroup *subgrp)
775 {
776 if (!updgrp || !subgrp)
777 return;
778
779 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
780 subgrp->update_group = updgrp;
781 }
782
783 static void update_group_remove_subgroup(struct update_group *updgrp,
784 struct update_subgroup *subgrp)
785 {
786 if (!updgrp || !subgrp)
787 return;
788
789 LIST_REMOVE(subgrp, updgrp_train);
790 subgrp->update_group = NULL;
791 if (LIST_EMPTY(&(updgrp->subgrps)))
792 update_group_delete(updgrp);
793 }
794
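/*
 * update_subgroup_create
 *
 * Allocate a new subgroup under the given update group, initialize
 * its sync state, packet queue and adj-out queue, and link it onto
 * the group's subgroup list.
 */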
795 static struct update_subgroup *
796 update_subgroup_create(struct update_group *updgrp)
797 {
798 struct update_subgroup *subgrp;
799
800 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
801 update_subgroup_checkin(subgrp, updgrp);
802 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
803 sync_init(subgrp);
804 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
805 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
806 TAILQ_INIT(&(subgrp->adjq));
807 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
808 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
809 subgrp->id);
810
811 update_group_add_subgroup(updgrp, subgrp);
812
813 UPDGRP_INCR_STAT(updgrp, subgrps_created);
814
815 return subgrp;
816 }
817
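/*
 * update_subgroup_delete
 *
 * Undo update_subgroup_create(): stop pending timers, clean up the
 * packet queue, adj-out table and sync state, unlink the subgroup
 * from its update group and free it.
 */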
818 static void update_subgroup_delete(struct update_subgroup *subgrp)
819 {
820 if (!subgrp)
821 return;
822
823 if (subgrp->update_group)
824 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
825
826 THREAD_OFF(subgrp->t_merge_check);
827 THREAD_OFF(subgrp->t_coalesce);
828
829 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
830 subgroup_clear_table(subgrp);
831
832 sync_delete(subgrp);
833
834 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
835 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
836 subgrp->update_group->id, subgrp->id);
837
838 update_group_remove_subgroup(subgrp->update_group, subgrp);
839
840 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
841 }
842
843 void update_subgroup_inherit_info(struct update_subgroup *to,
844 struct update_subgroup *from)
845 {
846 if (!to || !from)
847 return;
848
849 to->sflags = from->sflags;
850 }
851
852 /*
853 * update_subgroup_check_delete
854 *
855 * Delete a subgroup if it is ready to be deleted.
856 *
857 * Returns true if the subgroup was deleted.
858 */
859 static bool update_subgroup_check_delete(struct update_subgroup *subgrp)
860 {
861 if (!subgrp)
862 return false;
863
864 if (!LIST_EMPTY(&(subgrp->peers)))
865 return false;
866
867 update_subgroup_delete(subgrp);
868
869 return true;
870 }
871
872 /*
873 * update_subgroup_add_peer
874 *
875 * @param send_enqueued_pkts If true, all currently enqueued packets will
876 * also be sent to the peer.
877 */
878 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
879 struct peer_af *paf,
880 int send_enqueued_pkts)
881 {
882 struct bpacket *pkt;
883
884 if (!subgrp || !paf)
885 return;
886
887 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
888 paf->subgroup = subgrp;
889 subgrp->peer_count++;
890
891 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
892 UPDGRP_PEER_DBG_EN(subgrp->update_group);
893 }
894
895 SUBGRP_INCR_STAT(subgrp, join_events);
896
897 if (send_enqueued_pkts) {
898 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
899 } else {
900
901 /*
902 * Hang the peer off of the last, placeholder, packet in the
903 * queue. This means it won't see any of the packets that are
904 * currently in the queue.
905 */
906 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
907 assert(pkt->buffer == NULL);
908 }
909
910 bpacket_add_peer(pkt, paf);
911
912 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
913 zlog_debug("peer %s added to subgroup s%" PRIu64,
914 paf->peer->host, subgrp->id);
915 }
916
917 /*
918 * update_subgroup_remove_peer_internal
919 *
920 * Internal function that removes a peer from a subgroup, but does not
921 * delete the subgroup. A call to this function must almost always be
922 * followed by a call to update_subgroup_check_delete().
923 *
924 * @see update_subgroup_remove_peer
925 */
926 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
927 struct peer_af *paf)
928 {
929 assert(subgrp && paf && subgrp->update_group);
930
931 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
932 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
933 }
934
935 bpacket_queue_remove_peer(paf);
936 LIST_REMOVE(paf, subgrp_train);
937 paf->subgroup = NULL;
938 subgrp->peer_count--;
939
940 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
941 zlog_debug("peer %s deleted from subgroup s%"
942 PRIu64 " peer cnt %d",
943 paf->peer->host, subgrp->id, subgrp->peer_count);
944 SUBGRP_INCR_STAT(subgrp, prune_events);
945 }
946
947 /*
948 * update_subgroup_remove_peer
949 */
950 void update_subgroup_remove_peer(struct update_subgroup *subgrp,
951 struct peer_af *paf)
952 {
953 if (!subgrp || !paf)
954 return;
955
956 update_subgroup_remove_peer_internal(subgrp, paf);
957
958 if (update_subgroup_check_delete(subgrp))
959 return;
960
961 /*
962 * The deletion of the peer may have caused some packets to be
963 * deleted from the subgroup packet queue. Check if the subgroup can
964 * be merged now.
965 */
966 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
967 }
968
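/*
 * update_subgroup_find
 *
 * Find a subgroup in the given update group that a newly joining
 * (established, version 0) peer can be placed into, skipping
 * subgroups that have default-originate set or need a refresh.
 */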
969 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
970 struct peer_af *paf)
971 {
972 struct update_subgroup *subgrp = NULL;
973 uint64_t version;
974
975 if (paf->subgroup) {
976 assert(0);
977 return NULL;
978 } else
979 version = 0;
980
981 if (!peer_established(PAF_PEER(paf)))
982 return NULL;
983
984 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
985 if (subgrp->version != version
986 || CHECK_FLAG(subgrp->sflags,
987 SUBGRP_STATUS_DEFAULT_ORIGINATE))
988 continue;
989
990 /*
991 * The version number is not meaningful on a subgroup that needs
992 * a refresh.
993 */
994 if (update_subgroup_needs_refresh(subgrp))
995 continue;
996
997 break;
998 }
999
1000 return subgrp;
1001 }
1002
1003 /*
1004 * update_subgroup_ready_for_merge
1005 *
1006 * Returns true if this subgroup is in a state that allows it to be
1007 * merged into another subgroup.
1008 */
1009 static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
1010 {
1011
1012 /*
1013 * Not ready if there are any encoded packets waiting to be written
1014 * out to peers.
1015 */
1016 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
1017 return false;
1018
1019 /*
1020 * Not ready if there are enqueued updates waiting to be encoded.
1021 */
1022 if (!advertise_list_is_empty(subgrp))
1023 return false;
1024
1025 /*
1026 * Don't attempt to merge a subgroup that needs a refresh. For one,
1027 * we can't determine if the adj_out of such a group matches that of
1028 * another group.
1029 */
1030 if (update_subgroup_needs_refresh(subgrp))
1031 return false;
1032
1033 return true;
1034 }
1035
1036 /*
1037 * update_subgroup_can_merge_into
1038 *
1039 * Returns true if the first subgroup can merge into the second
1040 * subgroup.
1041 */
1042 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1043 struct update_subgroup *target)
1044 {
1045
1046 if (subgrp == target)
1047 return 0;
1048
1049 /*
1050 * Both must have processed the BRIB to the same point in order to
1051 * be merged.
1052 */
1053 if (subgrp->version != target->version)
1054 return 0;
1055
1056 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1057 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1058 return 0;
1059
1060 if (subgrp->adj_count != target->adj_count)
1061 return 0;
1062
1063 return update_subgroup_ready_for_merge(target);
1064 }
1065
1066 /*
1067 * update_subgroup_merge
1068 *
1069 * Merge the first subgroup into the second one.
1070 */
1071 static void update_subgroup_merge(struct update_subgroup *subgrp,
1072 struct update_subgroup *target,
1073 const char *reason)
1074 {
1075 struct peer_af *paf;
1076 int result;
1077 int peer_count;
1078
1079 assert(subgrp->adj_count == target->adj_count);
1080
1081 peer_count = subgrp->peer_count;
1082
1083 while (1) {
1084 paf = LIST_FIRST(&subgrp->peers);
1085 if (!paf)
1086 break;
1087
1088 update_subgroup_remove_peer_internal(subgrp, paf);
1089
1090 /*
1091 * Add the peer to the target subgroup, while making sure that
1092 * any currently enqueued packets won't be sent to it. Enqueued
1093 * packets could, for example, result in an unnecessary withdraw
1094 * followed by an advertise.
1095 */
1096 update_subgroup_add_peer(target, paf, 0);
1097 }
1098
1099 SUBGRP_INCR_STAT(target, merge_events);
1100
1101 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1102 zlog_debug("u%" PRIu64 ":s%" PRIu64" (%d peers) merged into u%" PRIu64 ":s%" PRIu64", trigger: %s",
1103 subgrp->update_group->id, subgrp->id, peer_count,
1104 target->update_group->id, target->id,
1105 reason ? reason : "unknown");
1106
1107 result = update_subgroup_check_delete(subgrp);
1108 assert(result);
1109 }
1110
1111 /*
1112 * update_subgroup_check_merge
1113 *
1114 * Merge this subgroup into another subgroup if possible.
1115 *
1116 * Returns true if the subgroup has been merged. The subgroup pointer
1117 * should not be accessed in this case.
1118 */
1119 bool update_subgroup_check_merge(struct update_subgroup *subgrp,
1120 const char *reason)
1121 {
1122 struct update_subgroup *target;
1123
1124 if (!update_subgroup_ready_for_merge(subgrp))
1125 return false;
1126
1127 /*
1128 * Look for a subgroup to merge into.
1129 */
1130 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
1131 if (update_subgroup_can_merge_into(subgrp, target))
1132 break;
1133 }
1134
1135 if (!target)
1136 return false;
1137
1138 update_subgroup_merge(subgrp, target, reason);
1139 return true;
1140 }
1141
1142 /*
1143 * update_subgroup_merge_check_thread_cb
1144 */
1145 static int update_subgroup_merge_check_thread_cb(struct thread *thread)
1146 {
1147 struct update_subgroup *subgrp;
1148
1149 subgrp = THREAD_ARG(thread);
1150
1151 subgrp->t_merge_check = NULL;
1152
1153 update_subgroup_check_merge(subgrp, "triggered merge check");
1154 return 0;
1155 }
1156
1157 /*
1158 * update_subgroup_trigger_merge_check
1159 *
1160 * Triggers a call to update_subgroup_check_merge() on a clean context.
1161 *
1162 * @param force If true, the merge check will be triggered even if the
1163 * subgroup doesn't currently look ready for a merge.
1164 *
1165 * Returns true if a merge check will be performed shortly.
1166 */
1167 bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1168 int force)
1169 {
1170 if (subgrp->t_merge_check)
1171 return true;
1172
1173 if (!force && !update_subgroup_ready_for_merge(subgrp))
1174 return false;
1175
1176 subgrp->t_merge_check = NULL;
1177 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1178 subgrp, 0, &subgrp->t_merge_check);
1179
1180 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1181
1182 return true;
1183 }
1184
1185 /*
1186 * update_subgroup_copy_adj_out
1187 *
1188 * Helper function that clones the adj out (state about advertised
1189 * routes) from one subgroup to another. It assumes that the adj out
1190 * of the target subgroup is empty.
1191 */
1192 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1193 struct update_subgroup *dest)
1194 {
1195 struct bgp_adj_out *aout, *aout_copy;
1196
1197 SUBGRP_FOREACH_ADJ (source, aout) {
1198 /*
1199 * Copy the adj out.
1200 */
1201 aout_copy = bgp_adj_out_alloc(dest, aout->dest,
1202 aout->addpath_tx_id);
1203 aout_copy->attr =
1204 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
1205 }
1206
1207 dest->scount = source->scount;
1208 }
1209
1210 /*
1211 * update_subgroup_copy_packets
1212 *
1213 * Copy packets after and including the given packet to the subgroup
1214 * 'dest'.
1215 *
1216 * Returns the number of packets copied.
1217 */
1218 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1219 struct bpacket *pkt)
1220 {
1221 int count;
1222
1223 count = 0;
1224 while (pkt && pkt->buffer) {
1225 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1226 &pkt->arr);
1227 count++;
1228 pkt = bpacket_next(pkt);
1229 }
1230
1231 return count;
1232 }
1233
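/*
 * updgrp_prefix_list_update
 *
 * If the named prefix-list is the one used as the group's outbound
 * prefix-list, re-resolve and cache the prefix-list pointer on the
 * group's conf peer; returns true if the group references the list.
 * The filter-list and distribute-list helpers below follow the same
 * pattern.
 */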
1234 static bool updgrp_prefix_list_update(struct update_group *updgrp,
1235 const char *name)
1236 {
1237 struct peer *peer;
1238 struct bgp_filter *filter;
1239
1240 peer = UPDGRP_PEER(updgrp);
1241 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1242
1243 if (PREFIX_LIST_OUT_NAME(filter)
1244 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1245 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1246 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1247 return true;
1248 }
1249 return false;
1250 }
1251
1252 static bool updgrp_filter_list_update(struct update_group *updgrp,
1253 const char *name)
1254 {
1255 struct peer *peer;
1256 struct bgp_filter *filter;
1257
1258 peer = UPDGRP_PEER(updgrp);
1259 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1260
1261 if (FILTER_LIST_OUT_NAME(filter)
1262 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1263 FILTER_LIST_OUT(filter) =
1264 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1265 return true;
1266 }
1267 return false;
1268 }
1269
1270 static bool updgrp_distribute_list_update(struct update_group *updgrp,
1271 const char *name)
1272 {
1273 struct peer *peer;
1274 struct bgp_filter *filter;
1275
1276 peer = UPDGRP_PEER(updgrp);
1277 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1278
1279 if (DISTRIBUTE_OUT_NAME(filter)
1280 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1281 DISTRIBUTE_OUT(filter) = access_list_lookup(
1282 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1283 return true;
1284 }
1285 return false;
1286 }
1287
1288 static int updgrp_route_map_update(struct update_group *updgrp,
1289 const char *name, int *def_rmap_changed)
1290 {
1291 struct peer *peer;
1292 struct bgp_filter *filter;
1293 int changed = 0;
1294 afi_t afi;
1295 safi_t safi;
1296
1297 peer = UPDGRP_PEER(updgrp);
1298 afi = UPDGRP_AFI(updgrp);
1299 safi = UPDGRP_SAFI(updgrp);
1300 filter = &peer->filter[afi][safi];
1301
1302 if (ROUTE_MAP_OUT_NAME(filter)
1303 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1304 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1305
1306 changed = 1;
1307 }
1308
1309 if (UNSUPPRESS_MAP_NAME(filter)
1310 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1311 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1312 changed = 1;
1313 }
1314
1315 /* process default-originate route-map */
1316 if (peer->default_rmap[afi][safi].name
1317 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1318 peer->default_rmap[afi][safi].map =
1319 route_map_lookup_by_name(name);
1320 if (def_rmap_changed)
1321 *def_rmap_changed = 1;
1322 }
1323 return changed;
1324 }
1325
1326 /*
1327 * hash iteration callback function to process a policy change for an
1328 * update group. Check if the changed policy matches the updgrp's
1329 * outbound route-map or unsuppress-map or default-originate map or
1330 * filter-list or prefix-list or distribute-list.
1331 * Trigger update generation accordingly.
1332 */
1333 static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
1334 {
1335 struct updwalk_context *ctx = arg;
1336 struct update_subgroup *subgrp;
1337 int changed = 0;
1338 int def_changed = 0;
1339
1340 if (!updgrp || !ctx || !ctx->policy_name)
1341 return UPDWALK_CONTINUE;
1342
1343 switch (ctx->policy_type) {
1344 case BGP_POLICY_ROUTE_MAP:
1345 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1346 &def_changed);
1347 break;
1348 case BGP_POLICY_FILTER_LIST:
1349 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1350 break;
1351 case BGP_POLICY_PREFIX_LIST:
1352 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1353 break;
1354 case BGP_POLICY_DISTRIBUTE_LIST:
1355 changed =
1356 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1357 break;
1358 default:
1359 break;
1360 }
1361
1362 /* If not doing route update, return after updating "config" */
1363 if (!ctx->policy_route_update)
1364 return UPDWALK_CONTINUE;
1365
1366 /* If nothing has changed, return after updating "config" */
1367 if (!changed && !def_changed)
1368 return UPDWALK_CONTINUE;
1369
1370 /*
1371 * If something has changed, then at the beginning of a route-map
1372 * modification event, mark each subgroup's needs-refresh bit.
1373 *
1374 * For one, this signals to anyone looking at the subgroup that it
1375 * needs a refresh. Second, it prevents a premature merge of this
1376 * subgroup with another before a complete (outbound) refresh has
1377 * taken place.
1378 */
1379 if (ctx->policy_event_start_flag) {
1380 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1381 update_subgroup_set_needs_refresh(subgrp, 1);
1382 }
1383 return UPDWALK_CONTINUE;
1384 }
1385
1386 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1387 /* Avoid suppressing duplicate routes later
1388 * when processing in subgroup_announce_table().
1389 */
1390 SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES);
1391
1392 if (changed) {
1393 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1394 zlog_debug(
1395 "u%" PRIu64 ":s%" PRIu64" announcing routes upon policy %s (type %d) change",
1396 updgrp->id, subgrp->id,
1397 ctx->policy_name, ctx->policy_type);
1398 subgroup_announce_route(subgrp);
1399 }
1400 if (def_changed) {
1401 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1402 zlog_debug(
1403 "u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change",
1404 updgrp->id, subgrp->id,
1405 ctx->policy_name);
1406 subgroup_default_originate(subgrp, 0);
1407 }
1408 update_subgroup_set_needs_refresh(subgrp, 0);
1409 }
1410 return UPDWALK_CONTINUE;
1411 }
1412
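/*
 * update_group_walkcb
 *
 * hash_walk() adapter: unwrap the update group from the hash bucket
 * and invoke the user callback stored in the walk context.
 */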
1413 static int update_group_walkcb(struct hash_bucket *bucket, void *arg)
1414 {
1415 struct update_group *updgrp = bucket->data;
1416 struct updwalk_context *wctx = arg;
1417 int ret = (*wctx->cb)(updgrp, wctx->context);
1418 return ret;
1419 }
1420
1421 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1422 void *arg)
1423 {
1424 struct update_subgroup *subgrp;
1425 struct update_subgroup *tmp_subgrp;
1426 const char *reason = arg;
1427
1428 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1429 update_subgroup_check_merge(subgrp, reason);
1430 return UPDWALK_CONTINUE;
1431 }
1432
1433 /********************
1434 * PUBLIC FUNCTIONS
1435 ********************/
1436
1437 /*
1438 * trigger function when a policy (route-map/filter-list/prefix-list/
1439 * distribute-list etc.) content changes. Go through all the
1440 * update groups and process the change.
1441 *
1442 * bgp: the bgp instance
1443 * ptype: the type of policy that got modified, see bgpd.h
1444 * pname: name of the policy
1445 * route_update: flag to control if an automatic update generation should
1446 * occur
1447 * start_event: flag that indicates if it's the beginning of the change.
1448 * Esp. when the user is changing the content interactively
1449 * over multiple statements. Useful to set dirty flag on
1450 * update groups.
1451 */
1452 void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1453 const char *pname, int route_update,
1454 int start_event)
1455 {
1456 struct updwalk_context ctx;
1457
1458 memset(&ctx, 0, sizeof(ctx));
1459 ctx.policy_type = ptype;
1460 ctx.policy_name = pname;
1461 ctx.policy_route_update = route_update;
1462 ctx.policy_event_start_flag = start_event;
1463 ctx.flags = 0;
1464
1465 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1466 }
1467
1468 /*
1469 * update_subgroup_split_peer
1470 *
1471 * Ensure that the given peer is in a subgroup of its own in the
1472 * specified update group.
1473 */
1474 void update_subgroup_split_peer(struct peer_af *paf,
1475 struct update_group *updgrp)
1476 {
1477 struct update_subgroup *old_subgrp, *subgrp;
1478 uint64_t old_id;
1479
1480
1481 old_subgrp = paf->subgroup;
1482
1483 if (!updgrp)
1484 updgrp = old_subgrp->update_group;
1485
1486 /*
1487 * If the peer is alone in its subgroup, reuse the existing
1488 * subgroup.
1489 */
1490 if (old_subgrp->peer_count == 1) {
1491 if (updgrp == old_subgrp->update_group)
1492 return;
1493
1494 subgrp = old_subgrp;
1495 old_id = old_subgrp->update_group->id;
1496
1497 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1498 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1499 }
1500
1501 update_group_remove_subgroup(old_subgrp->update_group,
1502 old_subgrp);
1503 update_group_add_subgroup(updgrp, subgrp);
1504
1505 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1506 UPDGRP_PEER_DBG_EN(updgrp);
1507 }
1508 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1509 zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s moved to u%" PRIu64 ":s%" PRIu64,
1510 old_id, subgrp->id, paf->peer->host,
1511 updgrp->id, subgrp->id);
1512
1513 /*
1514 * The state of the subgroup (adj_out, advs, packet queue etc)
1515 * is consistent internally, but may not be identical to other
1516 * subgroups in the new update group even if the version number
1517 * matches up. Make sure a full refresh is done before the
1518 * subgroup is merged with another.
1519 */
1520 update_subgroup_set_needs_refresh(subgrp, 1);
1521
1522 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1523 return;
1524 }
1525
1526 /*
1527 * Create a new subgroup under the specified update group, and copy
1528 * over relevant state to it.
1529 */
1530 subgrp = update_subgroup_create(updgrp);
1531 update_subgroup_inherit_info(subgrp, old_subgrp);
1532
1533 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1534 subgrp->split_from.subgroup_id = old_subgrp->id;
1535
1536 /*
1537 * Copy out relevant state from the old subgroup.
1538 */
1539 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1540 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1541
1542 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1543 zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s split and moved into u%" PRIu64":s%" PRIu64,
1544 paf->subgroup->update_group->id, paf->subgroup->id,
1545 paf->peer->host, updgrp->id, subgrp->id);
1546
1547 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1548
1549 /*
1550 * Since queued advs were left behind, this new subgroup needs a
1551 * refresh.
1552 */
1553 update_subgroup_set_needs_refresh(subgrp, 1);
1554
1555 /*
1556 * Remove peer from old subgroup, and add it to the new one.
1557 */
1558 update_subgroup_remove_peer(paf->subgroup, paf);
1559
1560 update_subgroup_add_peer(subgrp, paf, 1);
1561 }
1562
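/*
 * update_bgp_group_init
 *
 * Create the per-AF update-group hash tables for a bgp instance,
 * keyed and compared with updgrp_hash_key_make() and
 * updgrp_hash_cmp().
 */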
1563 void update_bgp_group_init(struct bgp *bgp)
1564 {
1565 int afid;
1566
1567 AF_FOREACH (afid)
1568 bgp->update_groups[afid] =
1569 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
1570 "BGP Update Group Hash");
1571 }
1572
1573 void update_bgp_group_free(struct bgp *bgp)
1574 {
1575 int afid;
1576
1577 AF_FOREACH (afid) {
1578 if (bgp->update_groups[afid]) {
1579 hash_free(bgp->update_groups[afid]);
1580 bgp->update_groups[afid] = NULL;
1581 }
1582 }
1583 }
1584
1585 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1586 uint64_t subgrp_id)
1587 {
1588 struct updwalk_context ctx;
1589 memset(&ctx, 0, sizeof(ctx));
1590 ctx.vty = vty;
1591 ctx.subgrp_id = subgrp_id;
1592
1593 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1594 }
1595
1596 /*
1597 * update_group_show_stats
1598 *
1599 * Show global statistics about update groups.
1600 */
1601 void update_group_show_stats(struct bgp *bgp, struct vty *vty)
1602 {
1603 vty_out(vty, "Update groups created: %u\n",
1604 bgp->update_group_stats.updgrps_created);
1605 vty_out(vty, "Update groups deleted: %u\n",
1606 bgp->update_group_stats.updgrps_deleted);
1607 vty_out(vty, "Update subgroups created: %u\n",
1608 bgp->update_group_stats.subgrps_created);
1609 vty_out(vty, "Update subgroups deleted: %u\n",
1610 bgp->update_group_stats.subgrps_deleted);
1611 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1612 vty_out(vty, "Prune events: %u\n",
1613 bgp->update_group_stats.prune_events);
1614 vty_out(vty, "Merge events: %u\n",
1615 bgp->update_group_stats.merge_events);
1616 vty_out(vty, "Split events: %u\n",
1617 bgp->update_group_stats.split_events);
1618 vty_out(vty, "Update group switch events: %u\n",
1619 bgp->update_group_stats.updgrp_switch_events);
1620 vty_out(vty, "Peer route refreshes combined: %u\n",
1621 bgp->update_group_stats.peer_refreshes_combined);
1622 vty_out(vty, "Merge checks triggered: %u\n",
1623 bgp->update_group_stats.merge_checks_triggered);
1624 }
1625
1626 /*
1627 * update_group_adjust_peer
1628 */
1629 void update_group_adjust_peer(struct peer_af *paf)
1630 {
1631 struct update_group *updgrp;
1632 struct update_subgroup *subgrp, *old_subgrp;
1633 struct peer *peer;
1634
1635 if (!paf)
1636 return;
1637
1638 peer = PAF_PEER(paf);
1639 if (!peer_established(peer)) {
1640 return;
1641 }
1642
1643 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1644 return;
1645 }
1646
1647 if (!peer->afc_nego[paf->afi][paf->safi]) {
1648 return;
1649 }
1650
1651 updgrp = update_group_find(paf);
1652 if (!updgrp) {
1653 updgrp = update_group_create(paf);
1654 if (!updgrp) {
1655 flog_err(EC_BGP_UPDGRP_CREATE,
1656 "couldn't create update group for peer %s",
1657 paf->peer->host);
1658 return;
1659 }
1660 }
1661
1662 old_subgrp = paf->subgroup;
1663
1664 if (old_subgrp) {
1665
1666 /*
1667 * If the update group of the peer is unchanged, the
1668 * peer can stay in its existing subgroup and we're
1669 * done.
1670 */
1671 if (old_subgrp->update_group == updgrp)
1672 return;
1673
1674 /*
1675 * The peer is switching between update groups. Put it in its
1676 * own subgroup under the new update group.
1677 */
1678 update_subgroup_split_peer(paf, updgrp);
1679 return;
1680 }
1681
1682 subgrp = update_subgroup_find(updgrp, paf);
1683 if (!subgrp) {
1684 subgrp = update_subgroup_create(updgrp);
1685 if (!subgrp)
1686 return;
1687 }
1688
1689 update_subgroup_add_peer(subgrp, paf, 1);
1690 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1691 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1692 subgrp->id, paf->peer->host);
1693
1694 return;
1695 }
1696
1697 int update_group_adjust_soloness(struct peer *peer, int set)
1698 {
1699 struct peer_group *group;
1700 struct listnode *node, *nnode;
1701
1702 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1703 peer_lonesoul_or_not(peer, set);
1704 if (peer->status == Established)
1705 bgp_announce_route_all(peer);
1706 } else {
1707 group = peer->group;
1708 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1709 peer_lonesoul_or_not(peer, set);
1710 if (peer->status == Established)
1711 bgp_announce_route_all(peer);
1712 }
1713 }
1714 return 0;
1715 }
1716
1717 /*
1718 * update_subgroup_rib
1719 */
1720 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
1721 {
1722 struct bgp *bgp;
1723
1724 bgp = SUBGRP_INST(subgrp);
1725 if (!bgp)
1726 return NULL;
1727
1728 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1729 }
1730
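/*
 * update_group_af_walk
 *
 * Invoke the given callback on every update group of one AFI/SAFI by
 * walking that AF's hash table; update_group_walk() below does the
 * same across all AFI/SAFI combinations.
 */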
1731 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1732 updgrp_walkcb cb, void *ctx)
1733 {
1734 struct updwalk_context wctx;
1735 int afid;
1736
1737 if (!bgp)
1738 return;
1739 afid = afindex(afi, safi);
1740 if (afid >= BGP_AF_MAX)
1741 return;
1742
1743 memset(&wctx, 0, sizeof(wctx));
1744 wctx.cb = cb;
1745 wctx.context = ctx;
1746
1747 if (bgp->update_groups[afid])
1748 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1749 }
1750
1751 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1752 {
1753 afi_t afi;
1754 safi_t safi;
1755
1756 FOREACH_AFI_SAFI (afi, safi) {
1757 update_group_af_walk(bgp, afi, safi, cb, ctx);
1758 }
1759 }
1760
1761 void update_group_periodic_merge(struct bgp *bgp)
1762 {
1763 char reason[] = "periodic merge check";
1764
1765 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1766 (void *)reason);
1767 }
1768
1769 static int
1770 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1771 void *arg)
1772 {
1773 struct update_subgroup *subgrp;
1774 struct peer *peer;
1775 afi_t afi;
1776 safi_t safi;
1777
1778 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1779 peer = SUBGRP_PEER(subgrp);
1780 afi = SUBGRP_AFI(subgrp);
1781 safi = SUBGRP_SAFI(subgrp);
1782
1783 if (peer->default_rmap[afi][safi].name) {
1784 subgroup_default_originate(subgrp, 0);
1785 }
1786 }
1787
1788 return UPDWALK_CONTINUE;
1789 }
1790
1791 int update_group_refresh_default_originate_route_map(struct thread *thread)
1792 {
1793 struct bgp *bgp;
1794 char reason[] = "refresh default-originate route-map";
1795
1796 bgp = THREAD_ARG(thread);
1797 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1798 reason);
1799 thread_cancel(&bgp->t_rmap_def_originate_eval);
1800 bgp_unlock(bgp);
1801
1802 return 0;
1803 }
1804
1805 /*
1806 * peer_af_announce_route
1807 *
1808 * Refreshes routes out to a peer_af immediately.
1809 *
1810 * If the combine parameter is true, then this function will try to
1811 * gather other peers in the subgroup for which a route announcement
1812 * is pending and efficiently announce routes to all of them.
1813 *
1814 * For now, the 'combine' option has an effect only if all peers in
1815 * the subgroup have a route announcement pending.
1816 */
1817 void peer_af_announce_route(struct peer_af *paf, int combine)
1818 {
1819 struct update_subgroup *subgrp;
1820 struct peer_af *cur_paf;
1821 int all_pending;
1822
1823 subgrp = paf->subgroup;
1824 all_pending = 0;
1825
1826 if (combine) {
1827 /*
1828 * If there are other peers in the old subgroup that also need
1829 * routes to be announced, pull them into the peer's new
1830 * subgroup.
1831 * Combine route announcement with other peers if possible.
1832 *
1833 * For now, we combine only if all peers in the subgroup have an
1834 * announcement pending.
1835 */
1836 all_pending = 1;
1837
1838 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1839 if (cur_paf == paf)
1840 continue;
1841
1842 if (cur_paf->t_announce_route)
1843 continue;
1844
1845 all_pending = 0;
1846 break;
1847 }
1848 }
1849 /*
1850 * Announce to the peer alone if we were not asked to combine peers,
1851 * or if some peers don't have a route announcement pending.
1852 */
1853 if (!combine || !all_pending) {
1854 update_subgroup_split_peer(paf, NULL);
1855 subgrp = paf->subgroup;
1856
1857 assert(subgrp && subgrp->update_group);
1858 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1859 zlog_debug("u%" PRIu64 ":s%" PRIu64" %s announcing routes",
1860 subgrp->update_group->id, subgrp->id,
1861 paf->peer->host);
1862
1863 subgroup_announce_route(paf->subgroup);
1864 return;
1865 }
1866
1867 /*
1868 * We will announce routes to the entire subgroup.
1869 *
1870 * First stop refresh timers on all the other peers.
1871 */
1872 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1873 if (cur_paf == paf)
1874 continue;
1875
1876 bgp_stop_announce_route_timer(cur_paf);
1877 }
1878
1879 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1880 zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes to %s, combined into %d peers",
1881 subgrp->update_group->id, subgrp->id,
1882 paf->peer->host, subgrp->peer_count);
1883
1884 subgroup_announce_route(subgrp);
1885
1886 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1887 subgrp->peer_count - 1);
1888 }
1889
1890 void subgroup_trigger_write(struct update_subgroup *subgrp)
1891 {
1892 struct peer_af *paf;
1893
1894 /*
1895 * For each peer in the subgroup, schedule a job to pull packets from
1896 * the subgroup output queue into their own output queue. This action
1897 * will trigger a write job on the I/O thread.
1898 */
1899 SUBGRP_FOREACH_PEER (subgrp, paf)
1900 if (paf->peer->status == Established)
1901 thread_add_timer_msec(
1902 bm->master, bgp_generate_updgrp_packets,
1903 paf->peer, 0,
1904 &paf->peer->t_generate_updgrp_packets);
1905 }
1906
1907 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
1908 {
1909 UPDGRP_PEER_DBG_OFF(updgrp);
1910 return UPDWALK_CONTINUE;
1911 }
1912
1913 /* Return true if we should addpath encode NLRI to this peer */
1914 int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
1915 {
1916 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1917 && CHECK_FLAG(peer->af_cap[afi][safi],
1918 PEER_CAP_ADDPATH_AF_RX_RCV));
1919 }