1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_errors.h"
51 #include "bgpd/bgp_fsm.h"
52 #include "bgpd/bgp_advertise.h"
53 #include "bgpd/bgp_packet.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_route.h"
56 #include "bgpd/bgp_filter.h"
57 #include "bgpd/bgp_io.h"
58
59 /********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63 /**
64 * Assign a unique ID to an update group or subgroup, mostly for display/
65 * debugging purposes. The ID space is 64 bits, so we need not worry
66 * about it wrapping or about filling gaps. While at it, timestamp
67 * the creation.
68 */
69 static void update_group_checkin(struct update_group *updgrp)
70 {
71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock();
73 }
74
75 static void update_subgroup_checkin(struct update_subgroup *subgrp,
76 struct update_group *updgrp)
77 {
78 subgrp->id = ++bm->subgrp_idspace;
79 subgrp->uptime = bgp_clock();
80 }
81
82 static void sync_init(struct update_subgroup *subgrp)
83 {
84 subgrp->sync =
85 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
86 bgp_adv_fifo_init(&subgrp->sync->update);
87 bgp_adv_fifo_init(&subgrp->sync->withdraw);
88 bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
89 subgrp->hash =
90 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
91
92 /* We use a larger buffer for subgrp->work in the event that:
93 *
94 * - We RX a BGP_UPDATE where the attributes alone are just
95 * under BGP_MAX_PACKET_SIZE.
96 *
97 * - The user configures an outbound route-map that does many
98 * as-path prepends or adds many communities. At most they can
99 * have CMD_ARGC_MAX args in a route-map, so there is a finite
100 * limit on how large they can make the attributes.
101 *
102 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW of headroom
103 * allows us to avoid bounds checking for every single attribute
104 * as we construct an UPDATE.
105 */
106 subgrp->work =
107 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
108 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
109 }
110
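/*
 * Illustrative sketch, not part of the FRR source: with the oversized
 * work stream above, attribute construction can use the stock stream
 * primitives without a bounds check per attribute, e.g. (hypothetical
 * variables flags/type/data/len):
 *
 *    stream_reset(subgrp->work);
 *    stream_putc(subgrp->work, flags);
 *    stream_putc(subgrp->work, type);
 *    stream_put(subgrp->work, data, len);
 *    ...
 *    if (stream_get_endp(subgrp->work) > BGP_MAX_PACKET_SIZE)
 *        ...handle the oversized UPDATE once, at the end...
 *
 * A single length check at the end suffices because the extra
 * BGP_MAX_PACKET_SIZE_OVERFLOW headroom keeps the intermediate writes
 * inside the buffer.
 */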
111 static void sync_delete(struct update_subgroup *subgrp)
112 {
113 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
114 subgrp->sync = NULL;
115 if (subgrp->hash)
116 hash_free(subgrp->hash);
117 subgrp->hash = NULL;
118 if (subgrp->work)
119 stream_free(subgrp->work);
120 subgrp->work = NULL;
121 if (subgrp->scratch)
122 stream_free(subgrp->scratch);
123 subgrp->scratch = NULL;
124 }
125
126 /**
127 * conf_copy
128 *
129 * Copy only those fields that are relevant to update-group matching.
130 */
131 static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
132 safi_t safi)
133 {
134 struct bgp_filter *srcfilter;
135 struct bgp_filter *dstfilter;
136
137 srcfilter = &src->filter[afi][safi];
138 dstfilter = &dst->filter[afi][safi];
139
140 dst->bgp = src->bgp;
141 dst->sort = src->sort;
142 dst->as = src->as;
143 dst->v_routeadv = src->v_routeadv;
144 dst->flags = src->flags;
145 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
146 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
147
148 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
149 dst->cap = src->cap;
150 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
151 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
152 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
153 dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
154 dst->local_as = src->local_as;
155 dst->change_local_as = src->change_local_as;
156 dst->shared_network = src->shared_network;
157 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
158
159 dst->group = src->group;
160
161 if (src->default_rmap[afi][safi].name) {
162 dst->default_rmap[afi][safi].name =
163 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
164 src->default_rmap[afi][safi].name);
165 dst->default_rmap[afi][safi].map =
166 src->default_rmap[afi][safi].map;
167 }
168
169 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
170 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
171 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
172 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
173 }
174
175 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
176 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
177 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
178 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
179 }
180
181 if (FILTER_LIST_OUT_NAME(srcfilter)) {
182 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
183 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
184 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
185 }
186
187 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
188 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
189 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
190 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
191 }
192
193 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
194 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
195 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
196 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
197 }
198 }
199
200 /**
201 * Free the strings that conf_copy() duplicated with XSTRDUP.
202 */
203 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
204 {
205 struct bgp_filter *srcfilter;
206
207 srcfilter = &src->filter[afi][safi];
208
209 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
210
211 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
212
213 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
214
215 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
216
217 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
218
219 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
220
221 XFREE(MTYPE_BGP_PEER_HOST, src->host);
222 src->host = NULL;
223 }
224
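/*
 * Usage sketch, not part of the FRR source: conf_copy() and
 * conf_release() are meant to be paired around a short-lived,
 * stack-allocated "struct peer" that serves only as a lookup/compare
 * key:
 *
 *    struct peer tmp_conf;
 *
 *    memset(&tmp_conf, 0, sizeof(tmp_conf));
 *    conf_copy(&tmp_conf, paf->peer, paf->afi, paf->safi);
 *    ...use tmp_conf for matching...
 *    conf_release(&tmp_conf, paf->afi, paf->safi);
 *
 * update_group_find() and update_group_create() below follow exactly
 * this pattern (via peer2_updgrp_copy()).
 */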
225 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
226 {
227 struct peer *src;
228 struct peer *dst;
229
230 if (!updgrp || !paf)
231 return;
232
233 src = paf->peer;
234 dst = updgrp->conf;
235 if (!src || !dst)
236 return;
237
238 updgrp->afi = paf->afi;
239 updgrp->safi = paf->safi;
240 updgrp->afid = paf->afid;
241 updgrp->bgp = src->bgp;
242
243 conf_copy(dst, src, paf->afi, paf->safi);
244 }
245
246 /**
247 * Auxiliary functions to maintain the update-group hash table:
248 * - updgrp_hash_alloc - creates a new entry; passed to hash_get
249 * - updgrp_hash_key_make - makes the key for update-group search
250 * - updgrp_hash_cmp - compares two update groups for equality
251 */
252 static void *updgrp_hash_alloc(void *p)
253 {
254 struct update_group *updgrp;
255 const struct update_group *in;
256
257 in = (const struct update_group *)p;
258 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
259 memcpy(updgrp, in, sizeof(struct update_group));
260 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
261 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
262 return updgrp;
263 }
264
265 /**
266 * The hash value for a peer is computed from the following variables:
267 * v = f(
268 * 1. IBGP (1) or EBGP (2)
269 * 2. FLAGS based on configuration:
270 * LOCAL_AS_NO_PREPEND
271 * LOCAL_AS_REPLACE_AS
272 * 3. AF_FLAGS based on configuration:
273 * Refer to definition in bgp_updgrp.h
274 * 4. (AF-independent) Capability flags:
275 * AS4_RCV capability
276 * 5. (AF-dependent) Capability flags:
277 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
278 * 6. MRAI
279 * 7. peer-group name
280 * 8. Outbound route-map name (neighbor route-map <> out)
281 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
282 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
283 * 11. Outbound as-list name (neighbor filter-list <> out)
284 * 12. Unsuppress map name (neighbor unsuppress-map <>)
285 * 13. default rmap name (neighbor default-originate route-map <>)
286 * 14. Whether both global and link-local nexthops are encoded
287 * 15. If peer is configured to be a lonesoul, peer ip address
288 * 16. Local-as should match, if configured.
289 * )
290 */
291 static unsigned int updgrp_hash_key_make(const void *p)
292 {
293 const struct update_group *updgrp;
294 const struct peer *peer;
295 const struct bgp_filter *filter;
296 uint32_t flags;
297 uint32_t key;
298 afi_t afi;
299 safi_t safi;
300
301 #define SEED1 999331
302 #define SEED2 2147483647
303
304 updgrp = p;
305 peer = updgrp->conf;
306 afi = updgrp->afi;
307 safi = updgrp->safi;
308 flags = peer->af_flags[afi][safi];
309 filter = &peer->filter[afi][safi];
310
311 key = 0;
312
313 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
314 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
315 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
316 key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
317 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
318 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
319 key);
320 key = jhash_1word(peer->v_routeadv, key);
321 key = jhash_1word(peer->change_local_as, key);
322
323 if (peer->group)
324 key = jhash_1word(jhash(peer->group->name,
325 strlen(peer->group->name), SEED1),
326 key);
327
328 if (filter->map[RMAP_OUT].name)
329 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
330 strlen(filter->map[RMAP_OUT].name),
331 SEED1),
332 key);
333
334 if (filter->dlist[FILTER_OUT].name)
335 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
336 strlen(filter->dlist[FILTER_OUT].name),
337 SEED1),
338 key);
339
340 if (filter->plist[FILTER_OUT].name)
341 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
342 strlen(filter->plist[FILTER_OUT].name),
343 SEED1),
344 key);
345
346 if (filter->aslist[FILTER_OUT].name)
347 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
348 strlen(filter->aslist[FILTER_OUT].name),
349 SEED1),
350 key);
351
352 if (filter->usmap.name)
353 key = jhash_1word(jhash(filter->usmap.name,
354 strlen(filter->usmap.name), SEED1),
355 key);
356
357 if (peer->default_rmap[afi][safi].name)
358 key = jhash_1word(
359 jhash(peer->default_rmap[afi][safi].name,
360 strlen(peer->default_rmap[afi][safi].name),
361 SEED1),
362 key);
363
364 /* If peer is on a shared network and is exchanging IPv6 prefixes,
365 * it needs to include the link-local address. That's different from
366 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
367 * bytes). We create different update groups to take care of that.
368 */
369 key = jhash_1word(
370 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
371 key);
372
373 /*
374 * There are certain peers that must get their own update-group:
375 * - lonesoul peers
376 * - peers that negotiated ORF
377 */
378 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
379 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
380 || CHECK_FLAG(peer->af_cap[afi][safi],
381 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
382 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
383 key);
384
385 return key;
386 }
387
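/*
 * Example, not part of the FRR source: two peers end up in the same
 * update group only if every input to this key (and to
 * updgrp_hash_cmp() below) matches. With a hypothetical configuration
 * such as
 *
 *    neighbor 10.0.0.1 route-map RM-OUT out
 *    neighbor 10.0.0.2 route-map RM-OUT out
 *
 * the two peers can share an update group, since their outbound policy
 * and flags agree. Adding, say,
 *
 *    neighbor 10.0.0.2 addpath-tx-all-paths
 *
 * changes addpath_type for 10.0.0.2 and therefore hashes it into a
 * different update group.
 */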
388 static bool updgrp_hash_cmp(const void *p1, const void *p2)
389 {
390 const struct update_group *grp1;
391 const struct update_group *grp2;
392 const struct peer *pe1;
393 const struct peer *pe2;
394 uint32_t flags1;
395 uint32_t flags2;
396 const struct bgp_filter *fl1;
397 const struct bgp_filter *fl2;
398 afi_t afi;
399 safi_t safi;
400
401 if (!p1 || !p2)
402 return false;
403
404 grp1 = p1;
405 grp2 = p2;
406 pe1 = grp1->conf;
407 pe2 = grp2->conf;
408 afi = grp1->afi;
409 safi = grp1->safi;
410 flags1 = pe1->af_flags[afi][safi];
411 flags2 = pe2->af_flags[afi][safi];
412 fl1 = &pe1->filter[afi][safi];
413 fl2 = &pe2->filter[afi][safi];
414
415 /* put EBGP and IBGP peers in different update groups */
416 if (pe1->sort != pe2->sort)
417 return false;
418
419 /* check peer flags */
420 if ((pe1->flags & PEER_UPDGRP_FLAGS)
421 != (pe2->flags & PEER_UPDGRP_FLAGS))
422 return false;
423
424 /* If there is 'local-as' configured, it should match. */
425 if (pe1->change_local_as != pe2->change_local_as)
426 return false;
427
428 /* flags like route reflector client */
429 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
430 return false;
431
432 if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
433 return false;
434
435 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
436 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
437 return false;
438
439 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
440 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
441 return false;
442
443 if (pe1->v_routeadv != pe2->v_routeadv)
444 return false;
445
446 if (pe1->group != pe2->group)
447 return false;
448
449 /* route-map names should be the same */
450 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
451 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
452 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
453 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
454 return false;
455
456 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
457 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
458 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
459 && strcmp(fl1->dlist[FILTER_OUT].name,
460 fl2->dlist[FILTER_OUT].name)))
461 return false;
462
463 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
464 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
465 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
466 && strcmp(fl1->plist[FILTER_OUT].name,
467 fl2->plist[FILTER_OUT].name)))
468 return false;
469
470 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
471 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
472 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
473 && strcmp(fl1->aslist[FILTER_OUT].name,
474 fl2->aslist[FILTER_OUT].name)))
475 return false;
476
477 if ((fl1->usmap.name && !fl2->usmap.name)
478 || (!fl1->usmap.name && fl2->usmap.name)
479 || (fl1->usmap.name && fl2->usmap.name
480 && strcmp(fl1->usmap.name, fl2->usmap.name)))
481 return false;
482
483 if ((pe1->default_rmap[afi][safi].name
484 && !pe2->default_rmap[afi][safi].name)
485 || (!pe1->default_rmap[afi][safi].name
486 && pe2->default_rmap[afi][safi].name)
487 || (pe1->default_rmap[afi][safi].name
488 && pe2->default_rmap[afi][safi].name
489 && strcmp(pe1->default_rmap[afi][safi].name,
490 pe2->default_rmap[afi][safi].name)))
491 return false;
492
493 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
494 return false;
495
496 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
497 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
498 || CHECK_FLAG(pe1->af_cap[afi][safi],
499 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
500 && !sockunion_same(&pe1->su, &pe2->su))
501 return false;
502
503 return true;
504 }
505
506 static void peer_lonesoul_or_not(struct peer *peer, int set)
507 {
508 /* no change in status? */
509 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
510 return;
511
512 if (set)
513 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
514 else
515 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
516
517 update_group_adjust_peer_afs(peer);
518 }
519
520 /*
521 * subgroup_total_packets_enqueued
522 *
523 * Returns the total number of packets enqueued to a subgroup.
524 */
525 static unsigned int
526 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
527 {
528 struct bpacket *pkt;
529
530 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
531
532 return pkt->ver - 1;
533 }
534
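/*
 * Note (an inference from the arithmetic above, not stated elsewhere in
 * this file): the tail of the packet queue is a placeholder whose
 * version appears to be one greater than the number of packets ever
 * enqueued, which is why "ver - 1" yields the total.
 */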
535 static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
536 {
537 struct updwalk_context *ctx = arg;
538 struct vty *vty;
539 struct update_subgroup *subgrp;
540 struct peer_af *paf;
541 struct bgp_filter *filter;
542 int match = 0;
543
544 if (!ctx)
545 return CMD_SUCCESS;
546
547 if (ctx->subgrp_id) {
548 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
549 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
550 continue;
551 else {
552 match = 1;
553 break;
554 }
555 }
556 } else {
557 match = 1;
558 }
559
560 if (!match) {
561 /* Since this routine is invoked from a walk, we cannot signal
562 * any error here; we can only return.
563 */
564 return CMD_SUCCESS;
565 }
566
567 vty = ctx->vty;
568
569 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
570 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
571 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
572 if (filter->map[RMAP_OUT].name)
573 vty_out(vty, " Outgoing route map: %s\n",
574 filter->map[RMAP_OUT].name);
575 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
576 if (updgrp->conf->change_local_as)
577 vty_out(vty, " Local AS %u%s%s\n",
578 updgrp->conf->change_local_as,
579 CHECK_FLAG(updgrp->conf->flags,
580 PEER_FLAG_LOCAL_AS_NO_PREPEND)
581 ? " no-prepend"
582 : "",
583 CHECK_FLAG(updgrp->conf->flags,
584 PEER_FLAG_LOCAL_AS_REPLACE_AS)
585 ? " replace-as"
586 : "");
587
588 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
589 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
590 continue;
591 vty_out(vty, "\n");
592 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
593 vty_out(vty, " Created: %s",
594 timestamp_string(subgrp->uptime));
595
596 if (subgrp->split_from.update_group_id
597 || subgrp->split_from.subgroup_id) {
598 vty_out(vty, " Split from group id: %" PRIu64 "\n",
599 subgrp->split_from.update_group_id);
600 vty_out(vty,
601 " Split from subgroup id: %" PRIu64 "\n",
602 subgrp->split_from.subgroup_id);
603 }
604
605 vty_out(vty, " Join events: %u\n", subgrp->join_events);
606 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
607 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
608 vty_out(vty, " Split events: %u\n", subgrp->split_events);
609 vty_out(vty, " Update group switch events: %u\n",
610 subgrp->updgrp_switch_events);
611 vty_out(vty, " Peer refreshes combined: %u\n",
612 subgrp->peer_refreshes_combined);
613 vty_out(vty, " Merge checks triggered: %u\n",
614 subgrp->merge_checks_triggered);
615 vty_out(vty, " Coalesce Time: %u%s\n",
616 (UPDGRP_INST(subgrp->update_group))->coalesce_time,
617 subgrp->t_coalesce ? "(Running)" : "");
618 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
619 vty_out(vty, " Packet queue length: %d\n",
620 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
621 vty_out(vty, " Total packets enqueued: %u\n",
622 subgroup_total_packets_enqueued(subgrp));
623 vty_out(vty, " Packet queue high watermark: %d\n",
624 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
625 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
626 vty_out(vty, " Advertise list: %s\n",
627 advertise_list_is_empty(subgrp) ? "empty"
628 : "not empty");
629 vty_out(vty, " Flags: %s\n",
630 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
631 ? "R"
632 : "");
633 if (subgrp->peer_count > 0) {
634 vty_out(vty, " Peers:\n");
635 SUBGRP_FOREACH_PEER (subgrp, paf)
636 vty_out(vty, " - %s\n", paf->peer->host);
637 }
638 }
639 return UPDWALK_CONTINUE;
640 }
641
642 /*
643 * Helper function to show the packet queue for each subgroup of update group.
644 * Will be constrained to a particular subgroup id if id !=0
645 */
646 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
647 void *arg)
648 {
649 struct updwalk_context *ctx = arg;
650 struct update_subgroup *subgrp;
651 struct vty *vty;
652
653 vty = ctx->vty;
654 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
655 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
656 continue;
657 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
658 updgrp->id, subgrp->id);
659 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
660 }
661 return UPDWALK_CONTINUE;
662 }
663
664 /*
665 * Show the packet queue for each subgroup of update group. Will be
666 * constrained to a particular subgroup id if id !=0
667 */
668 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
669 struct vty *vty, uint64_t id)
670 {
671 struct updwalk_context ctx;
672
673 memset(&ctx, 0, sizeof(ctx));
674 ctx.vty = vty;
675 ctx.subgrp_id = id;
676 ctx.flags = 0;
677 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
678 &ctx);
679 }
680
681 static struct update_group *update_group_find(struct peer_af *paf)
682 {
683 struct update_group *updgrp;
684 struct update_group tmp;
685 struct peer tmp_conf;
686
687 if (!peer_established(PAF_PEER(paf)))
688 return NULL;
689
690 memset(&tmp, 0, sizeof(tmp));
691 memset(&tmp_conf, 0, sizeof(tmp_conf));
692 tmp.conf = &tmp_conf;
693 peer2_updgrp_copy(&tmp, paf);
694
695 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
696 conf_release(&tmp_conf, paf->afi, paf->safi);
697 return updgrp;
698 }
699
700 static struct update_group *update_group_create(struct peer_af *paf)
701 {
702 struct update_group *updgrp;
703 struct update_group tmp;
704 struct peer tmp_conf;
705
706 memset(&tmp, 0, sizeof(tmp));
707 memset(&tmp_conf, 0, sizeof(tmp_conf));
708 tmp.conf = &tmp_conf;
709 peer2_updgrp_copy(&tmp, paf);
710
711 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
712 updgrp_hash_alloc);
713 if (!updgrp)
714 return NULL;
715 update_group_checkin(updgrp);
716
717 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
718 zlog_debug("create update group %" PRIu64, updgrp->id);
719
720 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
721
722 conf_release(&tmp_conf, paf->afi, paf->safi);
723 return updgrp;
724 }
725
726 static void update_group_delete(struct update_group *updgrp)
727 {
728 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
729 zlog_debug("delete update group %" PRIu64, updgrp->id);
730
731 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
732
733 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
734 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
735
736 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
737 updgrp->conf->host = NULL;
738
739 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
740
741 XFREE(MTYPE_BGP_PEER, updgrp->conf);
742 XFREE(MTYPE_BGP_UPDGRP, updgrp);
743 }
744
745 static void update_group_add_subgroup(struct update_group *updgrp,
746 struct update_subgroup *subgrp)
747 {
748 if (!updgrp || !subgrp)
749 return;
750
751 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
752 subgrp->update_group = updgrp;
753 }
754
755 static void update_group_remove_subgroup(struct update_group *updgrp,
756 struct update_subgroup *subgrp)
757 {
758 if (!updgrp || !subgrp)
759 return;
760
761 LIST_REMOVE(subgrp, updgrp_train);
762 subgrp->update_group = NULL;
763 if (LIST_EMPTY(&(updgrp->subgrps)))
764 update_group_delete(updgrp);
765 }
766
767 static struct update_subgroup *
768 update_subgroup_create(struct update_group *updgrp)
769 {
770 struct update_subgroup *subgrp;
771
772 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
773 update_subgroup_checkin(subgrp, updgrp);
774 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
775 sync_init(subgrp);
776 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
777 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
778 TAILQ_INIT(&(subgrp->adjq));
779 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
780 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
781 subgrp->id);
782
783 update_group_add_subgroup(updgrp, subgrp);
784
785 UPDGRP_INCR_STAT(updgrp, subgrps_created);
786
787 return subgrp;
788 }
789
790 static void update_subgroup_delete(struct update_subgroup *subgrp)
791 {
792 if (!subgrp)
793 return;
794
795 if (subgrp->update_group)
796 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
797
798 if (subgrp->t_merge_check)
799 THREAD_OFF(subgrp->t_merge_check);
800
801 if (subgrp->t_coalesce)
802 THREAD_TIMER_OFF(subgrp->t_coalesce);
803
804 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
805 subgroup_clear_table(subgrp);
806
807 if (subgrp->t_coalesce)
808 THREAD_TIMER_OFF(subgrp->t_coalesce);
809 sync_delete(subgrp);
810
811 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
812 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
813 subgrp->update_group->id, subgrp->id);
814
815 update_group_remove_subgroup(subgrp->update_group, subgrp);
816
817 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
818 }
819
820 void update_subgroup_inherit_info(struct update_subgroup *to,
821 struct update_subgroup *from)
822 {
823 if (!to || !from)
824 return;
825
826 to->sflags = from->sflags;
827 }
828
829 /*
830 * update_subgroup_check_delete
831 *
832 * Delete a subgroup if it is ready to be deleted.
833 *
834 * Returns true if the subgroup was deleted.
835 */
836 static int update_subgroup_check_delete(struct update_subgroup *subgrp)
837 {
838 if (!subgrp)
839 return 0;
840
841 if (!LIST_EMPTY(&(subgrp->peers)))
842 return 0;
843
844 update_subgroup_delete(subgrp);
845
846 return 1;
847 }
848
849 /*
850 * update_subgroup_add_peer
851 *
852 * @param send_enqueued_packets If true all currently enqueued packets will
853 * also be sent to the peer.
854 */
855 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
856 struct peer_af *paf,
857 int send_enqueued_pkts)
858 {
859 struct bpacket *pkt;
860
861 if (!subgrp || !paf)
862 return;
863
864 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
865 paf->subgroup = subgrp;
866 subgrp->peer_count++;
867
868 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
869 UPDGRP_PEER_DBG_EN(subgrp->update_group);
870 }
871
872 SUBGRP_INCR_STAT(subgrp, join_events);
873
874 if (send_enqueued_pkts) {
875 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
876 } else {
877
878 /*
879 * Hang the peer off of the last, placeholder, packet in the
880 * queue. This means it won't see any of the packets that are
881 * currently in the queue.
882 */
883 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
884 assert(pkt->buffer == NULL);
885 }
886
887 bpacket_add_peer(pkt, paf);
888
889 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
890 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
891 zlog_debug("peer %s added to subgroup s%" PRIu64,
892 paf->peer->host, subgrp->id);
893 }
894
895 /*
896 * update_subgroup_remove_peer_internal
897 *
898 * Internal function that removes a peer from a subgroup, but does not
899 * delete the subgroup. A call to this function must almost always be
900 * followed by a call to update_subgroup_check_delete().
901 *
902 * @see update_subgroup_remove_peer
903 */
904 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
905 struct peer_af *paf)
906 {
907 assert(subgrp && paf && subgrp->update_group);
908
909 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
910 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
911 }
912
913 bpacket_queue_remove_peer(paf);
914 LIST_REMOVE(paf, subgrp_train);
915 paf->subgroup = NULL;
916 subgrp->peer_count--;
917
918 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
919 zlog_debug("peer %s deleted from subgroup s%"
920 PRIu64 " peer cnt %d",
921 paf->peer->host, subgrp->id, subgrp->peer_count);
922 SUBGRP_INCR_STAT(subgrp, prune_events);
923 }
924
925 /*
926 * update_subgroup_remove_peer
927 */
928 void update_subgroup_remove_peer(struct update_subgroup *subgrp,
929 struct peer_af *paf)
930 {
931 if (!subgrp || !paf)
932 return;
933
934 update_subgroup_remove_peer_internal(subgrp, paf);
935
936 if (update_subgroup_check_delete(subgrp))
937 return;
938
939 /*
940 * The deletion of the peer may have caused some packets to be
941 * deleted from the subgroup packet queue. Check if the subgroup can
942 * be merged now.
943 */
944 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
945 }
946
947 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
948 struct peer_af *paf)
949 {
950 struct update_subgroup *subgrp = NULL;
951 uint64_t version;
952
953 if (paf->subgroup) {
954 assert(0);
955 return NULL;
956 } else
957 version = 0;
958
959 if (!peer_established(PAF_PEER(paf)))
960 return NULL;
961
962 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
963 if (subgrp->version != version
964 || CHECK_FLAG(subgrp->sflags,
965 SUBGRP_STATUS_DEFAULT_ORIGINATE))
966 continue;
967
968 /*
969 * The version number is not meaningful on a subgroup that needs
970 * a refresh.
971 */
972 if (update_subgroup_needs_refresh(subgrp))
973 continue;
974
975 break;
976 }
977
978 return subgrp;
979 }
980
981 /*
982 * update_subgroup_ready_for_merge
983 *
984 * Returns true if this subgroup is in a state that allows it to be
985 * merged into another subgroup.
986 */
987 static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
988 {
989
990 /*
991 * Not ready if there are any encoded packets waiting to be written
992 * out to peers.
993 */
994 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
995 return 0;
996
997 /*
998 * Not ready if there are enqueued updates waiting to be encoded.
999 */
1000 if (!advertise_list_is_empty(subgrp))
1001 return 0;
1002
1003 /*
1004 * Don't attempt to merge a subgroup that needs a refresh. For one,
1005 * we can't determine if the adj_out of such a group matches that of
1006 * another group.
1007 */
1008 if (update_subgroup_needs_refresh(subgrp))
1009 return 0;
1010
1011 return 1;
1012 }
1013
1014 /*
1015 * update_subgroup_can_merge_into
1016 *
1017 * Returns true if the first subgroup can merge into the second
1018 * subgroup.
1019 */
1020 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1021 struct update_subgroup *target)
1022 {
1023
1024 if (subgrp == target)
1025 return 0;
1026
1027 /*
1028 * Both must have processed the BRIB to the same point in order to
1029 * be merged.
1030 */
1031 if (subgrp->version != target->version)
1032 return 0;
1033
1034 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1035 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1036 return 0;
1037
1038 if (subgrp->adj_count != target->adj_count)
1039 return 0;
1040
1041 return update_subgroup_ready_for_merge(target);
1042 }
1043
1044 /*
1045 * update_subgroup_merge
1046 *
1047 * Merge the first subgroup into the second one.
1048 */
1049 static void update_subgroup_merge(struct update_subgroup *subgrp,
1050 struct update_subgroup *target,
1051 const char *reason)
1052 {
1053 struct peer_af *paf;
1054 int result;
1055 int peer_count;
1056
1057 assert(subgrp->adj_count == target->adj_count);
1058
1059 peer_count = subgrp->peer_count;
1060
1061 while (1) {
1062 paf = LIST_FIRST(&subgrp->peers);
1063 if (!paf)
1064 break;
1065
1066 update_subgroup_remove_peer_internal(subgrp, paf);
1067
1068 /*
1069 * Add the peer to the target subgroup, while making sure that
1070 * any currently enqueued packets won't be sent to it. Enqueued
1071 * packets could, for example, result in an unnecessary withdraw
1072 * followed by an advertise.
1073 */
1074 update_subgroup_add_peer(target, paf, 0);
1075 }
1076
1077 SUBGRP_INCR_STAT(target, merge_events);
1078
1079 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1080 zlog_debug("u%" PRIu64 ":s%" PRIu64
1081 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1082 ", "
1083 "trigger: %s",
1084 subgrp->update_group->id, subgrp->id, peer_count,
1085 target->update_group->id, target->id,
1086 reason ? reason : "unknown");
1087
1088 result = update_subgroup_check_delete(subgrp);
1089 assert(result);
1090 }
1091
1092 /*
1093 * update_subgroup_check_merge
1094 *
1095 * Merge this subgroup into another subgroup if possible.
1096 *
1097 * Returns true if the subgroup has been merged. The subgroup pointer
1098 * should not be accessed in this case.
1099 */
1100 int update_subgroup_check_merge(struct update_subgroup *subgrp,
1101 const char *reason)
1102 {
1103 struct update_subgroup *target;
1104
1105 if (!update_subgroup_ready_for_merge(subgrp))
1106 return 0;
1107
1108 /*
1109 * Look for a subgroup to merge into.
1110 */
1111 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
1112 if (update_subgroup_can_merge_into(subgrp, target))
1113 break;
1114 }
1115
1116 if (!target)
1117 return 0;
1118
1119 update_subgroup_merge(subgrp, target, reason);
1120 return 1;
1121 }
1122
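/*
 * Summary sketch, not part of the FRR source: the merge path above
 * boils down to
 *
 *    if (update_subgroup_ready_for_merge(subgrp))
 *        UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target)
 *            if (update_subgroup_can_merge_into(subgrp, target)) {
 *                update_subgroup_merge(subgrp, target, reason);
 *                break;
 *            }
 *
 * i.e. a subgroup with no queued work merges into the first sibling
 * whose version, flags and adj-out count match.
 */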
1123 /*
1124 * update_subgroup_merge_check_thread_cb
1125 */
1126 static int update_subgroup_merge_check_thread_cb(struct thread *thread)
1127 {
1128 struct update_subgroup *subgrp;
1129
1130 subgrp = THREAD_ARG(thread);
1131
1132 subgrp->t_merge_check = NULL;
1133
1134 update_subgroup_check_merge(subgrp, "triggered merge check");
1135 return 0;
1136 }
1137
1138 /*
1139 * update_subgroup_trigger_merge_check
1140 *
1141 * Triggers a call to update_subgroup_check_merge() on a clean context.
1142 *
1143 * @param force If true, the merge check will be triggered even if the
1144 * subgroup doesn't currently look ready for a merge.
1145 *
1146 * Returns true if a merge check will be performed shortly.
1147 */
1148 int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1149 int force)
1150 {
1151 if (subgrp->t_merge_check)
1152 return 1;
1153
1154 if (!force && !update_subgroup_ready_for_merge(subgrp))
1155 return 0;
1156
1157 subgrp->t_merge_check = NULL;
1158 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1159 subgrp, 0, &subgrp->t_merge_check);
1160
1161 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1162
1163 return 1;
1164 }
1165
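/*
 * Note: scheduling the check with a zero-delay timer defers
 * update_subgroup_check_merge() to the main event loop instead of
 * running it on the caller's stack, so a subgroup is never merged or
 * deleted while a caller might still be referencing it (the "clean
 * context" mentioned above).
 */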
1166 /*
1167 * update_subgroup_copy_adj_out
1168 *
1169 * Helper function that clones the adj out (state about advertised
1170 * routes) from one subgroup to another. It assumes that the adj out
1171 * of the target subgroup is empty.
1172 */
1173 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1174 struct update_subgroup *dest)
1175 {
1176 struct bgp_adj_out *aout, *aout_copy;
1177
1178 SUBGRP_FOREACH_ADJ (source, aout) {
1179 /*
1180 * Copy the adj out.
1181 */
1182 aout_copy =
1183 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1184 aout_copy->attr =
1185 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
1186 }
1187
1188 dest->scount = source->scount;
1189 }
1190
1191 /*
1192 * update_subgroup_copy_packets
1193 *
1194 * Copy packets after and including the given packet to the subgroup
1195 * 'dest'.
1196 *
1197 * Returns the number of packets copied.
1198 */
1199 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1200 struct bpacket *pkt)
1201 {
1202 int count;
1203
1204 count = 0;
1205 while (pkt && pkt->buffer) {
1206 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1207 &pkt->arr);
1208 count++;
1209 pkt = bpacket_next(pkt);
1210 }
1211
1212 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
1213
1214 return count;
1215 }
1216
1217 static int updgrp_prefix_list_update(struct update_group *updgrp,
1218 const char *name)
1219 {
1220 struct peer *peer;
1221 struct bgp_filter *filter;
1222
1223 peer = UPDGRP_PEER(updgrp);
1224 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1225
1226 if (PREFIX_LIST_OUT_NAME(filter)
1227 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1228 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1229 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1230 return 1;
1231 }
1232 return 0;
1233 }
1234
1235 static int updgrp_filter_list_update(struct update_group *updgrp,
1236 const char *name)
1237 {
1238 struct peer *peer;
1239 struct bgp_filter *filter;
1240
1241 peer = UPDGRP_PEER(updgrp);
1242 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1243
1244 if (FILTER_LIST_OUT_NAME(filter)
1245 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1246 FILTER_LIST_OUT(filter) =
1247 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1248 return 1;
1249 }
1250 return 0;
1251 }
1252
1253 static int updgrp_distribute_list_update(struct update_group *updgrp,
1254 const char *name)
1255 {
1256 struct peer *peer;
1257 struct bgp_filter *filter;
1258
1259 peer = UPDGRP_PEER(updgrp);
1260 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1261
1262 if (DISTRIBUTE_OUT_NAME(filter)
1263 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1264 DISTRIBUTE_OUT(filter) = access_list_lookup(
1265 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1266 return 1;
1267 }
1268 return 0;
1269 }
1270
1271 static int updgrp_route_map_update(struct update_group *updgrp,
1272 const char *name, int *def_rmap_changed)
1273 {
1274 struct peer *peer;
1275 struct bgp_filter *filter;
1276 int changed = 0;
1277 afi_t afi;
1278 safi_t safi;
1279
1280 peer = UPDGRP_PEER(updgrp);
1281 afi = UPDGRP_AFI(updgrp);
1282 safi = UPDGRP_SAFI(updgrp);
1283 filter = &peer->filter[afi][safi];
1284
1285 if (ROUTE_MAP_OUT_NAME(filter)
1286 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1287 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1288
1289 changed = 1;
1290 }
1291
1292 if (UNSUPPRESS_MAP_NAME(filter)
1293 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1294 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1295 changed = 1;
1296 }
1297
1298 /* process default-originate route-map */
1299 if (peer->default_rmap[afi][safi].name
1300 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1301 peer->default_rmap[afi][safi].map =
1302 route_map_lookup_by_name(name);
1303 if (def_rmap_changed)
1304 *def_rmap_changed = 1;
1305 }
1306 return changed;
1307 }
1308
1309 /*
1310 * hash iteration callback function to process a policy change for an
1311 * update group. Check if the changed policy matches the updgrp's
1312 * outbound route-map or unsuppress-map or default-originate map or
1313 * filter-list or prefix-list or distribute-list.
1314 * Trigger update generation accordingly.
1315 */
1316 static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
1317 {
1318 struct updwalk_context *ctx = arg;
1319 struct update_subgroup *subgrp;
1320 int changed = 0;
1321 int def_changed = 0;
1322
1323 if (!updgrp || !ctx || !ctx->policy_name)
1324 return UPDWALK_CONTINUE;
1325
1326 switch (ctx->policy_type) {
1327 case BGP_POLICY_ROUTE_MAP:
1328 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1329 &def_changed);
1330 break;
1331 case BGP_POLICY_FILTER_LIST:
1332 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1333 break;
1334 case BGP_POLICY_PREFIX_LIST:
1335 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1336 break;
1337 case BGP_POLICY_DISTRIBUTE_LIST:
1338 changed =
1339 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1340 break;
1341 default:
1342 break;
1343 }
1344
1345 /* If not doing route update, return after updating "config" */
1346 if (!ctx->policy_route_update)
1347 return UPDWALK_CONTINUE;
1348
1349 /* If nothing has changed, return after updating "config" */
1350 if (!changed && !def_changed)
1351 return UPDWALK_CONTINUE;
1352
1353 /*
1354 * If something has changed, then at the beginning of a
1355 * route-map modification event, mark each subgroup's
1356 * needs-refresh bit. For one, this signals that the
1357 * subgroup needs an outbound refresh. Second, it
1358 * prevents a premature merge of this subgroup with
1359 * another subgroup before a complete (outbound)
1360 * refresh has been performed.
1361 */
1362 if (ctx->policy_event_start_flag) {
1363 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1364 update_subgroup_set_needs_refresh(subgrp, 1);
1365 }
1366 return UPDWALK_CONTINUE;
1367 }
1368
1369 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1370 if (changed) {
1371 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1372 zlog_debug(
1373 "u%" PRIu64 ":s%" PRIu64
1374 " announcing routes upon policy %s (type %d) change",
1375 updgrp->id, subgrp->id,
1376 ctx->policy_name, ctx->policy_type);
1377 subgroup_announce_route(subgrp);
1378 }
1379 if (def_changed) {
1380 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1381 zlog_debug(
1382 "u%" PRIu64 ":s%" PRIu64
1383 " announcing default upon default routemap %s change",
1384 updgrp->id, subgrp->id,
1385 ctx->policy_name);
1386 subgroup_default_originate(subgrp, 0);
1387 }
1388 update_subgroup_set_needs_refresh(subgrp, 0);
1389 }
1390 return UPDWALK_CONTINUE;
1391 }
1392
1393 static int update_group_walkcb(struct hash_bucket *bucket, void *arg)
1394 {
1395 struct update_group *updgrp = bucket->data;
1396 struct updwalk_context *wctx = arg;
1397 int ret = (*wctx->cb)(updgrp, wctx->context);
1398 return ret;
1399 }
1400
1401 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1402 void *arg)
1403 {
1404 struct update_subgroup *subgrp;
1405 struct update_subgroup *tmp_subgrp;
1406 const char *reason = arg;
1407
1408 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1409 update_subgroup_check_merge(subgrp, reason);
1410 return UPDWALK_CONTINUE;
1411 }
1412
1413 /********************
1414 * PUBLIC FUNCTIONS
1415 ********************/
1416
1417 /*
1418 * trigger function when a policy (route-map/filter-list/prefix-list/
1419 * distribute-list etc.) content changes. Go through all the
1420 * update groups and process the change.
1421 *
1422 * bgp: the bgp instance
1423 * ptype: the type of policy that got modified, see bgpd.h
1424 * pname: name of the policy
1425 * route_update: flag to control if an automatic update generation should
1426 * occur
1427 * start_event: flag that indicates if it's the beginning of the change.
1428 * Esp. when the user is changing the content interactively
1429 * over multiple statements. Useful to set dirty flag on
1430 * update groups.
1431 */
1432 void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1433 const char *pname, int route_update,
1434 int start_event)
1435 {
1436 struct updwalk_context ctx;
1437
1438 memset(&ctx, 0, sizeof(ctx));
1439 ctx.policy_type = ptype;
1440 ctx.policy_name = pname;
1441 ctx.policy_route_update = route_update;
1442 ctx.policy_event_start_flag = start_event;
1443 ctx.flags = 0;
1444
1445 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1446 }
1447
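/*
 * Usage sketch, not part of the FRR source: a policy change handler is
 * expected to call this with the matching policy type, for example
 * (hypothetical call site, route-map "RM-OUT" was just edited):
 *
 *    update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT",
 *                               1, 0);
 *
 * Here route_update=1 requests automatic update generation and
 * start_event=0 marks the end of the change; an earlier call with
 * start_event=1 would only have set the needs-refresh flags on the
 * affected subgroups (see updgrp_policy_update_walkcb()).
 */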
1448 /*
1449 * update_subgroup_split_peer
1450 *
1451 * Ensure that the given peer is in a subgroup of its own in the
1452 * specified update group.
1453 */
1454 void update_subgroup_split_peer(struct peer_af *paf,
1455 struct update_group *updgrp)
1456 {
1457 struct update_subgroup *old_subgrp, *subgrp;
1458 uint64_t old_id;
1459
1460
1461 old_subgrp = paf->subgroup;
1462
1463 if (!updgrp)
1464 updgrp = old_subgrp->update_group;
1465
1466 /*
1467 * If the peer is alone in its subgroup, reuse the existing
1468 * subgroup.
1469 */
1470 if (old_subgrp->peer_count == 1) {
1471 if (updgrp == old_subgrp->update_group)
1472 return;
1473
1474 subgrp = old_subgrp;
1475 old_id = old_subgrp->update_group->id;
1476
1477 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1478 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1479 }
1480
1481 update_group_remove_subgroup(old_subgrp->update_group,
1482 old_subgrp);
1483 update_group_add_subgroup(updgrp, subgrp);
1484
1485 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1486 UPDGRP_PEER_DBG_EN(updgrp);
1487 }
1488 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1489 zlog_debug("u%" PRIu64 ":s%" PRIu64
1490 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1491 old_id, subgrp->id, paf->peer->host,
1492 updgrp->id, subgrp->id);
1493
1494 /*
1495 * The state of the subgroup (adj_out, advs, packet queue etc)
1496 * is consistent internally, but may not be identical to other
1497 * subgroups in the new update group even if the version number
1498 * matches up. Make sure a full refresh is done before the
1499 * subgroup is merged with another.
1500 */
1501 update_subgroup_set_needs_refresh(subgrp, 1);
1502
1503 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1504 return;
1505 }
1506
1507 /*
1508 * Create a new subgroup under the specified update group, and copy
1509 * over relevant state to it.
1510 */
1511 subgrp = update_subgroup_create(updgrp);
1512 update_subgroup_inherit_info(subgrp, old_subgrp);
1513
1514 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1515 subgrp->split_from.subgroup_id = old_subgrp->id;
1516
1517 /*
1518 * Copy out relevant state from the old subgroup.
1519 */
1520 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1521 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1522
1523 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1524 zlog_debug("u%" PRIu64 ":s%" PRIu64
1525 " peer %s split and moved into u%" PRIu64
1526 ":s%" PRIu64,
1527 paf->subgroup->update_group->id, paf->subgroup->id,
1528 paf->peer->host, updgrp->id, subgrp->id);
1529
1530 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1531
1532 /*
1533 * Since queued advs were left behind, this new subgroup needs a
1534 * refresh.
1535 */
1536 update_subgroup_set_needs_refresh(subgrp, 1);
1537
1538 /*
1539 * Remove peer from old subgroup, and add it to the new one.
1540 */
1541 update_subgroup_remove_peer(paf->subgroup, paf);
1542
1543 update_subgroup_add_peer(subgrp, paf, 1);
1544 }
1545
1546 void update_bgp_group_init(struct bgp *bgp)
1547 {
1548 int afid;
1549
1550 AF_FOREACH (afid)
1551 bgp->update_groups[afid] =
1552 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
1553 "BGP Update Group Hash");
1554 }
1555
1556 void update_bgp_group_free(struct bgp *bgp)
1557 {
1558 int afid;
1559
1560 AF_FOREACH (afid) {
1561 if (bgp->update_groups[afid]) {
1562 hash_free(bgp->update_groups[afid]);
1563 bgp->update_groups[afid] = NULL;
1564 }
1565 }
1566 }
1567
1568 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1569 uint64_t subgrp_id)
1570 {
1571 struct updwalk_context ctx;
1572 memset(&ctx, 0, sizeof(ctx));
1573 ctx.vty = vty;
1574 ctx.subgrp_id = subgrp_id;
1575
1576 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1577 }
1578
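/*
 * Note: this is the worker behind the vty display of update groups
 * (the "show ... update-groups" family of commands); a non-zero
 * subgrp_id restricts the output to that one subgroup, as checked in
 * update_group_show_walkcb().
 */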
1579 /*
1580 * update_group_show_stats
1581 *
1582 * Show global statistics about update groups.
1583 */
1584 void update_group_show_stats(struct bgp *bgp, struct vty *vty)
1585 {
1586 vty_out(vty, "Update groups created: %u\n",
1587 bgp->update_group_stats.updgrps_created);
1588 vty_out(vty, "Update groups deleted: %u\n",
1589 bgp->update_group_stats.updgrps_deleted);
1590 vty_out(vty, "Update subgroups created: %u\n",
1591 bgp->update_group_stats.subgrps_created);
1592 vty_out(vty, "Update subgroups deleted: %u\n",
1593 bgp->update_group_stats.subgrps_deleted);
1594 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1595 vty_out(vty, "Prune events: %u\n",
1596 bgp->update_group_stats.prune_events);
1597 vty_out(vty, "Merge events: %u\n",
1598 bgp->update_group_stats.merge_events);
1599 vty_out(vty, "Split events: %u\n",
1600 bgp->update_group_stats.split_events);
1601 vty_out(vty, "Update group switch events: %u\n",
1602 bgp->update_group_stats.updgrp_switch_events);
1603 vty_out(vty, "Peer route refreshes combined: %u\n",
1604 bgp->update_group_stats.peer_refreshes_combined);
1605 vty_out(vty, "Merge checks triggered: %u\n",
1606 bgp->update_group_stats.merge_checks_triggered);
1607 }
1608
1609 /*
1610 * update_group_adjust_peer
1611 */
1612 void update_group_adjust_peer(struct peer_af *paf)
1613 {
1614 struct update_group *updgrp;
1615 struct update_subgroup *subgrp, *old_subgrp;
1616 struct peer *peer;
1617
1618 if (!paf)
1619 return;
1620
1621 peer = PAF_PEER(paf);
1622 if (!peer_established(peer)) {
1623 return;
1624 }
1625
1626 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1627 return;
1628 }
1629
1630 if (!peer->afc_nego[paf->afi][paf->safi]) {
1631 return;
1632 }
1633
1634 updgrp = update_group_find(paf);
1635 if (!updgrp) {
1636 updgrp = update_group_create(paf);
1637 if (!updgrp) {
1638 flog_err(EC_BGP_UPDGRP_CREATE,
1639 "couldn't create update group for peer %s",
1640 paf->peer->host);
1641 return;
1642 }
1643 }
1644
1645 old_subgrp = paf->subgroup;
1646
1647 if (old_subgrp) {
1648
1649 /*
1650 * If the update group of the peer is unchanged, the peer can
1651 * stay
1652 * in its existing subgroup and we're done.
1653 */
1654 if (old_subgrp->update_group == updgrp)
1655 return;
1656
1657 /*
1658 * The peer is switching between update groups. Put it in its
1659 * own subgroup under the new update group.
1660 */
1661 update_subgroup_split_peer(paf, updgrp);
1662 return;
1663 }
1664
1665 subgrp = update_subgroup_find(updgrp, paf);
1666 if (!subgrp) {
1667 subgrp = update_subgroup_create(updgrp);
1668 if (!subgrp)
1669 return;
1670 }
1671
1672 update_subgroup_add_peer(subgrp, paf, 1);
1673 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1674 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1675 subgrp->id, paf->peer->host);
1676
1677 return;
1678 }
1679
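/*
 * Note: update_group_adjust_peer() is the central placement routine.
 * It is typically reached via update_group_adjust_peer_afs() (see
 * peer_lonesoul_or_not() above) after a session or outbound
 * configuration change, and it either leaves the peer where it is,
 * creates a group/subgroup for it, or splits it out via
 * update_subgroup_split_peer().
 */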
1680 int update_group_adjust_soloness(struct peer *peer, int set)
1681 {
1682 struct peer_group *group;
1683 struct listnode *node, *nnode;
1684
1685 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1686 peer_lonesoul_or_not(peer, set);
1687 if (peer->status == Established)
1688 bgp_announce_route_all(peer);
1689 } else {
1690 group = peer->group;
1691 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1692 peer_lonesoul_or_not(peer, set);
1693 if (peer->status == Established)
1694 bgp_announce_route_all(peer);
1695 }
1696 }
1697 return 0;
1698 }
1699
1700 /*
1701 * update_subgroup_rib
1702 */
1703 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
1704 {
1705 struct bgp *bgp;
1706
1707 bgp = SUBGRP_INST(subgrp);
1708 if (!bgp)
1709 return NULL;
1710
1711 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1712 }
1713
1714 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1715 updgrp_walkcb cb, void *ctx)
1716 {
1717 struct updwalk_context wctx;
1718 int afid;
1719
1720 if (!bgp)
1721 return;
1722 afid = afindex(afi, safi);
1723 if (afid >= BGP_AF_MAX)
1724 return;
1725
1726 memset(&wctx, 0, sizeof(wctx));
1727 wctx.cb = cb;
1728 wctx.context = ctx;
1729
1730 if (bgp->update_groups[afid])
1731 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1732 }
1733
1734 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1735 {
1736 afi_t afi;
1737 safi_t safi;
1738
1739 FOREACH_AFI_SAFI (afi, safi) {
1740 update_group_af_walk(bgp, afi, safi, cb, ctx);
1741 }
1742 }
1743
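/*
 * Usage sketch, not part of the FRR source: any caller can visit every
 * update group with a callback of its own, e.g. a hypothetical counter:
 *
 *    static int count_updgrp_walkcb(struct update_group *updgrp,
 *                                   void *arg)
 *    {
 *        (*(unsigned int *)arg)++;
 *        return UPDWALK_CONTINUE;
 *    }
 *
 *    unsigned int count = 0;
 *    update_group_walk(bgp, count_updgrp_walkcb, &count);
 *
 * update_group_af_walk() does the same for a single AFI/SAFI.
 */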
1744 void update_group_periodic_merge(struct bgp *bgp)
1745 {
1746 char reason[] = "periodic merge check";
1747
1748 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1749 (void *)reason);
1750 }
1751
1752 static int
1753 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1754 void *arg)
1755 {
1756 struct update_subgroup *subgrp;
1757 struct peer *peer;
1758 afi_t afi;
1759 safi_t safi;
1760
1761 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1762 peer = SUBGRP_PEER(subgrp);
1763 afi = SUBGRP_AFI(subgrp);
1764 safi = SUBGRP_SAFI(subgrp);
1765
1766 if (peer->default_rmap[afi][safi].name) {
1767 subgroup_default_originate(subgrp, 0);
1768 }
1769 }
1770
1771 return UPDWALK_CONTINUE;
1772 }
1773
1774 int update_group_refresh_default_originate_route_map(struct thread *thread)
1775 {
1776 struct bgp *bgp;
1777 char reason[] = "refresh default-originate route-map";
1778
1779 bgp = THREAD_ARG(thread);
1780 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1781 reason);
1782 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1783 bgp_unlock(bgp);
1784
1785 return (0);
1786 }
1787
1788 /*
1789 * peer_af_announce_route
1790 *
1791 * Refreshes routes out to a peer_af immediately.
1792 *
1793 * If the combine parameter is true, then this function will try to
1794 * gather other peers in the subgroup for which a route announcement
1795 * is pending and efficiently announce routes to all of them.
1796 *
1797 * For now, the 'combine' option has an effect only if all peers in
1798 * the subgroup have a route announcement pending.
1799 */
1800 void peer_af_announce_route(struct peer_af *paf, int combine)
1801 {
1802 struct update_subgroup *subgrp;
1803 struct peer_af *cur_paf;
1804 int all_pending;
1805
1806 subgrp = paf->subgroup;
1807 all_pending = 0;
1808
1809 if (combine) {
1810 /*
1811 * If there are other peers in the old subgroup that also need
1812 * routes to be announced, pull them into the peer's new
1813 * subgroup.
1814 * Combine route announcement with other peers if possible.
1815 *
1816 * For now, we combine only if all peers in the subgroup have an
1817 * announcement pending.
1818 */
1819 all_pending = 1;
1820
1821 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1822 if (cur_paf == paf)
1823 continue;
1824
1825 if (cur_paf->t_announce_route)
1826 continue;
1827
1828 all_pending = 0;
1829 break;
1830 }
1831 }
1832 /*
1833 * Announce to the peer alone if we were not asked to combine peers,
1834 * or if some peers don't have a route announcement pending.
1835 */
1836 if (!combine || !all_pending) {
1837 update_subgroup_split_peer(paf, NULL);
1838 subgrp = paf->subgroup;
1839
1840 assert(subgrp && subgrp->update_group);
1841 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1842 zlog_debug("u%" PRIu64 ":s%" PRIu64
1843 " %s announcing routes",
1844 subgrp->update_group->id, subgrp->id,
1845 paf->peer->host);
1846
1847 subgroup_announce_route(paf->subgroup);
1848 return;
1849 }
1850
1851 /*
1852 * We will announce routes to the entire subgroup.
1853 *
1854 * First stop refresh timers on all the other peers.
1855 */
1856 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1857 if (cur_paf == paf)
1858 continue;
1859
1860 bgp_stop_announce_route_timer(cur_paf);
1861 }
1862
1863 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1864 zlog_debug("u%" PRIu64 ":s%" PRIu64
1865 " announcing routes to %s, combined into %d peers",
1866 subgrp->update_group->id, subgrp->id,
1867 paf->peer->host, subgrp->peer_count);
1868
1869 subgroup_announce_route(subgrp);
1870
1871 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1872 subgrp->peer_count - 1);
1873 }
1874
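/*
 * Worked example, not part of the FRR source: suppose a subgroup has
 * three peers and all three have an announce timer pending. Calling
 * peer_af_announce_route(paf, 1) for any one of them stops the other
 * two timers, announces once for the whole subgroup, and increments
 * peer_refreshes_combined by peer_count - 1 = 2. If even one peer had
 * no announcement pending, the calling peer would instead be split
 * into its own subgroup and refreshed alone.
 */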
1875 void subgroup_trigger_write(struct update_subgroup *subgrp)
1876 {
1877 struct peer_af *paf;
1878
1879 /*
1880 * For each peer in the subgroup, schedule a job to pull packets from
1881 * the subgroup output queue into their own output queue. This action
1882 * will trigger a write job on the I/O thread.
1883 */
1884 SUBGRP_FOREACH_PEER (subgrp, paf)
1885 if (paf->peer->status == Established)
1886 thread_add_timer_msec(
1887 bm->master, bgp_generate_updgrp_packets,
1888 paf->peer, 0,
1889 &paf->peer->t_generate_updgrp_packets);
1890 }
1891
1892 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
1893 {
1894 UPDGRP_PEER_DBG_OFF(updgrp);
1895 return UPDWALK_CONTINUE;
1896 }
1897
1898 /* Return true if we should addpath encode NLRI to this peer */
1899 int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
1900 {
1901 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1902 && CHECK_FLAG(peer->af_cap[afi][safi],
1903 PEER_CAP_ADDPATH_AF_RX_RCV));
1904 }