1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_fsm.h"
51 #include "bgpd/bgp_advertise.h"
52 #include "bgpd/bgp_packet.h"
53 #include "bgpd/bgp_updgrp.h"
54 #include "bgpd/bgp_route.h"
55 #include "bgpd/bgp_filter.h"
56
57 /********************
58 * PRIVATE FUNCTIONS
59 ********************/
60
61 /**
62  * Assign a unique ID to an update group or subgroup, mostly for display/
63  * debugging purposes. The ID space is 64 bits, so it is used freely with
64  * no concern about wrap-around or about filling gaps. While at it,
65  * timestamp the creation.
66 */
67 static void update_group_checkin(struct update_group *updgrp)
68 {
69 updgrp->id = ++bm->updgrp_idspace;
70 updgrp->uptime = bgp_clock();
71 }
72
73 static void update_subgroup_checkin(struct update_subgroup *subgrp,
74 struct update_group *updgrp)
75 {
76 subgrp->id = ++bm->subgrp_idspace;
77 subgrp->uptime = bgp_clock();
78 }
79
80 static void sync_init(struct update_subgroup *subgrp)
81 {
82 subgrp->sync =
83 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
84 BGP_ADV_FIFO_INIT(&subgrp->sync->update);
85 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw);
86 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw_low);
87 subgrp->hash = hash_create(baa_hash_key, baa_hash_cmp, NULL);
88
89 	/* We use a larger buffer for subgrp->work in the event that:
90 	 *
91 	 * - We RX a BGP_UPDATE where the attributes alone are just
92 	 *   under BGP_MAX_PACKET_SIZE.
93 	 *
94 	 * - The user configures an outbound route-map that does many
95 	 *   as-path prepends or adds many communities. At most they can
96 	 *   have CMD_ARGC_MAX args in a route-map, so there is a finite
97 	 *   limit on how large they can make the attributes.
98 	 *
99 	 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to
100 	 *   avoid bounds checking for every single attribute as we
101 	 *   construct an UPDATE.
102 	 */
103 subgrp->work =
104 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
105 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
106 }
107
108 static void sync_delete(struct update_subgroup *subgrp)
109 {
110 if (subgrp->sync)
111 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
112 subgrp->sync = NULL;
113 if (subgrp->hash)
114 hash_free(subgrp->hash);
115 subgrp->hash = NULL;
116 if (subgrp->work)
117 stream_free(subgrp->work);
118 subgrp->work = NULL;
119 if (subgrp->scratch)
120 stream_free(subgrp->scratch);
121 subgrp->scratch = NULL;
122 }
123
124 /**
125 * conf_copy
126 *
127  * Copy only those fields that are relevant to update-group matching.
128 */
129 static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
130 safi_t safi)
131 {
132 struct bgp_filter *srcfilter;
133 struct bgp_filter *dstfilter;
134
135 srcfilter = &src->filter[afi][safi];
136 dstfilter = &dst->filter[afi][safi];
137
138 dst->bgp = src->bgp;
139 dst->sort = src->sort;
140 dst->as = src->as;
141 dst->v_routeadv = src->v_routeadv;
142 dst->flags = src->flags;
143 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
144 if (dst->host)
145 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
146
147 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
148 dst->cap = src->cap;
149 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
150 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
151 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
152 dst->local_as = src->local_as;
153 dst->change_local_as = src->change_local_as;
154 dst->shared_network = src->shared_network;
155 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
156
157 dst->group = src->group;
158
159 if (src->default_rmap[afi][safi].name) {
160 dst->default_rmap[afi][safi].name =
161 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
162 src->default_rmap[afi][safi].name);
163 dst->default_rmap[afi][safi].map =
164 src->default_rmap[afi][safi].map;
165 }
166
167 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
168 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
169 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
170 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
171 }
172
173 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
174 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
175 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
176 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
177 }
178
179 if (FILTER_LIST_OUT_NAME(srcfilter)) {
180 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
181 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
182 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
183 }
184
185 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
186 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
187 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
188 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
189 }
190
191 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
192 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
193 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
194 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
195 }
196 }
197
198 /**
199  * Since we did a bunch of XSTRDUP()s in conf_copy(), free them up here.
200 */
201 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
202 {
203 struct bgp_filter *srcfilter;
204
205 srcfilter = &src->filter[afi][safi];
206
207 if (src->default_rmap[afi][safi].name)
208 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
209
210 if (srcfilter->dlist[FILTER_OUT].name)
211 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
212
213 if (srcfilter->plist[FILTER_OUT].name)
214 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
215
216 if (srcfilter->aslist[FILTER_OUT].name)
217 XFREE(MTYPE_BGP_FILTER_NAME,
218 srcfilter->aslist[FILTER_OUT].name);
219
220 if (srcfilter->map[RMAP_OUT].name)
221 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
222
223 if (srcfilter->usmap.name)
224 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
225
226 if (src->host)
227 XFREE(MTYPE_BGP_PEER_HOST, src->host);
228 src->host = NULL;
229 }
230
231 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
232 {
233 struct peer *src;
234 struct peer *dst;
235
236 if (!updgrp || !paf)
237 return;
238
239 src = paf->peer;
240 dst = updgrp->conf;
241 if (!src || !dst)
242 return;
243
244 updgrp->afi = paf->afi;
245 updgrp->safi = paf->safi;
246 updgrp->afid = paf->afid;
247 updgrp->bgp = src->bgp;
248
249 conf_copy(dst, src, paf->afi, paf->safi);
250 }
251
252 /**
253 * auxiliary functions to maintain the hash table.
254 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
255 * - updgrp_hash_key_make - makes the key for update group search
256 * - updgrp_hash_cmp - compare two update groups.
257 */
258 static void *updgrp_hash_alloc(void *p)
259 {
260 struct update_group *updgrp;
261 const struct update_group *in;
262
263 in = (const struct update_group *)p;
264 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
265 memcpy(updgrp, in, sizeof(struct update_group));
266 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
267 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
268 return updgrp;
269 }
270
271 /**
272 * The hash value for a peer is computed from the following variables:
273 * v = f(
274 * 1. IBGP (1) or EBGP (2)
275 * 2. FLAGS based on configuration:
276 * LOCAL_AS_NO_PREPEND
277 * LOCAL_AS_REPLACE_AS
278 * 3. AF_FLAGS based on configuration:
279 * Refer to definition in bgp_updgrp.h
280 * 4. (AF-independent) Capability flags:
281 * AS4_RCV capability
282 * 5. (AF-dependent) Capability flags:
283 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
284 * 6. MRAI
285 * 7. peer-group name
286 * 8. Outbound route-map name (neighbor route-map <> out)
287 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
288 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
289 * 11. Outbound as-list name (neighbor filter-list <> out)
290 * 12. Unsuppress map name (neighbor unsuppress-map <>)
291 * 13. default rmap name (neighbor default-originate route-map <>)
292 * 14. encoding both global and link-local nexthop?
293 * 15. If peer is configured to be a lonesoul, peer ip address
294 * 16. Local-as should match, if configured.
295 * )
296 */
297 static unsigned int updgrp_hash_key_make(void *p)
298 {
299 const struct update_group *updgrp;
300 const struct peer *peer;
301 const struct bgp_filter *filter;
302 uint32_t flags;
303 uint32_t key;
304 afi_t afi;
305 safi_t safi;
306
307 #define SEED1 999331
308 #define SEED2 2147483647
309
310 updgrp = p;
311 peer = updgrp->conf;
312 afi = updgrp->afi;
313 safi = updgrp->safi;
314 flags = peer->af_flags[afi][safi];
315 filter = &peer->filter[afi][safi];
316
317 key = 0;
318
319 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
320 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
321 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
322 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
323 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
324 key);
325 key = jhash_1word(peer->v_routeadv, key);
326 key = jhash_1word(peer->change_local_as, key);
327
328 if (peer->group)
329 key = jhash_1word(jhash(peer->group->name,
330 strlen(peer->group->name), SEED1),
331 key);
332
333 if (filter->map[RMAP_OUT].name)
334 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
335 strlen(filter->map[RMAP_OUT].name),
336 SEED1),
337 key);
338
339 if (filter->dlist[FILTER_OUT].name)
340 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
341 strlen(filter->dlist[FILTER_OUT].name),
342 SEED1),
343 key);
344
345 if (filter->plist[FILTER_OUT].name)
346 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
347 strlen(filter->plist[FILTER_OUT].name),
348 SEED1),
349 key);
350
351 if (filter->aslist[FILTER_OUT].name)
352 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
353 strlen(filter->aslist[FILTER_OUT].name),
354 SEED1),
355 key);
356
357 if (filter->usmap.name)
358 key = jhash_1word(jhash(filter->usmap.name,
359 strlen(filter->usmap.name), SEED1),
360 key);
361
362 if (peer->default_rmap[afi][safi].name)
363 key = jhash_1word(
364 jhash(peer->default_rmap[afi][safi].name,
365 strlen(peer->default_rmap[afi][safi].name),
366 SEED1),
367 key);
368
369 /* If peer is on a shared network and is exchanging IPv6 prefixes,
370 * it needs to include link-local address. That's different from
371 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
372 * bytes). We create different update groups to take care of that.
373 */
374 key = jhash_1word(
375 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
376 key);
377
378 /*
379 * There are certain peers that must get their own update-group:
380 * - lonesoul peers
381 * - peers that negotiated ORF
382 */
383 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
384 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
385 || CHECK_FLAG(peer->af_cap[afi][safi],
386 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
387 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
388 key);
389
390 return key;
391 }
392
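/*
 * updgrp_hash_cmp
 *
 * Hash equality function for update groups. It compares the same set of
 * attributes that updgrp_hash_key_make() folds into the key (peer sort,
 * relevant peer/AF flags and capabilities, MRAI, local-as, peer-group,
 * outbound policy names, default-originate route-map, shared-network
 * state and the lonesoul/ORF cases), so the two functions must be kept
 * in sync whenever a new attribute starts influencing update-group
 * membership.
 */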
393 static int updgrp_hash_cmp(const void *p1, const void *p2)
394 {
395 const struct update_group *grp1;
396 const struct update_group *grp2;
397 const struct peer *pe1;
398 const struct peer *pe2;
399 uint32_t flags1;
400 uint32_t flags2;
401 const struct bgp_filter *fl1;
402 const struct bgp_filter *fl2;
403 afi_t afi;
404 safi_t safi;
405
406 if (!p1 || !p2)
407 return 0;
408
409 grp1 = p1;
410 grp2 = p2;
411 pe1 = grp1->conf;
412 pe2 = grp2->conf;
413 afi = grp1->afi;
414 safi = grp1->safi;
415 flags1 = pe1->af_flags[afi][safi];
416 flags2 = pe2->af_flags[afi][safi];
417 fl1 = &pe1->filter[afi][safi];
418 fl2 = &pe2->filter[afi][safi];
419
420 /* put EBGP and IBGP peers in different update groups */
421 if (pe1->sort != pe2->sort)
422 return 0;
423
424 /* check peer flags */
425 if ((pe1->flags & PEER_UPDGRP_FLAGS)
426 != (pe2->flags & PEER_UPDGRP_FLAGS))
427 return 0;
428
429 /* If there is 'local-as' configured, it should match. */
430 if (pe1->change_local_as != pe2->change_local_as)
431 return 0;
432
433 /* flags like route reflector client */
434 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
435 return 0;
436
437 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
438 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
439 return 0;
440
441 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
442 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
443 return 0;
444
445 if (pe1->v_routeadv != pe2->v_routeadv)
446 return 0;
447
448 if (pe1->group != pe2->group)
449 return 0;
450
451 /* route-map names should be the same */
452 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
453 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
454 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
455 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
456 return 0;
457
458 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
459 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
460 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
461 && strcmp(fl1->dlist[FILTER_OUT].name,
462 fl2->dlist[FILTER_OUT].name)))
463 return 0;
464
465 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
466 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
467 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
468 && strcmp(fl1->plist[FILTER_OUT].name,
469 fl2->plist[FILTER_OUT].name)))
470 return 0;
471
472 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
473 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
474 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
475 && strcmp(fl1->aslist[FILTER_OUT].name,
476 fl2->aslist[FILTER_OUT].name)))
477 return 0;
478
479 if ((fl1->usmap.name && !fl2->usmap.name)
480 || (!fl1->usmap.name && fl2->usmap.name)
481 || (fl1->usmap.name && fl2->usmap.name
482 && strcmp(fl1->usmap.name, fl2->usmap.name)))
483 return 0;
484
485 if ((pe1->default_rmap[afi][safi].name
486 && !pe2->default_rmap[afi][safi].name)
487 || (!pe1->default_rmap[afi][safi].name
488 && pe2->default_rmap[afi][safi].name)
489 || (pe1->default_rmap[afi][safi].name
490 && pe2->default_rmap[afi][safi].name
491 && strcmp(pe1->default_rmap[afi][safi].name,
492 pe2->default_rmap[afi][safi].name)))
493 return 0;
494
495 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
496 return 0;
497
498 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
499 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
500 || CHECK_FLAG(pe1->af_cap[afi][safi],
501 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
502 && !sockunion_same(&pe1->su, &pe2->su))
503 return 0;
504
505 return 1;
506 }
507
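/*
 * peer_lonesoul_or_not
 *
 * Set or clear PEER_FLAG_LONESOUL on a peer and, if the flag actually
 * changed, re-evaluate the peer's update-group membership across all of
 * its address families.
 */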
508 static void peer_lonesoul_or_not(struct peer *peer, int set)
509 {
510 /* no change in status? */
511 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
512 return;
513
514 if (set)
515 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
516 else
517 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
518
519 update_group_adjust_peer_afs(peer);
520 }
521
522 /*
523 * subgroup_total_packets_enqueued
524 *
525 * Returns the total number of packets enqueued to a subgroup.
526 */
527 static unsigned int
528 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
529 {
530 struct bpacket *pkt;
531
532 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
533
534 return pkt->ver - 1;
535 }
536
537 static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
538 {
539 struct updwalk_context *ctx = arg;
540 struct vty *vty;
541 struct update_subgroup *subgrp;
542 struct peer_af *paf;
543 struct bgp_filter *filter;
544 int match = 0;
545
546 if (!ctx)
547 return CMD_SUCCESS;
548
549 if (ctx->subgrp_id) {
550 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
551 {
552 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
553 continue;
554 else {
555 match = 1;
556 break;
557 }
558 }
559 } else {
560 match = 1;
561 }
562
563 if (!match) {
564 		/* Since this routine is invoked from a walk, we cannot
565 		 * signal any error here; we can only return.
566 		 */
567 return CMD_SUCCESS;
568 }
569
570 vty = ctx->vty;
571
572 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
573 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
574 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
575 if (filter->map[RMAP_OUT].name)
576 vty_out(vty, " Outgoing route map: %s%s\n",
577 filter->map[RMAP_OUT].map ? "X" : "",
578 filter->map[RMAP_OUT].name);
579 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
580 if (updgrp->conf->change_local_as)
581 vty_out(vty, " Local AS %u%s%s\n",
582 updgrp->conf->change_local_as,
583 CHECK_FLAG(updgrp->conf->flags,
584 PEER_FLAG_LOCAL_AS_NO_PREPEND)
585 ? " no-prepend"
586 : "",
587 CHECK_FLAG(updgrp->conf->flags,
588 PEER_FLAG_LOCAL_AS_REPLACE_AS)
589 ? " replace-as"
590 : "");
591
592 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
593 {
594 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
595 continue;
596 vty_out(vty, "\n");
597 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
598 vty_out(vty, " Created: %s",
599 timestamp_string(subgrp->uptime));
600
601 if (subgrp->split_from.update_group_id
602 || subgrp->split_from.subgroup_id) {
603 vty_out(vty, " Split from group id: %" PRIu64 "\n",
604 subgrp->split_from.update_group_id);
605 vty_out(vty,
606 " Split from subgroup id: %" PRIu64 "\n",
607 subgrp->split_from.subgroup_id);
608 }
609
610 vty_out(vty, " Join events: %u\n", subgrp->join_events);
611 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
612 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
613 vty_out(vty, " Split events: %u\n", subgrp->split_events);
614 vty_out(vty, " Update group switch events: %u\n",
615 subgrp->updgrp_switch_events);
616 vty_out(vty, " Peer refreshes combined: %u\n",
617 subgrp->peer_refreshes_combined);
618 vty_out(vty, " Merge checks triggered: %u\n",
619 subgrp->merge_checks_triggered);
620 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
621 vty_out(vty, " Packet queue length: %d\n",
622 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
623 vty_out(vty, " Total packets enqueued: %u\n",
624 subgroup_total_packets_enqueued(subgrp));
625 vty_out(vty, " Packet queue high watermark: %d\n",
626 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
627 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
628 vty_out(vty, " Advertise list: %s\n",
629 advertise_list_is_empty(subgrp) ? "empty"
630 : "not empty");
631 vty_out(vty, " Flags: %s\n",
632 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
633 ? "R"
634 : "");
635 if (subgrp->peer_count > 0) {
636 vty_out(vty, " Peers:\n");
637 SUBGRP_FOREACH_PEER(subgrp, paf)
638 vty_out(vty, " - %s\n", paf->peer->host);
639 }
640 }
641 return UPDWALK_CONTINUE;
642 }
643
644 /*
645  * Helper function to show the packet queue for each subgroup of an update
646  * group. Constrained to a particular subgroup id if id != 0.
647 */
648 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
649 void *arg)
650 {
651 struct updwalk_context *ctx = arg;
652 struct update_subgroup *subgrp;
653 struct vty *vty;
654
655 vty = ctx->vty;
656 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
657 {
658 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
659 continue;
660 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
661 updgrp->id, subgrp->id);
662 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
663 }
664 return UPDWALK_CONTINUE;
665 }
666
667 /*
668  * Show the packet queue for each subgroup of an update group. Will be
669  * constrained to a particular subgroup id if id != 0.
670 */
671 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
672 struct vty *vty, uint64_t id)
673 {
674 struct updwalk_context ctx;
675
676 memset(&ctx, 0, sizeof(ctx));
677 ctx.vty = vty;
678 ctx.subgrp_id = id;
679 ctx.flags = 0;
680 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
681 &ctx);
682 }
683
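/*
 * update_group_find
 *
 * Look up the update group that an established peer_af belongs with, by
 * building a temporary "conf" peer from the peer's relevant outbound
 * configuration and searching the per-AF update-group hash for a match.
 * Returns NULL if no matching group exists yet.
 */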
684 static struct update_group *update_group_find(struct peer_af *paf)
685 {
686 struct update_group *updgrp;
687 struct update_group tmp;
688 struct peer tmp_conf;
689
690 if (!peer_established(PAF_PEER(paf)))
691 return NULL;
692
693 memset(&tmp, 0, sizeof(tmp));
694 memset(&tmp_conf, 0, sizeof(tmp_conf));
695 tmp.conf = &tmp_conf;
696 peer2_updgrp_copy(&tmp, paf);
697
698 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
699 conf_release(&tmp_conf, paf->afi, paf->safi);
700 return updgrp;
701 }
702
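/*
 * update_group_create
 *
 * Find or create the update group for the given peer_af. hash_get() with
 * updgrp_hash_alloc() allocates a new group (including its copied conf
 * peer) when no matching group exists.
 */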
703 static struct update_group *update_group_create(struct peer_af *paf)
704 {
705 struct update_group *updgrp;
706 struct update_group tmp;
707 struct peer tmp_conf;
708
709 memset(&tmp, 0, sizeof(tmp));
710 memset(&tmp_conf, 0, sizeof(tmp_conf));
711 tmp.conf = &tmp_conf;
712 peer2_updgrp_copy(&tmp, paf);
713
714 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
715 updgrp_hash_alloc);
716 if (!updgrp)
717 return NULL;
718 update_group_checkin(updgrp);
719
720 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
721 zlog_debug("create update group %" PRIu64, updgrp->id);
722
723 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
724
725 conf_release(&tmp_conf, paf->afi, paf->safi);
726 return updgrp;
727 }
728
729 static void update_group_delete(struct update_group *updgrp)
730 {
731 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
732 zlog_debug("delete update group %" PRIu64, updgrp->id);
733
734 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
735
736 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
737 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
738
739 if (updgrp->conf->host)
740 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
741 updgrp->conf->host = NULL;
742
743 if (updgrp->conf->ifname)
744 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
745
746 XFREE(MTYPE_BGP_PEER, updgrp->conf);
747 XFREE(MTYPE_BGP_UPDGRP, updgrp);
748 }
749
750 static void update_group_add_subgroup(struct update_group *updgrp,
751 struct update_subgroup *subgrp)
752 {
753 if (!updgrp || !subgrp)
754 return;
755
756 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
757 subgrp->update_group = updgrp;
758 }
759
760 static void update_group_remove_subgroup(struct update_group *updgrp,
761 struct update_subgroup *subgrp)
762 {
763 if (!updgrp || !subgrp)
764 return;
765
766 LIST_REMOVE(subgrp, updgrp_train);
767 subgrp->update_group = NULL;
768 if (LIST_EMPTY(&(updgrp->subgrps)))
769 update_group_delete(updgrp);
770 }
771
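/*
 * update_subgroup_create
 *
 * Allocate a new subgroup under the given update group: assign it an id,
 * inherit the instance's coalesce time, set up the sync FIFOs, work
 * streams and packet queue (with its placeholder tail packet), and link
 * it onto the update group's subgroup list.
 */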
772 static struct update_subgroup *
773 update_subgroup_create(struct update_group *updgrp)
774 {
775 struct update_subgroup *subgrp;
776
777 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
778 update_subgroup_checkin(subgrp, updgrp);
779 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
780 sync_init(subgrp);
781 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
782 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
783 TAILQ_INIT(&(subgrp->adjq));
784 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
785 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
786 subgrp->id);
787
788 update_group_add_subgroup(updgrp, subgrp);
789
790 UPDGRP_INCR_STAT(updgrp, subgrps_created);
791
792 return subgrp;
793 }
794
795 static void update_subgroup_delete(struct update_subgroup *subgrp)
796 {
797 if (!subgrp)
798 return;
799
800 if (subgrp->update_group)
801 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
802
803 if (subgrp->t_merge_check)
804 THREAD_OFF(subgrp->t_merge_check);
805
806 if (subgrp->t_coalesce)
807 THREAD_TIMER_OFF(subgrp->t_coalesce);
808
809 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
810 subgroup_clear_table(subgrp);
811
814 sync_delete(subgrp);
815
816 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
817 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
818 subgrp->update_group->id, subgrp->id);
819
820 update_group_remove_subgroup(subgrp->update_group, subgrp);
821
822 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
823 }
824
825 void update_subgroup_inherit_info(struct update_subgroup *to,
826 struct update_subgroup *from)
827 {
828 if (!to || !from)
829 return;
830
831 to->sflags = from->sflags;
832 }
833
834 /*
835 * update_subgroup_check_delete
836 *
837 * Delete a subgroup if it is ready to be deleted.
838 *
839 * Returns TRUE if the subgroup was deleted.
840 */
841 static int update_subgroup_check_delete(struct update_subgroup *subgrp)
842 {
843 if (!subgrp)
844 return 0;
845
846 if (!LIST_EMPTY(&(subgrp->peers)))
847 return 0;
848
849 update_subgroup_delete(subgrp);
850
851 return 1;
852 }
853
854 /*
855 * update_subgroup_add_peer
856 *
857  * @param send_enqueued_pkts If true, all currently enqueued packets will
858  *                           also be sent to the peer.
859 */
860 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
861 struct peer_af *paf,
862 int send_enqueued_pkts)
863 {
864 struct bpacket *pkt;
865
866 if (!subgrp || !paf)
867 return;
868
869 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
870 paf->subgroup = subgrp;
871 subgrp->peer_count++;
872
873 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
874 UPDGRP_PEER_DBG_EN(subgrp->update_group);
875 }
876
877 SUBGRP_INCR_STAT(subgrp, join_events);
878
879 if (send_enqueued_pkts) {
880 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
881 } else {
882
883 /*
884 * Hang the peer off of the last, placeholder, packet in the
885 * queue. This means it won't see any of the packets that are
886 		 * currently in the queue.
887 */
888 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
889 assert(pkt->buffer == NULL);
890 }
891
892 bpacket_add_peer(pkt, paf);
893
894 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
895 }
896
897 /*
898 * update_subgroup_remove_peer_internal
899 *
900 * Internal function that removes a peer from a subgroup, but does not
901 * delete the subgroup. A call to this function must almost always be
902 * followed by a call to update_subgroup_check_delete().
903 *
904 * @see update_subgroup_remove_peer
905 */
906 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
907 struct peer_af *paf)
908 {
909 assert(subgrp && paf);
910
911 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
912 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
913 }
914
915 bpacket_queue_remove_peer(paf);
916 LIST_REMOVE(paf, subgrp_train);
917 paf->subgroup = NULL;
918 subgrp->peer_count--;
919
920 SUBGRP_INCR_STAT(subgrp, prune_events);
921 }
922
923 /*
924 * update_subgroup_remove_peer
925 */
926 void update_subgroup_remove_peer(struct update_subgroup *subgrp,
927 struct peer_af *paf)
928 {
929 if (!subgrp || !paf)
930 return;
931
932 update_subgroup_remove_peer_internal(subgrp, paf);
933
934 if (update_subgroup_check_delete(subgrp))
935 return;
936
937 /*
938 * The deletion of the peer may have caused some packets to be
939 * deleted from the subgroup packet queue. Check if the subgroup can
940 * be merged now.
941 */
942 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
943 }
944
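/*
 * update_subgroup_find
 *
 * Find a subgroup in the given update group that a newly established
 * peer_af can join: one at version 0 that is not doing default-originate
 * and does not need a refresh. Returns NULL if no such subgroup exists.
 */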
945 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
946 struct peer_af *paf)
947 {
948 struct update_subgroup *subgrp = NULL;
949 uint64_t version;
950
951 if (paf->subgroup) {
952 assert(0);
953 return NULL;
954 } else
955 version = 0;
956
957 if (!peer_established(PAF_PEER(paf)))
958 return NULL;
959
960 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
961 {
962 if (subgrp->version != version
963 || CHECK_FLAG(subgrp->sflags,
964 SUBGRP_STATUS_DEFAULT_ORIGINATE))
965 continue;
966
967 /*
968 * The version number is not meaningful on a subgroup that needs
969 * a refresh.
970 */
971 if (update_subgroup_needs_refresh(subgrp))
972 continue;
973
974 break;
975 }
976
977 return subgrp;
978 }
979
980 /*
981 * update_subgroup_ready_for_merge
982 *
983 * Returns TRUE if this subgroup is in a state that allows it to be
984 * merged into another subgroup.
985 */
986 static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
987 {
988
989 /*
990 * Not ready if there are any encoded packets waiting to be written
991 * out to peers.
992 */
993 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
994 return 0;
995
996 /*
997 	 * Not ready if there are enqueued updates waiting to be encoded.
998 */
999 if (!advertise_list_is_empty(subgrp))
1000 return 0;
1001
1002 /*
1003 * Don't attempt to merge a subgroup that needs a refresh. For one,
1004 * we can't determine if the adj_out of such a group matches that of
1005 * another group.
1006 */
1007 if (update_subgroup_needs_refresh(subgrp))
1008 return 0;
1009
1010 return 1;
1011 }
1012
1013 /*
1014  * update_subgroup_can_merge_into
1015 *
1016 * Returns TRUE if the first subgroup can merge into the second
1017 * subgroup.
1018 */
1019 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1020 struct update_subgroup *target)
1021 {
1022
1023 if (subgrp == target)
1024 return 0;
1025
1026 /*
1027 * Both must have processed the BRIB to the same point in order to
1028 * be merged.
1029 */
1030 if (subgrp->version != target->version)
1031 return 0;
1032
1033 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1034 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1035 return 0;
1036
1037 if (subgrp->adj_count != target->adj_count)
1038 return 0;
1039
1040 return update_subgroup_ready_for_merge(target);
1041 }
1042
1043 /*
1044 * update_subgroup_merge
1045 *
1046 * Merge the first subgroup into the second one.
1047 */
1048 static void update_subgroup_merge(struct update_subgroup *subgrp,
1049 struct update_subgroup *target,
1050 const char *reason)
1051 {
1052 struct peer_af *paf;
1053 int result;
1054 int peer_count;
1055
1056 assert(subgrp->adj_count == target->adj_count);
1057
1058 peer_count = subgrp->peer_count;
1059
1060 while (1) {
1061 paf = LIST_FIRST(&subgrp->peers);
1062 if (!paf)
1063 break;
1064
1065 update_subgroup_remove_peer_internal(subgrp, paf);
1066
1067 /*
1068 * Add the peer to the target subgroup, while making sure that
1069 * any currently enqueued packets won't be sent to it. Enqueued
1070 * packets could, for example, result in an unnecessary withdraw
1071 * followed by an advertise.
1072 */
1073 update_subgroup_add_peer(target, paf, 0);
1074 }
1075
1076 SUBGRP_INCR_STAT(target, merge_events);
1077
1078 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1079 zlog_debug("u%" PRIu64 ":s%" PRIu64
1080 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1081 ", "
1082 "trigger: %s",
1083 subgrp->update_group->id, subgrp->id, peer_count,
1084 target->update_group->id, target->id,
1085 reason ? reason : "unknown");
1086
1087 result = update_subgroup_check_delete(subgrp);
1088 assert(result);
1089 }
1090
1091 /*
1092 * update_subgroup_check_merge
1093 *
1094 * Merge this subgroup into another subgroup if possible.
1095 *
1096 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1097 * should not be accessed in this case.
1098 */
1099 int update_subgroup_check_merge(struct update_subgroup *subgrp,
1100 const char *reason)
1101 {
1102 struct update_subgroup *target;
1103
1104 if (!update_subgroup_ready_for_merge(subgrp))
1105 return 0;
1106
1107 /*
1108 * Look for a subgroup to merge into.
1109 */
1110 UPDGRP_FOREACH_SUBGRP(subgrp->update_group, target)
1111 {
1112 if (update_subgroup_can_merge_into(subgrp, target))
1113 break;
1114 }
1115
1116 if (!target)
1117 return 0;
1118
1119 update_subgroup_merge(subgrp, target, reason);
1120 return 1;
1121 }
1122
1123 /*
1124 * update_subgroup_merge_check_thread_cb
1125 */
1126 static int update_subgroup_merge_check_thread_cb(struct thread *thread)
1127 {
1128 struct update_subgroup *subgrp;
1129
1130 subgrp = THREAD_ARG(thread);
1131
1132 subgrp->t_merge_check = NULL;
1133
1134 update_subgroup_check_merge(subgrp, "triggered merge check");
1135 return 0;
1136 }
1137
1138 /*
1139 * update_subgroup_trigger_merge_check
1140 *
1141 * Triggers a call to update_subgroup_check_merge() on a clean context.
1142 *
1143 * @param force If true, the merge check will be triggered even if the
1144 * subgroup doesn't currently look ready for a merge.
1145 *
1146 * Returns TRUE if a merge check will be performed shortly.
1147 */
1148 int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1149 int force)
1150 {
1151 if (subgrp->t_merge_check)
1152 return 1;
1153
1154 if (!force && !update_subgroup_ready_for_merge(subgrp))
1155 return 0;
1156
1157 subgrp->t_merge_check = NULL;
1158 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1159 subgrp, 0, &subgrp->t_merge_check);
1160
1161 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1162
1163 return 1;
1164 }
1165
1166 /*
1167 * update_subgroup_copy_adj_out
1168 *
1169 * Helper function that clones the adj out (state about advertised
1170 * routes) from one subgroup to another. It assumes that the adj out
1171 * of the target subgroup is empty.
1172 */
1173 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1174 struct update_subgroup *dest)
1175 {
1176 struct bgp_adj_out *aout, *aout_copy;
1177
1178 SUBGRP_FOREACH_ADJ(source, aout)
1179 {
1180 /*
1181 * Copy the adj out.
1182 */
1183 aout_copy =
1184 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1185 aout_copy->attr =
1186 aout->attr ? bgp_attr_refcount(aout->attr) : NULL;
1187 }
1188 }
1189
1190 /*
1191 * update_subgroup_copy_packets
1192 *
1193 * Copy packets after and including the given packet to the subgroup
1194 * 'dest'.
1195 *
1196 * Returns the number of packets copied.
1197 */
1198 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1199 struct bpacket *pkt)
1200 {
1201 int count;
1202
1203 count = 0;
1204 while (pkt && pkt->buffer) {
1205 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1206 &pkt->arr);
1207 count++;
1208 pkt = bpacket_next(pkt);
1209 }
1210
1211 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
1212
1213 return count;
1214 }
1215
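/*
 * The updgrp_*_update() helpers below re-resolve the cached policy
 * pointers (outbound prefix-list, filter-list, distribute-list,
 * route-map/unsuppress-map and default-originate route-map) on an update
 * group's conf peer when a policy with a matching name changes, and
 * report back whether the group actually references the named policy so
 * that the caller can decide to regenerate updates.
 */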
1216 static int updgrp_prefix_list_update(struct update_group *updgrp,
1217 const char *name)
1218 {
1219 struct peer *peer;
1220 struct bgp_filter *filter;
1221
1222 peer = UPDGRP_PEER(updgrp);
1223 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1224
1225 if (PREFIX_LIST_OUT_NAME(filter)
1226 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1227 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1228 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1229 return 1;
1230 }
1231 return 0;
1232 }
1233
1234 static int updgrp_filter_list_update(struct update_group *updgrp,
1235 const char *name)
1236 {
1237 struct peer *peer;
1238 struct bgp_filter *filter;
1239
1240 peer = UPDGRP_PEER(updgrp);
1241 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1242
1243 if (FILTER_LIST_OUT_NAME(filter)
1244 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1245 FILTER_LIST_OUT(filter) =
1246 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1247 return 1;
1248 }
1249 return 0;
1250 }
1251
1252 static int updgrp_distribute_list_update(struct update_group *updgrp,
1253 const char *name)
1254 {
1255 struct peer *peer;
1256 struct bgp_filter *filter;
1257
1258 peer = UPDGRP_PEER(updgrp);
1259 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1260
1261 if (DISTRIBUTE_OUT_NAME(filter)
1262 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1263 DISTRIBUTE_OUT(filter) = access_list_lookup(
1264 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1265 return 1;
1266 }
1267 return 0;
1268 }
1269
1270 static int updgrp_route_map_update(struct update_group *updgrp,
1271 const char *name, int *def_rmap_changed)
1272 {
1273 struct peer *peer;
1274 struct bgp_filter *filter;
1275 int changed = 0;
1276 afi_t afi;
1277 safi_t safi;
1278
1279 peer = UPDGRP_PEER(updgrp);
1280 afi = UPDGRP_AFI(updgrp);
1281 safi = UPDGRP_SAFI(updgrp);
1282 filter = &peer->filter[afi][safi];
1283
1284 if (ROUTE_MAP_OUT_NAME(filter)
1285 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1286 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1287
1288 changed = 1;
1289 }
1290
1291 if (UNSUPPRESS_MAP_NAME(filter)
1292 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1293 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1294 changed = 1;
1295 }
1296
1297 /* process default-originate route-map */
1298 if (peer->default_rmap[afi][safi].name
1299 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1300 peer->default_rmap[afi][safi].map =
1301 route_map_lookup_by_name(name);
1302 if (def_rmap_changed)
1303 *def_rmap_changed = 1;
1304 }
1305 return changed;
1306 }
1307
1308 /*
1309 * hash iteration callback function to process a policy change for an
1310 * update group. Check if the changed policy matches the updgrp's
1311 * outbound route-map or unsuppress-map or default-originate map or
1312 * filter-list or prefix-list or distribute-list.
1313 * Trigger update generation accordingly.
1314 */
1315 static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
1316 {
1317 struct updwalk_context *ctx = arg;
1318 struct update_subgroup *subgrp;
1319 int changed = 0;
1320 int def_changed = 0;
1321
1322 if (!updgrp || !ctx || !ctx->policy_name)
1323 return UPDWALK_CONTINUE;
1324
1325 switch (ctx->policy_type) {
1326 case BGP_POLICY_ROUTE_MAP:
1327 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1328 &def_changed);
1329 break;
1330 case BGP_POLICY_FILTER_LIST:
1331 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1332 break;
1333 case BGP_POLICY_PREFIX_LIST:
1334 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1335 break;
1336 case BGP_POLICY_DISTRIBUTE_LIST:
1337 changed =
1338 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1339 break;
1340 default:
1341 break;
1342 }
1343
1344 /* If not doing route update, return after updating "config" */
1345 if (!ctx->policy_route_update)
1346 return UPDWALK_CONTINUE;
1347
1348 /* If nothing has changed, return after updating "config" */
1349 if (!changed && !def_changed)
1350 return UPDWALK_CONTINUE;
1351
1352 /*
1353 	 * If something has changed, then at the beginning of a
1354 	 * route-map modification event, mark each subgroup's
1355 	 * needs-refresh bit. For one, it signals that the
1356 	 * subgroup needs a refresh. Second, it prevents a
1357 	 * premature merge of this subgroup with another before
1358 	 * a complete (outbound) refresh of the subgroup has
1359 	 * been done.
1360 */
1361 if (ctx->policy_event_start_flag) {
1362 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
1363 {
1364 update_subgroup_set_needs_refresh(subgrp, 1);
1365 }
1366 return UPDWALK_CONTINUE;
1367 }
1368
1369 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
1370 {
1371 if (changed) {
1372 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1373 zlog_debug(
1374 "u%" PRIu64 ":s%" PRIu64
1375 " announcing routes upon policy %s (type %d) change",
1376 updgrp->id, subgrp->id,
1377 ctx->policy_name, ctx->policy_type);
1378 subgroup_announce_route(subgrp);
1379 }
1380 if (def_changed) {
1381 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1382 zlog_debug(
1383 "u%" PRIu64 ":s%" PRIu64
1384 " announcing default upon default routemap %s change",
1385 updgrp->id, subgrp->id,
1386 ctx->policy_name);
1387 subgroup_default_originate(subgrp, 0);
1388 }
1389 update_subgroup_set_needs_refresh(subgrp, 0);
1390 }
1391 return UPDWALK_CONTINUE;
1392 }
1393
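/*
 * update_group_walkcb
 *
 * Adapter between the generic hash_walk() callback signature and the
 * updgrp_walkcb callbacks used throughout this file.
 */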
1394 static int update_group_walkcb(struct hash_backet *backet, void *arg)
1395 {
1396 struct update_group *updgrp = backet->data;
1397 struct updwalk_context *wctx = arg;
1398 int ret = (*wctx->cb)(updgrp, wctx->context);
1399 return ret;
1400 }
1401
1402 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1403 void *arg)
1404 {
1405 struct update_subgroup *subgrp;
1406 struct update_subgroup *tmp_subgrp;
1407 const char *reason = arg;
1408
1409 UPDGRP_FOREACH_SUBGRP_SAFE(updgrp, subgrp, tmp_subgrp)
1410 update_subgroup_check_merge(subgrp, reason);
1411 return UPDWALK_CONTINUE;
1412 }
1413
1414 /********************
1415 * PUBLIC FUNCTIONS
1416 ********************/
1417
1418 /*
1419 * trigger function when a policy (route-map/filter-list/prefix-list/
1420 * distribute-list etc.) content changes. Go through all the
1421 * update groups and process the change.
1422 *
1423 * bgp: the bgp instance
1424 * ptype: the type of policy that got modified, see bgpd.h
1425 * pname: name of the policy
1426 * route_update: flag to control if an automatic update generation should
1427 * occur
1428 * start_event: flag that indicates if it's the beginning of the change.
1429 * Esp. when the user is changing the content interactively
1430 * over multiple statements. Useful to set dirty flag on
1431 * update groups.
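 *
 * Illustrative call sequence (hypothetical caller, for exposition only):
 * an interactive edit of route-map "RM-OUT" could be bracketed as
 *
 *   update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT", 1, 1);
 *   ...apply the route-map edits...
 *   update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, "RM-OUT", 1, 0);
 *
 * where the first call marks matching subgroups as needing a refresh and
 * the second triggers the actual route (re)announcement; passing
 * route_update as 0 only refreshes the cached policy pointers.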
1432 */
1433 void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1434 const char *pname, int route_update,
1435 int start_event)
1436 {
1437 struct updwalk_context ctx;
1438
1439 memset(&ctx, 0, sizeof(ctx));
1440 ctx.policy_type = ptype;
1441 ctx.policy_name = pname;
1442 ctx.policy_route_update = route_update;
1443 ctx.policy_event_start_flag = start_event;
1444 ctx.flags = 0;
1445
1446 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1447 }
1448
1449 /*
1450 * update_subgroup_split_peer
1451 *
1452 * Ensure that the given peer is in a subgroup of its own in the
1453 * specified update group.
1454 */
1455 void update_subgroup_split_peer(struct peer_af *paf,
1456 struct update_group *updgrp)
1457 {
1458 struct update_subgroup *old_subgrp, *subgrp;
1459 uint64_t old_id;
1460
1461
1462 old_subgrp = paf->subgroup;
1463
1464 if (!updgrp)
1465 updgrp = old_subgrp->update_group;
1466
1467 /*
1468 * If the peer is alone in its subgroup, reuse the existing
1469 * subgroup.
1470 */
1471 if (old_subgrp->peer_count == 1) {
1472 if (updgrp == old_subgrp->update_group)
1473 return;
1474
1475 subgrp = old_subgrp;
1476 old_id = old_subgrp->update_group->id;
1477
1478 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1479 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1480 }
1481
1482 update_group_remove_subgroup(old_subgrp->update_group,
1483 old_subgrp);
1484 update_group_add_subgroup(updgrp, subgrp);
1485
1486 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1487 UPDGRP_PEER_DBG_EN(updgrp);
1488 }
1489 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1490 zlog_debug("u%" PRIu64 ":s%" PRIu64
1491 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1492 old_id, subgrp->id, paf->peer->host,
1493 updgrp->id, subgrp->id);
1494
1495 /*
1496 * The state of the subgroup (adj_out, advs, packet queue etc)
1497 * is consistent internally, but may not be identical to other
1498 * subgroups in the new update group even if the version number
1499 * matches up. Make sure a full refresh is done before the
1500 * subgroup is merged with another.
1501 */
1502 update_subgroup_set_needs_refresh(subgrp, 1);
1503
1504 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1505 return;
1506 }
1507
1508 /*
1509 * Create a new subgroup under the specified update group, and copy
1510 * over relevant state to it.
1511 */
1512 subgrp = update_subgroup_create(updgrp);
1513 update_subgroup_inherit_info(subgrp, old_subgrp);
1514
1515 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1516 subgrp->split_from.subgroup_id = old_subgrp->id;
1517
1518 /*
1519 * Copy out relevant state from the old subgroup.
1520 */
1521 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1522 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1523
1524 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1525 zlog_debug("u%" PRIu64 ":s%" PRIu64
1526 " peer %s split and moved into u%" PRIu64
1527 ":s%" PRIu64,
1528 paf->subgroup->update_group->id, paf->subgroup->id,
1529 paf->peer->host, updgrp->id, subgrp->id);
1530
1531 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1532
1533 /*
1534 * Since queued advs were left behind, this new subgroup needs a
1535 * refresh.
1536 */
1537 update_subgroup_set_needs_refresh(subgrp, 1);
1538
1539 /*
1540 * Remove peer from old subgroup, and add it to the new one.
1541 */
1542 update_subgroup_remove_peer(paf->subgroup, paf);
1543
1544 update_subgroup_add_peer(subgrp, paf, 1);
1545 }
1546
1547 void update_bgp_group_init(struct bgp *bgp)
1548 {
1549 int afid;
1550
1551 AF_FOREACH(afid)
1552 bgp->update_groups[afid] =
1553 hash_create(updgrp_hash_key_make, updgrp_hash_cmp, NULL);
1554 }
1555
1556 void update_bgp_group_free(struct bgp *bgp)
1557 {
1558 int afid;
1559
1560 AF_FOREACH(afid)
1561 {
1562 if (bgp->update_groups[afid]) {
1563 hash_free(bgp->update_groups[afid]);
1564 bgp->update_groups[afid] = NULL;
1565 }
1566 }
1567 }
1568
1569 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1570 uint64_t subgrp_id)
1571 {
1572 struct updwalk_context ctx;
1573 memset(&ctx, 0, sizeof(ctx));
1574 ctx.vty = vty;
1575 ctx.subgrp_id = subgrp_id;
1576
1577 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1578 }
1579
1580 /*
1581 * update_group_show_stats
1582 *
1583 * Show global statistics about update groups.
1584 */
1585 void update_group_show_stats(struct bgp *bgp, struct vty *vty)
1586 {
1587 vty_out(vty, "Update groups created: %u\n",
1588 bgp->update_group_stats.updgrps_created);
1589 vty_out(vty, "Update groups deleted: %u\n",
1590 bgp->update_group_stats.updgrps_deleted);
1591 vty_out(vty, "Update subgroups created: %u\n",
1592 bgp->update_group_stats.subgrps_created);
1593 vty_out(vty, "Update subgroups deleted: %u\n",
1594 bgp->update_group_stats.subgrps_deleted);
1595 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1596 vty_out(vty, "Prune events: %u\n",
1597 bgp->update_group_stats.prune_events);
1598 vty_out(vty, "Merge events: %u\n",
1599 bgp->update_group_stats.merge_events);
1600 vty_out(vty, "Split events: %u\n",
1601 bgp->update_group_stats.split_events);
1602 vty_out(vty, "Update group switch events: %u\n",
1603 bgp->update_group_stats.updgrp_switch_events);
1604 vty_out(vty, "Peer route refreshes combined: %u\n",
1605 bgp->update_group_stats.peer_refreshes_combined);
1606 vty_out(vty, "Merge checks triggered: %u\n",
1607 bgp->update_group_stats.merge_checks_triggered);
1608 }
1609
1610 /*
1611 * update_group_adjust_peer
1612 */
1613 void update_group_adjust_peer(struct peer_af *paf)
1614 {
1615 struct update_group *updgrp;
1616 struct update_subgroup *subgrp, *old_subgrp;
1617 struct peer *peer;
1618
1619 if (!paf)
1620 return;
1621
1622 peer = PAF_PEER(paf);
1623 if (!peer_established(peer)) {
1624 return;
1625 }
1626
1627 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1628 return;
1629 }
1630
1631 if (!peer->afc_nego[paf->afi][paf->safi]) {
1632 return;
1633 }
1634
1635 updgrp = update_group_find(paf);
1636 if (!updgrp) {
1637 updgrp = update_group_create(paf);
1638 if (!updgrp) {
1639 zlog_err("couldn't create update group for peer %s",
1640 paf->peer->host);
1641 return;
1642 }
1643 }
1644
1645 old_subgrp = paf->subgroup;
1646
1647 if (old_subgrp) {
1648
1649 /*
1650 		 * If the update group of the peer is unchanged, the
1651 		 * peer can stay in its existing subgroup and we're
1652 		 * done.
1653 */
1654 if (old_subgrp->update_group == updgrp)
1655 return;
1656
1657 /*
1658 * The peer is switching between update groups. Put it in its
1659 * own subgroup under the new update group.
1660 */
1661 update_subgroup_split_peer(paf, updgrp);
1662 return;
1663 }
1664
1665 subgrp = update_subgroup_find(updgrp, paf);
1666 if (!subgrp) {
1667 subgrp = update_subgroup_create(updgrp);
1668 if (!subgrp)
1669 return;
1670 }
1671
1672 update_subgroup_add_peer(subgrp, paf, 1);
1673 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1674 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1675 subgrp->id, paf->peer->host);
1676
1677 return;
1678 }
1679
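/*
 * update_group_adjust_soloness
 *
 * Apply the "solo" (lonesoul) setting to an individual peer or, when
 * invoked on a peer-group, to each of the group's members. Routes are
 * re-announced to every affected peer that is currently Established.
 */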
1680 int update_group_adjust_soloness(struct peer *peer, int set)
1681 {
1682 struct peer_group *group;
1683 struct listnode *node, *nnode;
1684
1685 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1686 peer_lonesoul_or_not(peer, set);
1687 if (peer->status == Established)
1688 bgp_announce_route_all(peer);
1689 } else {
1690 group = peer->group;
1691 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1692 peer_lonesoul_or_not(peer, set);
1693 if (peer->status == Established)
1694 bgp_announce_route_all(peer);
1695 }
1696 }
1697 return 0;
1698 }
1699
1700 /*
1701 * update_subgroup_rib
1702 */
1703 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
1704 {
1705 struct bgp *bgp;
1706
1707 bgp = SUBGRP_INST(subgrp);
1708 if (!bgp)
1709 return NULL;
1710
1711 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1712 }
1713
1714 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1715 updgrp_walkcb cb, void *ctx)
1716 {
1717 struct updwalk_context wctx;
1718 int afid;
1719
1720 if (!bgp)
1721 return;
1722 afid = afindex(afi, safi);
1723 if (afid >= BGP_AF_MAX)
1724 return;
1725
1726 memset(&wctx, 0, sizeof(wctx));
1727 wctx.cb = cb;
1728 wctx.context = ctx;
1729
1730 if (bgp->update_groups[afid])
1731 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1732 }
1733
1734 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1735 {
1736 afi_t afi;
1737 safi_t safi;
1738
1739 FOREACH_AFI_SAFI(afi, safi)
1740 {
1741 update_group_af_walk(bgp, afi, safi, cb, ctx);
1742 }
1743 }
1744
1745 void update_group_periodic_merge(struct bgp *bgp)
1746 {
1747 char reason[] = "periodic merge check";
1748
1749 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1750 (void *)reason);
1751 }
1752
1753 static int
1754 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1755 void *arg)
1756 {
1757 struct update_subgroup *subgrp;
1758 struct peer *peer;
1759 afi_t afi;
1760 safi_t safi;
1761
1762 UPDGRP_FOREACH_SUBGRP(updgrp, subgrp)
1763 {
1764 peer = SUBGRP_PEER(subgrp);
1765 afi = SUBGRP_AFI(subgrp);
1766 safi = SUBGRP_SAFI(subgrp);
1767
1768 if (peer->default_rmap[afi][safi].name) {
1769 subgroup_default_originate(subgrp, 0);
1770 }
1771 }
1772
1773 return UPDWALK_CONTINUE;
1774 }
1775
1776 int update_group_refresh_default_originate_route_map(struct thread *thread)
1777 {
1778 struct bgp *bgp;
1779 char reason[] = "refresh default-originate route-map";
1780
1781 bgp = THREAD_ARG(thread);
1782 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1783 reason);
1784 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1785 bgp_unlock(bgp);
1786
1787 return (0);
1788 }
1789
1790 /*
1791 * peer_af_announce_route
1792 *
1793 * Refreshes routes out to a peer_af immediately.
1794 *
1795 * If the combine parameter is TRUE, then this function will try to
1796 * gather other peers in the subgroup for which a route announcement
1797  * is pending and efficiently announce routes to all of them.
1798 *
1799 * For now, the 'combine' option has an effect only if all peers in
1800 * the subgroup have a route announcement pending.
1801 */
1802 void peer_af_announce_route(struct peer_af *paf, int combine)
1803 {
1804 struct update_subgroup *subgrp;
1805 struct peer_af *cur_paf;
1806 int all_pending;
1807
1808 subgrp = paf->subgroup;
1809 all_pending = 0;
1810
1811 if (combine) {
1812 /*
1813 * If there are other peers in the old subgroup that also need
1814 * routes to be announced, pull them into the peer's new
1815 * subgroup.
1816 * Combine route announcement with other peers if possible.
1817 *
1818 * For now, we combine only if all peers in the subgroup have an
1819 * announcement pending.
1820 */
1821 all_pending = 1;
1822
1823 SUBGRP_FOREACH_PEER(subgrp, cur_paf)
1824 {
1825 if (cur_paf == paf)
1826 continue;
1827
1828 if (cur_paf->t_announce_route)
1829 continue;
1830
1831 all_pending = 0;
1832 break;
1833 }
1834 }
1835 /*
1836 * Announce to the peer alone if we were not asked to combine peers,
1837 	 * or if some peers don't have a route announcement pending.
1838 */
1839 if (!combine || !all_pending) {
1840 update_subgroup_split_peer(paf, NULL);
1841 if (!paf->subgroup)
1842 return;
1843
1844 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1845 zlog_debug("u%" PRIu64 ":s%" PRIu64
1846 " %s announcing routes",
1847 subgrp->update_group->id, subgrp->id,
1848 paf->peer->host);
1849
1850 subgroup_announce_route(paf->subgroup);
1851 return;
1852 }
1853
1854 /*
1855 	 * We will announce routes to the entire subgroup.
1856 *
1857 * First stop refresh timers on all the other peers.
1858 */
1859 SUBGRP_FOREACH_PEER(subgrp, cur_paf)
1860 {
1861 if (cur_paf == paf)
1862 continue;
1863
1864 bgp_stop_announce_route_timer(cur_paf);
1865 }
1866
1867 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1868 zlog_debug("u%" PRIu64 ":s%" PRIu64
1869 " announcing routes to %s, combined into %d peers",
1870 subgrp->update_group->id, subgrp->id,
1871 paf->peer->host, subgrp->peer_count);
1872
1873 subgroup_announce_route(subgrp);
1874
1875 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1876 subgrp->peer_count - 1);
1877 }
1878
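/*
 * subgroup_trigger_write
 *
 * Schedule the write thread for every Established peer in the subgroup so
 * that any packets queued for the subgroup get sent out.
 */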
1879 void subgroup_trigger_write(struct update_subgroup *subgrp)
1880 {
1881 struct peer_af *paf;
1882
1883 #if 0
1884 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1885 zlog_debug("u%llu:s%llu scheduling write thread for peers",
1886 subgrp->update_group->id, subgrp->id);
1887 #endif
1888 SUBGRP_FOREACH_PEER(subgrp, paf)
1889 {
1890 if (paf->peer->status == Established) {
1891 BGP_PEER_WRITE_ON(paf->peer->t_write, bgp_write,
1892 paf->peer->fd, paf->peer);
1893 }
1894 }
1895 }
1896
1897 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
1898 {
1899 UPDGRP_PEER_DBG_OFF(updgrp);
1900 return UPDWALK_CONTINUE;
1901 }
1902
1903 /* Return true if we should addpath encode NLRI to this peer */
1904 int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
1905 {
1906 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1907 && CHECK_FLAG(peer->af_cap[afi][safi],
1908 PEER_CAP_ADDPATH_AF_RX_RCV));
1909 }
1910
1911 /*
1912 * Return true if this is a path we should advertise due to a
1913 * configured addpath-tx knob
1914 */
1915 int bgp_addpath_tx_path(struct peer *peer, afi_t afi, safi_t safi,
1916 struct bgp_info *ri)
1917 {
1918 if (CHECK_FLAG(peer->af_flags[afi][safi],
1919 PEER_FLAG_ADDPATH_TX_ALL_PATHS))
1920 return 1;
1921
1922 if (CHECK_FLAG(peer->af_flags[afi][safi],
1923 PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)
1924 && CHECK_FLAG(ri->flags, BGP_INFO_DMED_SELECTED))
1925 return 1;
1926
1927 return 0;
1928 }