1 /**
2 * bgp_updgrp.c: BGP update group structures
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "jhash.h"
45 #include "queue.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_table.h"
49 #include "bgpd/bgp_debug.h"
50 #include "bgpd/bgp_errors.h"
51 #include "bgpd/bgp_fsm.h"
52 #include "bgpd/bgp_advertise.h"
53 #include "bgpd/bgp_packet.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_route.h"
56 #include "bgpd/bgp_filter.h"
57 #include "bgpd/bgp_io.h"
58
59 /********************
60 * PRIVATE FUNCTIONS
61 ********************/
62
63 /**
64 * Assign a unique ID to an update group or subgroup, mostly for display
65 * and debugging purposes. The ID space is 64 bits, so it can be used
66 * freely without worrying about wrap-around or gaps. While at it,
67 * timestamp the creation.
68 */
69 static void update_group_checkin(struct update_group *updgrp)
70 {
71 updgrp->id = ++bm->updgrp_idspace;
72 updgrp->uptime = bgp_clock();
73 }
74
75 static void update_subgroup_checkin(struct update_subgroup *subgrp,
76 struct update_group *updgrp)
77 {
78 subgrp->id = ++bm->subgrp_idspace;
79 subgrp->uptime = bgp_clock();
80 }
81
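/*
 * sync_init
 *
 * Allocate and initialize the advertisement FIFOs, the attribute
 * hash, and the work/scratch streams used while building UPDATEs
 * for a subgroup.
 */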
82 static void sync_init(struct update_subgroup *subgrp)
83 {
84 subgrp->sync =
85 XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
86 BGP_ADV_FIFO_INIT(&subgrp->sync->update);
87 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw);
88 BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw_low);
89 subgrp->hash =
90 hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
91
92 /* We use a larger buffer for subgrp->work in the event
93 * that:
94 * - We RX a BGP_UPDATE where the attributes alone are
95 * just under BGP_MAX_PACKET_SIZE
96 * - The user configures an outbound route-map that does
97 * many as-path prepends or adds many communities. At
98 * most they can have CMD_ARGC_MAX args in a route-map,
99 * so there is a finite limit on how large they can make
100 * the attributes.
101 *
102 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW
103 * allows us to avoid bounds checking for every single
104 * attribute as we construct an UPDATE.
105 */
106 subgrp->work =
107 stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
108 subgrp->scratch = stream_new(BGP_MAX_PACKET_SIZE);
109 }
110
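/*
 * sync_delete
 *
 * Free the structures allocated by sync_init().
 */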
111 static void sync_delete(struct update_subgroup *subgrp)
112 {
113 if (subgrp->sync)
114 XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
115 subgrp->sync = NULL;
116 if (subgrp->hash)
117 hash_free(subgrp->hash);
118 subgrp->hash = NULL;
119 if (subgrp->work)
120 stream_free(subgrp->work);
121 subgrp->work = NULL;
122 if (subgrp->scratch)
123 stream_free(subgrp->scratch);
124 subgrp->scratch = NULL;
125 }
126
127 /**
128 * conf_copy
129 *
130 * copy only those fields that are relevant to update group match
131 */
132 static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
133 safi_t safi)
134 {
135 struct bgp_filter *srcfilter;
136 struct bgp_filter *dstfilter;
137
138 srcfilter = &src->filter[afi][safi];
139 dstfilter = &dst->filter[afi][safi];
140
141 dst->bgp = src->bgp;
142 dst->sort = src->sort;
143 dst->as = src->as;
144 dst->v_routeadv = src->v_routeadv;
145 dst->flags = src->flags;
146 dst->af_flags[afi][safi] = src->af_flags[afi][safi];
147 if (dst->host)
148 XFREE(MTYPE_BGP_PEER_HOST, dst->host);
149
150 dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
151 dst->cap = src->cap;
152 dst->af_cap[afi][safi] = src->af_cap[afi][safi];
153 dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
154 dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
155 dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
156 dst->local_as = src->local_as;
157 dst->change_local_as = src->change_local_as;
158 dst->shared_network = src->shared_network;
159 memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
160
161 dst->group = src->group;
162
163 if (src->default_rmap[afi][safi].name) {
164 dst->default_rmap[afi][safi].name =
165 XSTRDUP(MTYPE_ROUTE_MAP_NAME,
166 src->default_rmap[afi][safi].name);
167 dst->default_rmap[afi][safi].map =
168 src->default_rmap[afi][safi].map;
169 }
170
171 if (DISTRIBUTE_OUT_NAME(srcfilter)) {
172 DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
173 MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
174 DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
175 }
176
177 if (PREFIX_LIST_OUT_NAME(srcfilter)) {
178 PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
179 MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
180 PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
181 }
182
183 if (FILTER_LIST_OUT_NAME(srcfilter)) {
184 FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
185 MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
186 FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
187 }
188
189 if (ROUTE_MAP_OUT_NAME(srcfilter)) {
190 ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
191 MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
192 ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
193 }
194
195 if (UNSUPPRESS_MAP_NAME(srcfilter)) {
196 UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
197 MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
198 UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
199 }
200 }
201
202 /**
203 * since we did a bunch of XSTRDUP's in conf_copy, time to free them up
204 */
205 static void conf_release(struct peer *src, afi_t afi, safi_t safi)
206 {
207 struct bgp_filter *srcfilter;
208
209 srcfilter = &src->filter[afi][safi];
210
211 if (src->default_rmap[afi][safi].name)
212 XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
213
214 if (srcfilter->dlist[FILTER_OUT].name)
215 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
216
217 if (srcfilter->plist[FILTER_OUT].name)
218 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
219
220 if (srcfilter->aslist[FILTER_OUT].name)
221 XFREE(MTYPE_BGP_FILTER_NAME,
222 srcfilter->aslist[FILTER_OUT].name);
223
224 if (srcfilter->map[RMAP_OUT].name)
225 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
226
227 if (srcfilter->usmap.name)
228 XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
229
230 if (src->host)
231 XFREE(MTYPE_BGP_PEER_HOST, src->host);
232 src->host = NULL;
233 }
234
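/*
 * peer2_updgrp_copy
 *
 * Copy the update-group-relevant configuration of a peer_af into the
 * update group's "conf" peer, which acts as the key for hashing and
 * comparison.
 */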
235 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
236 {
237 struct peer *src;
238 struct peer *dst;
239
240 if (!updgrp || !paf)
241 return;
242
243 src = paf->peer;
244 dst = updgrp->conf;
245 if (!src || !dst)
246 return;
247
248 updgrp->afi = paf->afi;
249 updgrp->safi = paf->safi;
250 updgrp->afid = paf->afid;
251 updgrp->bgp = src->bgp;
252
253 conf_copy(dst, src, paf->afi, paf->safi);
254 }
255
256 /**
257 * auxiliary functions to maintain the hash table.
258 * - updgrp_hash_alloc - to create a new entry, passed to hash_get
259 * - updgrp_hash_key_make - makes the key for update group search
260 * - updgrp_hash_cmp - compare two update groups.
261 */
262 static void *updgrp_hash_alloc(void *p)
263 {
264 struct update_group *updgrp;
265 const struct update_group *in;
266
267 in = (const struct update_group *)p;
268 updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
269 memcpy(updgrp, in, sizeof(struct update_group));
270 updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
271 conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
272 return updgrp;
273 }
274
275 /**
276 * The hash value for a peer is computed from the following variables:
277 * v = f(
278 * 1. IBGP (1) or EBGP (2)
279 * 2. FLAGS based on configuration:
280 * LOCAL_AS_NO_PREPEND
281 * LOCAL_AS_REPLACE_AS
282 * 3. AF_FLAGS based on configuration:
283 * Refer to definition in bgp_updgrp.h
284 * 4. (AF-independent) Capability flags:
285 * AS4_RCV capability
286 * 5. (AF-dependent) Capability flags:
287 * ORF_PREFIX_SM_RCV (peer can send prefix ORF)
288 * 6. MRAI
289 * 7. peer-group name
290 * 8. Outbound route-map name (neighbor route-map <> out)
291 * 9. Outbound distribute-list name (neighbor distribute-list <> out)
292 * 10. Outbound prefix-list name (neighbor prefix-list <> out)
293 * 11. Outbound as-list name (neighbor filter-list <> out)
294 * 12. Unsuppress map name (neighbor unsuppress-map <>)
295 * 13. default rmap name (neighbor default-originate route-map <>)
296 * 14. encoding both global and link-local nexthop?
297 * 15. If peer is configured to be a lonesoul, peer ip address
298 * 16. Local-as should match, if configured.
299 * )
300 */
301 static unsigned int updgrp_hash_key_make(void *p)
302 {
303 const struct update_group *updgrp;
304 const struct peer *peer;
305 const struct bgp_filter *filter;
306 uint32_t flags;
307 uint32_t key;
308 afi_t afi;
309 safi_t safi;
310
311 #define SEED1 999331
312 #define SEED2 2147483647
313
314 updgrp = p;
315 peer = updgrp->conf;
316 afi = updgrp->afi;
317 safi = updgrp->safi;
318 flags = peer->af_flags[afi][safi];
319 filter = &peer->filter[afi][safi];
320
321 key = 0;
322
323 key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
324 key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
325 key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
326 key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
327 key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
328 key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
329 key);
330 key = jhash_1word(peer->v_routeadv, key);
331 key = jhash_1word(peer->change_local_as, key);
332
333 if (peer->group)
334 key = jhash_1word(jhash(peer->group->name,
335 strlen(peer->group->name), SEED1),
336 key);
337
338 if (filter->map[RMAP_OUT].name)
339 key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
340 strlen(filter->map[RMAP_OUT].name),
341 SEED1),
342 key);
343
344 if (filter->dlist[FILTER_OUT].name)
345 key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
346 strlen(filter->dlist[FILTER_OUT].name),
347 SEED1),
348 key);
349
350 if (filter->plist[FILTER_OUT].name)
351 key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
352 strlen(filter->plist[FILTER_OUT].name),
353 SEED1),
354 key);
355
356 if (filter->aslist[FILTER_OUT].name)
357 key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
358 strlen(filter->aslist[FILTER_OUT].name),
359 SEED1),
360 key);
361
362 if (filter->usmap.name)
363 key = jhash_1word(jhash(filter->usmap.name,
364 strlen(filter->usmap.name), SEED1),
365 key);
366
367 if (peer->default_rmap[afi][safi].name)
368 key = jhash_1word(
369 jhash(peer->default_rmap[afi][safi].name,
370 strlen(peer->default_rmap[afi][safi].name),
371 SEED1),
372 key);
373
374 /* If peer is on a shared network and is exchanging IPv6 prefixes,
375 * it needs to include link-local address. That's different from
376 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
377 * bytes). We create different update groups to take care of that.
378 */
379 key = jhash_1word(
380 (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
381 key);
382
383 /*
384 * There are certain peers that must get their own update-group:
385 * - lonesoul peers
386 * - peers that negotiated ORF
387 */
388 if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
389 || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
390 || CHECK_FLAG(peer->af_cap[afi][safi],
391 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
392 key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
393 key);
394
395 return key;
396 }
397
398 static bool updgrp_hash_cmp(const void *p1, const void *p2)
399 {
400 const struct update_group *grp1;
401 const struct update_group *grp2;
402 const struct peer *pe1;
403 const struct peer *pe2;
404 uint32_t flags1;
405 uint32_t flags2;
406 const struct bgp_filter *fl1;
407 const struct bgp_filter *fl2;
408 afi_t afi;
409 safi_t safi;
410
411 if (!p1 || !p2)
412 return false;
413
414 grp1 = p1;
415 grp2 = p2;
416 pe1 = grp1->conf;
417 pe2 = grp2->conf;
418 afi = grp1->afi;
419 safi = grp1->safi;
420 flags1 = pe1->af_flags[afi][safi];
421 flags2 = pe2->af_flags[afi][safi];
422 fl1 = &pe1->filter[afi][safi];
423 fl2 = &pe2->filter[afi][safi];
424
425 /* put EBGP and IBGP peers in different update groups */
426 if (pe1->sort != pe2->sort)
427 return false;
428
429 /* check peer flags */
430 if ((pe1->flags & PEER_UPDGRP_FLAGS)
431 != (pe2->flags & PEER_UPDGRP_FLAGS))
432 return false;
433
434 /* If there is 'local-as' configured, it should match. */
435 if (pe1->change_local_as != pe2->change_local_as)
436 return false;
437
438 /* flags like route reflector client */
439 if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
440 return false;
441
442 if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
443 return false;
444
445 if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
446 != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
447 return false;
448
449 if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
450 != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
451 return false;
452
453 if (pe1->v_routeadv != pe2->v_routeadv)
454 return false;
455
456 if (pe1->group != pe2->group)
457 return false;
458
459 /* route-map names should be the same */
460 if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
461 || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
462 || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
463 && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
464 return false;
465
466 if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
467 || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
468 || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
469 && strcmp(fl1->dlist[FILTER_OUT].name,
470 fl2->dlist[FILTER_OUT].name)))
471 return false;
472
473 if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
474 || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
475 || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
476 && strcmp(fl1->plist[FILTER_OUT].name,
477 fl2->plist[FILTER_OUT].name)))
478 return false;
479
480 if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
481 || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
482 || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
483 && strcmp(fl1->aslist[FILTER_OUT].name,
484 fl2->aslist[FILTER_OUT].name)))
485 return false;
486
487 if ((fl1->usmap.name && !fl2->usmap.name)
488 || (!fl1->usmap.name && fl2->usmap.name)
489 || (fl1->usmap.name && fl2->usmap.name
490 && strcmp(fl1->usmap.name, fl2->usmap.name)))
491 return false;
492
493 if ((pe1->default_rmap[afi][safi].name
494 && !pe2->default_rmap[afi][safi].name)
495 || (!pe1->default_rmap[afi][safi].name
496 && pe2->default_rmap[afi][safi].name)
497 || (pe1->default_rmap[afi][safi].name
498 && pe2->default_rmap[afi][safi].name
499 && strcmp(pe1->default_rmap[afi][safi].name,
500 pe2->default_rmap[afi][safi].name)))
501 return false;
502
503 if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
504 return false;
505
506 if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
507 || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
508 || CHECK_FLAG(pe1->af_cap[afi][safi],
509 PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
510 && !sockunion_same(&pe1->su, &pe2->su))
511 return false;
512
513 return true;
514 }
515
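/*
 * peer_lonesoul_or_not
 *
 * Set or clear the LONESOUL flag on a peer and re-evaluate the
 * peer's update-group membership across all address families.
 */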
516 static void peer_lonesoul_or_not(struct peer *peer, int set)
517 {
518 /* no change in status? */
519 if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
520 return;
521
522 if (set)
523 SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
524 else
525 UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
526
527 update_group_adjust_peer_afs(peer);
528 }
529
530 /*
531 * subgroup_total_packets_enqueued
532 *
533 * Returns the total number of packets enqueued to a subgroup.
534 */
535 static unsigned int
536 subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
537 {
538 struct bpacket *pkt;
539
540 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
541
542 return pkt->ver - 1;
543 }
544
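/*
 * update_group_show_walkcb
 *
 * Walk callback that prints the state of an update group and its
 * subgroups to the vty, optionally restricted to one subgroup id.
 */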
545 static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
546 {
547 struct updwalk_context *ctx = arg;
548 struct vty *vty;
549 struct update_subgroup *subgrp;
550 struct peer_af *paf;
551 struct bgp_filter *filter;
552 int match = 0;
553
554 if (!ctx)
555 return CMD_SUCCESS;
556
557 if (ctx->subgrp_id) {
558 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
559 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
560 continue;
561 else {
562 match = 1;
563 break;
564 }
565 }
566 } else {
567 match = 1;
568 }
569
570 if (!match) {
571 /* Since this routine is invoked from a walk, we cannot signal
572 * any error here; we can only return.
573 */
574 return CMD_SUCCESS;
575 }
576
577 vty = ctx->vty;
578
579 vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
580 vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
581 filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
582 if (filter->map[RMAP_OUT].name)
583 vty_out(vty, " Outgoing route map: %s%s\n",
584 filter->map[RMAP_OUT].map ? "X" : "",
585 filter->map[RMAP_OUT].name);
586 vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
587 if (updgrp->conf->change_local_as)
588 vty_out(vty, " Local AS %u%s%s\n",
589 updgrp->conf->change_local_as,
590 CHECK_FLAG(updgrp->conf->flags,
591 PEER_FLAG_LOCAL_AS_NO_PREPEND)
592 ? " no-prepend"
593 : "",
594 CHECK_FLAG(updgrp->conf->flags,
595 PEER_FLAG_LOCAL_AS_REPLACE_AS)
596 ? " replace-as"
597 : "");
598
599 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
600 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
601 continue;
602 vty_out(vty, "\n");
603 vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
604 vty_out(vty, " Created: %s",
605 timestamp_string(subgrp->uptime));
606
607 if (subgrp->split_from.update_group_id
608 || subgrp->split_from.subgroup_id) {
609 vty_out(vty, " Split from group id: %" PRIu64 "\n",
610 subgrp->split_from.update_group_id);
611 vty_out(vty,
612 " Split from subgroup id: %" PRIu64 "\n",
613 subgrp->split_from.subgroup_id);
614 }
615
616 vty_out(vty, " Join events: %u\n", subgrp->join_events);
617 vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
618 vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
619 vty_out(vty, " Split events: %u\n", subgrp->split_events);
620 vty_out(vty, " Update group switch events: %u\n",
621 subgrp->updgrp_switch_events);
622 vty_out(vty, " Peer refreshes combined: %u\n",
623 subgrp->peer_refreshes_combined);
624 vty_out(vty, " Merge checks triggered: %u\n",
625 subgrp->merge_checks_triggered);
626 vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
627 vty_out(vty, " Packet queue length: %d\n",
628 bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
629 vty_out(vty, " Total packets enqueued: %u\n",
630 subgroup_total_packets_enqueued(subgrp));
631 vty_out(vty, " Packet queue high watermark: %d\n",
632 bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
633 vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
634 vty_out(vty, " Advertise list: %s\n",
635 advertise_list_is_empty(subgrp) ? "empty"
636 : "not empty");
637 vty_out(vty, " Flags: %s\n",
638 CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
639 ? "R"
640 : "");
641 if (subgrp->peer_count > 0) {
642 vty_out(vty, " Peers:\n");
643 SUBGRP_FOREACH_PEER (subgrp, paf)
644 vty_out(vty, " - %s\n", paf->peer->host);
645 }
646 }
647 return UPDWALK_CONTINUE;
648 }
649
650 /*
651 * Helper function to show the packet queue for each subgroup of an update
652 * group. The output is constrained to a particular subgroup id if id != 0.
653 */
654 static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
655 void *arg)
656 {
657 struct updwalk_context *ctx = arg;
658 struct update_subgroup *subgrp;
659 struct vty *vty;
660
661 vty = ctx->vty;
662 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
663 if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
664 continue;
665 vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
666 updgrp->id, subgrp->id);
667 bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
668 }
669 return UPDWALK_CONTINUE;
670 }
671
672 /*
673 * Show the packet queue for each subgroup of an update group. The output is
674 * constrained to a particular subgroup id if id != 0.
675 */
676 void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
677 struct vty *vty, uint64_t id)
678 {
679 struct updwalk_context ctx;
680
681 memset(&ctx, 0, sizeof(ctx));
682 ctx.vty = vty;
683 ctx.subgrp_id = id;
684 ctx.flags = 0;
685 update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
686 &ctx);
687 }
688
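/*
 * update_group_find
 *
 * Look up the update group that matches the outbound configuration
 * of the given peer_af. Returns NULL if the peer is not established
 * or no matching group exists.
 */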
689 static struct update_group *update_group_find(struct peer_af *paf)
690 {
691 struct update_group *updgrp;
692 struct update_group tmp;
693 struct peer tmp_conf;
694
695 if (!peer_established(PAF_PEER(paf)))
696 return NULL;
697
698 memset(&tmp, 0, sizeof(tmp));
699 memset(&tmp_conf, 0, sizeof(tmp_conf));
700 tmp.conf = &tmp_conf;
701 peer2_updgrp_copy(&tmp, paf);
702
703 updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
704 conf_release(&tmp_conf, paf->afi, paf->safi);
705 return updgrp;
706 }
707
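/*
 * update_group_create
 *
 * Create an update group keyed on the given peer_af's outbound
 * configuration and insert it into the per-AF hash table.
 */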
708 static struct update_group *update_group_create(struct peer_af *paf)
709 {
710 struct update_group *updgrp;
711 struct update_group tmp;
712 struct peer tmp_conf;
713
714 memset(&tmp, 0, sizeof(tmp));
715 memset(&tmp_conf, 0, sizeof(tmp_conf));
716 tmp.conf = &tmp_conf;
717 peer2_updgrp_copy(&tmp, paf);
718
719 updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
720 updgrp_hash_alloc);
721 if (!updgrp)
722 return NULL;
723 update_group_checkin(updgrp);
724
725 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
726 zlog_debug("create update group %" PRIu64, updgrp->id);
727
728 UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
729
730 conf_release(&tmp_conf, paf->afi, paf->safi);
731 return updgrp;
732 }
733
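/*
 * update_group_delete
 *
 * Remove an update group from the hash table and free it along with
 * its cached peer configuration.
 */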
734 static void update_group_delete(struct update_group *updgrp)
735 {
736 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
737 zlog_debug("delete update group %" PRIu64, updgrp->id);
738
739 UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
740
741 hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
742 conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
743
744 if (updgrp->conf->host)
745 XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
746 updgrp->conf->host = NULL;
747
748 if (updgrp->conf->ifname)
749 XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
750
751 XFREE(MTYPE_BGP_PEER, updgrp->conf);
752 XFREE(MTYPE_BGP_UPDGRP, updgrp);
753 }
754
755 static void update_group_add_subgroup(struct update_group *updgrp,
756 struct update_subgroup *subgrp)
757 {
758 if (!updgrp || !subgrp)
759 return;
760
761 LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
762 subgrp->update_group = updgrp;
763 }
764
765 static void update_group_remove_subgroup(struct update_group *updgrp,
766 struct update_subgroup *subgrp)
767 {
768 if (!updgrp || !subgrp)
769 return;
770
771 LIST_REMOVE(subgrp, updgrp_train);
772 subgrp->update_group = NULL;
773 if (LIST_EMPTY(&(updgrp->subgrps)))
774 update_group_delete(updgrp);
775 }
776
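/*
 * update_subgroup_create
 *
 * Allocate a new subgroup under the given update group and
 * initialize its sync state, packet queue and adj-out queue.
 */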
777 static struct update_subgroup *
778 update_subgroup_create(struct update_group *updgrp)
779 {
780 struct update_subgroup *subgrp;
781
782 subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
783 update_subgroup_checkin(subgrp, updgrp);
784 subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
785 sync_init(subgrp);
786 bpacket_queue_init(SUBGRP_PKTQ(subgrp));
787 bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
788 TAILQ_INIT(&(subgrp->adjq));
789 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
790 zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
791 subgrp->id);
792
793 update_group_add_subgroup(updgrp, subgrp);
794
795 UPDGRP_INCR_STAT(updgrp, subgrps_created);
796
797 return subgrp;
798 }
799
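/*
 * update_subgroup_delete
 *
 * Tear down a subgroup: stop its timers, clean up its packet queue
 * and adj-out table, detach it from its update group and free it.
 */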
800 static void update_subgroup_delete(struct update_subgroup *subgrp)
801 {
802 if (!subgrp)
803 return;
804
805 if (subgrp->update_group)
806 UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
807
808 if (subgrp->t_merge_check)
809 THREAD_OFF(subgrp->t_merge_check);
810
811 if (subgrp->t_coalesce)
812 THREAD_TIMER_OFF(subgrp->t_coalesce);
813
814 bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
815 subgroup_clear_table(subgrp);
816
817 if (subgrp->t_coalesce)
818 THREAD_TIMER_OFF(subgrp->t_coalesce);
819 sync_delete(subgrp);
820
821 if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
822 zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
823 subgrp->update_group->id, subgrp->id);
824
825 update_group_remove_subgroup(subgrp->update_group, subgrp);
826
827 XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
828 }
829
830 void update_subgroup_inherit_info(struct update_subgroup *to,
831 struct update_subgroup *from)
832 {
833 if (!to || !from)
834 return;
835
836 to->sflags = from->sflags;
837 }
838
839 /*
840 * update_subgroup_check_delete
841 *
842 * Delete a subgroup if it is ready to be deleted.
843 *
844 * Returns TRUE if the subgroup was deleted.
845 */
846 static int update_subgroup_check_delete(struct update_subgroup *subgrp)
847 {
848 if (!subgrp)
849 return 0;
850
851 if (!LIST_EMPTY(&(subgrp->peers)))
852 return 0;
853
854 update_subgroup_delete(subgrp);
855
856 return 1;
857 }
858
859 /*
860 * update_subgroup_add_peer
861 *
862 * @param send_enqueued_pkts If true, all currently enqueued packets will
863 * also be sent to the peer.
864 */
865 static void update_subgroup_add_peer(struct update_subgroup *subgrp,
866 struct peer_af *paf,
867 int send_enqueued_pkts)
868 {
869 struct bpacket *pkt;
870
871 if (!subgrp || !paf)
872 return;
873
874 LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
875 paf->subgroup = subgrp;
876 subgrp->peer_count++;
877
878 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
879 UPDGRP_PEER_DBG_EN(subgrp->update_group);
880 }
881
882 SUBGRP_INCR_STAT(subgrp, join_events);
883
884 if (send_enqueued_pkts) {
885 pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
886 } else {
887
888 /*
889 * Hang the peer off of the last, placeholder, packet in the
890 * queue. This means it won't see any of the packets that are
891 * currently in the queue.
892 */
893 pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
894 assert(pkt->buffer == NULL);
895 }
896
897 bpacket_add_peer(pkt, paf);
898
899 bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
900 }
901
902 /*
903 * update_subgroup_remove_peer_internal
904 *
905 * Internal function that removes a peer from a subgroup, but does not
906 * delete the subgroup. A call to this function must almost always be
907 * followed by a call to update_subgroup_check_delete().
908 *
909 * @see update_subgroup_remove_peer
910 */
911 static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
912 struct peer_af *paf)
913 {
914 assert(subgrp && paf && subgrp->update_group);
915
916 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
917 UPDGRP_PEER_DBG_DIS(subgrp->update_group);
918 }
919
920 bpacket_queue_remove_peer(paf);
921 LIST_REMOVE(paf, subgrp_train);
922 paf->subgroup = NULL;
923 subgrp->peer_count--;
924
925 SUBGRP_INCR_STAT(subgrp, prune_events);
926 }
927
928 /*
929 * update_subgroup_remove_peer
930 */
931 void update_subgroup_remove_peer(struct update_subgroup *subgrp,
932 struct peer_af *paf)
933 {
934 if (!subgrp || !paf)
935 return;
936
937 update_subgroup_remove_peer_internal(subgrp, paf);
938
939 if (update_subgroup_check_delete(subgrp))
940 return;
941
942 /*
943 * The deletion of the peer may have caused some packets to be
944 * deleted from the subgroup packet queue. Check if the subgroup can
945 * be merged now.
946 */
947 update_subgroup_check_merge(subgrp, "removed peer from subgroup");
948 }
949
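/*
 * update_subgroup_find
 *
 * Find an existing subgroup that a newly joining peer_af can be
 * placed into: it must be at version 0, must not need a refresh and
 * must not have default-originate state.
 */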
950 static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
951 struct peer_af *paf)
952 {
953 struct update_subgroup *subgrp = NULL;
954 uint64_t version;
955
956 if (paf->subgroup) {
957 assert(0);
958 return NULL;
959 } else
960 version = 0;
961
962 if (!peer_established(PAF_PEER(paf)))
963 return NULL;
964
965 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
966 if (subgrp->version != version
967 || CHECK_FLAG(subgrp->sflags,
968 SUBGRP_STATUS_DEFAULT_ORIGINATE))
969 continue;
970
971 /*
972 * The version number is not meaningful on a subgroup that needs
973 * a refresh.
974 */
975 if (update_subgroup_needs_refresh(subgrp))
976 continue;
977
978 break;
979 }
980
981 return subgrp;
982 }
983
984 /*
985 * update_subgroup_ready_for_merge
986 *
987 * Returns TRUE if this subgroup is in a state that allows it to be
988 * merged into another subgroup.
989 */
990 static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
991 {
992
993 /*
994 * Not ready if there are any encoded packets waiting to be written
995 * out to peers.
996 */
997 if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
998 return 0;
999
1000 /*
1001 * Not ready if there are enqueued updates waiting to be encoded.
1002 */
1003 if (!advertise_list_is_empty(subgrp))
1004 return 0;
1005
1006 /*
1007 * Don't attempt to merge a subgroup that needs a refresh. For one,
1008 * we can't determine if the adj_out of such a group matches that of
1009 * another group.
1010 */
1011 if (update_subgroup_needs_refresh(subgrp))
1012 return 0;
1013
1014 return 1;
1015 }
1016
1017 /*
1018 * update_subgrp_can_merge_into
1019 *
1020 * Returns TRUE if the first subgroup can merge into the second
1021 * subgroup.
1022 */
1023 static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
1024 struct update_subgroup *target)
1025 {
1026
1027 if (subgrp == target)
1028 return 0;
1029
1030 /*
1031 * Both must have processed the BRIB to the same point in order to
1032 * be merged.
1033 */
1034 if (subgrp->version != target->version)
1035 return 0;
1036
1037 if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
1038 != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
1039 return 0;
1040
1041 if (subgrp->adj_count != target->adj_count)
1042 return 0;
1043
1044 return update_subgroup_ready_for_merge(target);
1045 }
1046
1047 /*
1048 * update_subgroup_merge
1049 *
1050 * Merge the first subgroup into the second one.
1051 */
1052 static void update_subgroup_merge(struct update_subgroup *subgrp,
1053 struct update_subgroup *target,
1054 const char *reason)
1055 {
1056 struct peer_af *paf;
1057 int result;
1058 int peer_count;
1059
1060 assert(subgrp->adj_count == target->adj_count);
1061
1062 peer_count = subgrp->peer_count;
1063
1064 while (1) {
1065 paf = LIST_FIRST(&subgrp->peers);
1066 if (!paf)
1067 break;
1068
1069 update_subgroup_remove_peer_internal(subgrp, paf);
1070
1071 /*
1072 * Add the peer to the target subgroup, while making sure that
1073 * any currently enqueued packets won't be sent to it. Enqueued
1074 * packets could, for example, result in an unnecessary withdraw
1075 * followed by an advertise.
1076 */
1077 update_subgroup_add_peer(target, paf, 0);
1078 }
1079
1080 SUBGRP_INCR_STAT(target, merge_events);
1081
1082 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1083 zlog_debug("u%" PRIu64 ":s%" PRIu64
1084 " (%d peers) merged into u%" PRIu64 ":s%" PRIu64
1085 ", "
1086 "trigger: %s",
1087 subgrp->update_group->id, subgrp->id, peer_count,
1088 target->update_group->id, target->id,
1089 reason ? reason : "unknown");
1090
1091 result = update_subgroup_check_delete(subgrp);
1092 assert(result);
1093 }
1094
1095 /*
1096 * update_subgroup_check_merge
1097 *
1098 * Merge this subgroup into another subgroup if possible.
1099 *
1100 * Returns TRUE if the subgroup has been merged. The subgroup pointer
1101 * should not be accessed in this case.
1102 */
1103 int update_subgroup_check_merge(struct update_subgroup *subgrp,
1104 const char *reason)
1105 {
1106 struct update_subgroup *target;
1107
1108 if (!update_subgroup_ready_for_merge(subgrp))
1109 return 0;
1110
1111 /*
1112 * Look for a subgroup to merge into.
1113 */
1114 UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
1115 if (update_subgroup_can_merge_into(subgrp, target))
1116 break;
1117 }
1118
1119 if (!target)
1120 return 0;
1121
1122 update_subgroup_merge(subgrp, target, reason);
1123 return 1;
1124 }
1125
1126 /*
1127 * update_subgroup_merge_check_thread_cb
1128 */
1129 static int update_subgroup_merge_check_thread_cb(struct thread *thread)
1130 {
1131 struct update_subgroup *subgrp;
1132
1133 subgrp = THREAD_ARG(thread);
1134
1135 subgrp->t_merge_check = NULL;
1136
1137 update_subgroup_check_merge(subgrp, "triggered merge check");
1138 return 0;
1139 }
1140
1141 /*
1142 * update_subgroup_trigger_merge_check
1143 *
1144 * Triggers a call to update_subgroup_check_merge() on a clean context.
1145 *
1146 * @param force If true, the merge check will be triggered even if the
1147 * subgroup doesn't currently look ready for a merge.
1148 *
1149 * Returns TRUE if a merge check will be performed shortly.
1150 */
1151 int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
1152 int force)
1153 {
1154 if (subgrp->t_merge_check)
1155 return 1;
1156
1157 if (!force && !update_subgroup_ready_for_merge(subgrp))
1158 return 0;
1159
1160 subgrp->t_merge_check = NULL;
1161 thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
1162 subgrp, 0, &subgrp->t_merge_check);
1163
1164 SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
1165
1166 return 1;
1167 }
1168
1169 /*
1170 * update_subgroup_copy_adj_out
1171 *
1172 * Helper function that clones the adj out (state about advertised
1173 * routes) from one subgroup to another. It assumes that the adj out
1174 * of the target subgroup is empty.
1175 */
1176 static void update_subgroup_copy_adj_out(struct update_subgroup *source,
1177 struct update_subgroup *dest)
1178 {
1179 struct bgp_adj_out *aout, *aout_copy;
1180
1181 SUBGRP_FOREACH_ADJ (source, aout) {
1182 /*
1183 * Copy the adj out.
1184 */
1185 aout_copy =
1186 bgp_adj_out_alloc(dest, aout->rn, aout->addpath_tx_id);
1187 aout_copy->attr =
1188 aout->attr ? bgp_attr_intern(aout->attr) : NULL;
1189 }
1190
1191 dest->scount = source->scount;
1192 }
1193
1194 /*
1195 * update_subgroup_copy_packets
1196 *
1197 * Copy packets after and including the given packet to the subgroup
1198 * 'dest'.
1199 *
1200 * Returns the number of packets copied.
1201 */
1202 static int update_subgroup_copy_packets(struct update_subgroup *dest,
1203 struct bpacket *pkt)
1204 {
1205 int count;
1206
1207 count = 0;
1208 while (pkt && pkt->buffer) {
1209 bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
1210 &pkt->arr);
1211 count++;
1212 pkt = bpacket_next(pkt);
1213 }
1214
1215 bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
1216
1217 return count;
1218 }
1219
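/*
 * The updgrp_*_update helpers below re-resolve the policy object
 * (prefix-list, filter-list, distribute-list or route-map) cached in
 * the update group's configuration when the policy with the given
 * name changes. Each returns nonzero if the update group references
 * that policy.
 */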
1220 static int updgrp_prefix_list_update(struct update_group *updgrp,
1221 const char *name)
1222 {
1223 struct peer *peer;
1224 struct bgp_filter *filter;
1225
1226 peer = UPDGRP_PEER(updgrp);
1227 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1228
1229 if (PREFIX_LIST_OUT_NAME(filter)
1230 && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
1231 PREFIX_LIST_OUT(filter) = prefix_list_lookup(
1232 UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
1233 return 1;
1234 }
1235 return 0;
1236 }
1237
1238 static int updgrp_filter_list_update(struct update_group *updgrp,
1239 const char *name)
1240 {
1241 struct peer *peer;
1242 struct bgp_filter *filter;
1243
1244 peer = UPDGRP_PEER(updgrp);
1245 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1246
1247 if (FILTER_LIST_OUT_NAME(filter)
1248 && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
1249 FILTER_LIST_OUT(filter) =
1250 as_list_lookup(FILTER_LIST_OUT_NAME(filter));
1251 return 1;
1252 }
1253 return 0;
1254 }
1255
1256 static int updgrp_distribute_list_update(struct update_group *updgrp,
1257 const char *name)
1258 {
1259 struct peer *peer;
1260 struct bgp_filter *filter;
1261
1262 peer = UPDGRP_PEER(updgrp);
1263 filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
1264
1265 if (DISTRIBUTE_OUT_NAME(filter)
1266 && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
1267 DISTRIBUTE_OUT(filter) = access_list_lookup(
1268 UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
1269 return 1;
1270 }
1271 return 0;
1272 }
1273
1274 static int updgrp_route_map_update(struct update_group *updgrp,
1275 const char *name, int *def_rmap_changed)
1276 {
1277 struct peer *peer;
1278 struct bgp_filter *filter;
1279 int changed = 0;
1280 afi_t afi;
1281 safi_t safi;
1282
1283 peer = UPDGRP_PEER(updgrp);
1284 afi = UPDGRP_AFI(updgrp);
1285 safi = UPDGRP_SAFI(updgrp);
1286 filter = &peer->filter[afi][safi];
1287
1288 if (ROUTE_MAP_OUT_NAME(filter)
1289 && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
1290 ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
1291
1292 changed = 1;
1293 }
1294
1295 if (UNSUPPRESS_MAP_NAME(filter)
1296 && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
1297 UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
1298 changed = 1;
1299 }
1300
1301 /* process default-originate route-map */
1302 if (peer->default_rmap[afi][safi].name
1303 && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
1304 peer->default_rmap[afi][safi].map =
1305 route_map_lookup_by_name(name);
1306 if (def_rmap_changed)
1307 *def_rmap_changed = 1;
1308 }
1309 return changed;
1310 }
1311
1312 /*
1313 * hash iteration callback function to process a policy change for an
1314 * update group. Check if the changed policy matches the updgrp's
1315 * outbound route-map or unsuppress-map or default-originate map or
1316 * filter-list or prefix-list or distribute-list.
1317 * Trigger update generation accordingly.
1318 */
1319 static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
1320 {
1321 struct updwalk_context *ctx = arg;
1322 struct update_subgroup *subgrp;
1323 int changed = 0;
1324 int def_changed = 0;
1325
1326 if (!updgrp || !ctx || !ctx->policy_name)
1327 return UPDWALK_CONTINUE;
1328
1329 switch (ctx->policy_type) {
1330 case BGP_POLICY_ROUTE_MAP:
1331 changed = updgrp_route_map_update(updgrp, ctx->policy_name,
1332 &def_changed);
1333 break;
1334 case BGP_POLICY_FILTER_LIST:
1335 changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
1336 break;
1337 case BGP_POLICY_PREFIX_LIST:
1338 changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
1339 break;
1340 case BGP_POLICY_DISTRIBUTE_LIST:
1341 changed =
1342 updgrp_distribute_list_update(updgrp, ctx->policy_name);
1343 break;
1344 default:
1345 break;
1346 }
1347
1348 /* If not doing route update, return after updating "config" */
1349 if (!ctx->policy_route_update)
1350 return UPDWALK_CONTINUE;
1351
1352 /* If nothing has changed, return after updating "config" */
1353 if (!changed && !def_changed)
1354 return UPDWALK_CONTINUE;
1355
1356 /*
1357 * If something has changed, then at the beginning of a
1358 * route-map modification event we mark each subgroup's
1359 * needs-refresh bit. For one, this signals to whoever is
1360 * interested that the subgroup needs a refresh. Second, it
1361 * prevents a premature merge of this subgroup with another
1362 * one before a complete (outbound) refresh of the subgroup
1363 * has been performed.
1364 */
1365 if (ctx->policy_event_start_flag) {
1366 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1367 update_subgroup_set_needs_refresh(subgrp, 1);
1368 }
1369 return UPDWALK_CONTINUE;
1370 }
1371
1372 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1373 if (changed) {
1374 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1375 zlog_debug(
1376 "u%" PRIu64 ":s%" PRIu64
1377 " announcing routes upon policy %s (type %d) change",
1378 updgrp->id, subgrp->id,
1379 ctx->policy_name, ctx->policy_type);
1380 subgroup_announce_route(subgrp);
1381 }
1382 if (def_changed) {
1383 if (bgp_debug_update(NULL, NULL, updgrp, 0))
1384 zlog_debug(
1385 "u%" PRIu64 ":s%" PRIu64
1386 " announcing default upon default routemap %s change",
1387 updgrp->id, subgrp->id,
1388 ctx->policy_name);
1389 subgroup_default_originate(subgrp, 0);
1390 }
1391 update_subgroup_set_needs_refresh(subgrp, 0);
1392 }
1393 return UPDWALK_CONTINUE;
1394 }
1395
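/*
 * update_group_walkcb
 *
 * Adapter between hash_walk() and the updgrp_walkcb callbacks:
 * unpack the update group from the hash bucket and invoke the
 * user-supplied callback.
 */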
1396 static int update_group_walkcb(struct hash_backet *backet, void *arg)
1397 {
1398 struct update_group *updgrp = backet->data;
1399 struct updwalk_context *wctx = arg;
1400 int ret = (*wctx->cb)(updgrp, wctx->context);
1401 return ret;
1402 }
1403
1404 static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
1405 void *arg)
1406 {
1407 struct update_subgroup *subgrp;
1408 struct update_subgroup *tmp_subgrp;
1409 const char *reason = arg;
1410
1411 UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
1412 update_subgroup_check_merge(subgrp, reason);
1413 return UPDWALK_CONTINUE;
1414 }
1415
1416 /********************
1417 * PUBLIC FUNCTIONS
1418 ********************/
1419
1420 /*
1421 * trigger function when a policy (route-map/filter-list/prefix-list/
1422 * distribute-list etc.) content changes. Go through all the
1423 * update groups and process the change.
1424 *
1425 * bgp: the bgp instance
1426 * ptype: the type of policy that got modified, see bgpd.h
1427 * pname: name of the policy
1428 * route_update: flag to control if an automatic update generation should
1429 * occur
1430 * start_event: flag that indicates if it's the beginning of the change.
1431 * Esp. when the user is changing the content interactively
1432 * over multiple statements. Useful to set dirty flag on
1433 * update groups.
1434 */
1435 void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype,
1436 const char *pname, int route_update,
1437 int start_event)
1438 {
1439 struct updwalk_context ctx;
1440
1441 memset(&ctx, 0, sizeof(ctx));
1442 ctx.policy_type = ptype;
1443 ctx.policy_name = pname;
1444 ctx.policy_route_update = route_update;
1445 ctx.policy_event_start_flag = start_event;
1446 ctx.flags = 0;
1447
1448 update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
1449 }
1450
1451 /*
1452 * update_subgroup_split_peer
1453 *
1454 * Ensure that the given peer is in a subgroup of its own in the
1455 * specified update group.
1456 */
1457 void update_subgroup_split_peer(struct peer_af *paf,
1458 struct update_group *updgrp)
1459 {
1460 struct update_subgroup *old_subgrp, *subgrp;
1461 uint64_t old_id;
1462
1463
1464 old_subgrp = paf->subgroup;
1465
1466 if (!updgrp)
1467 updgrp = old_subgrp->update_group;
1468
1469 /*
1470 * If the peer is alone in its subgroup, reuse the existing
1471 * subgroup.
1472 */
1473 if (old_subgrp->peer_count == 1) {
1474 if (updgrp == old_subgrp->update_group)
1475 return;
1476
1477 subgrp = old_subgrp;
1478 old_id = old_subgrp->update_group->id;
1479
1480 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1481 UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
1482 }
1483
1484 update_group_remove_subgroup(old_subgrp->update_group,
1485 old_subgrp);
1486 update_group_add_subgroup(updgrp, subgrp);
1487
1488 if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
1489 UPDGRP_PEER_DBG_EN(updgrp);
1490 }
1491 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1492 zlog_debug("u%" PRIu64 ":s%" PRIu64
1493 " peer %s moved to u%" PRIu64 ":s%" PRIu64,
1494 old_id, subgrp->id, paf->peer->host,
1495 updgrp->id, subgrp->id);
1496
1497 /*
1498 * The state of the subgroup (adj_out, advs, packet queue etc)
1499 * is consistent internally, but may not be identical to other
1500 * subgroups in the new update group even if the version number
1501 * matches up. Make sure a full refresh is done before the
1502 * subgroup is merged with another.
1503 */
1504 update_subgroup_set_needs_refresh(subgrp, 1);
1505
1506 SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
1507 return;
1508 }
1509
1510 /*
1511 * Create a new subgroup under the specified update group, and copy
1512 * over relevant state to it.
1513 */
1514 subgrp = update_subgroup_create(updgrp);
1515 update_subgroup_inherit_info(subgrp, old_subgrp);
1516
1517 subgrp->split_from.update_group_id = old_subgrp->update_group->id;
1518 subgrp->split_from.subgroup_id = old_subgrp->id;
1519
1520 /*
1521 * Copy out relevant state from the old subgroup.
1522 */
1523 update_subgroup_copy_adj_out(paf->subgroup, subgrp);
1524 update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);
1525
1526 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1527 zlog_debug("u%" PRIu64 ":s%" PRIu64
1528 " peer %s split and moved into u%" PRIu64
1529 ":s%" PRIu64,
1530 paf->subgroup->update_group->id, paf->subgroup->id,
1531 paf->peer->host, updgrp->id, subgrp->id);
1532
1533 SUBGRP_INCR_STAT(paf->subgroup, split_events);
1534
1535 /*
1536 * Since queued advs were left behind, this new subgroup needs a
1537 * refresh.
1538 */
1539 update_subgroup_set_needs_refresh(subgrp, 1);
1540
1541 /*
1542 * Remove peer from old subgroup, and add it to the new one.
1543 */
1544 update_subgroup_remove_peer(paf->subgroup, paf);
1545
1546 update_subgroup_add_peer(subgrp, paf, 1);
1547 }
1548
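/*
 * update_bgp_group_init
 *
 * Create the per-AF update group hash tables for a bgp instance.
 */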
1549 void update_bgp_group_init(struct bgp *bgp)
1550 {
1551 int afid;
1552
1553 AF_FOREACH (afid)
1554 bgp->update_groups[afid] =
1555 hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
1556 "BGP Update Group Hash");
1557 }
1558
1559 void update_bgp_group_free(struct bgp *bgp)
1560 {
1561 int afid;
1562
1563 AF_FOREACH (afid) {
1564 if (bgp->update_groups[afid]) {
1565 hash_free(bgp->update_groups[afid]);
1566 bgp->update_groups[afid] = NULL;
1567 }
1568 }
1569 }
1570
1571 void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
1572 uint64_t subgrp_id)
1573 {
1574 struct updwalk_context ctx;
1575 memset(&ctx, 0, sizeof(ctx));
1576 ctx.vty = vty;
1577 ctx.subgrp_id = subgrp_id;
1578
1579 update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);
1580 }
1581
1582 /*
1583 * update_group_show_stats
1584 *
1585 * Show global statistics about update groups.
1586 */
1587 void update_group_show_stats(struct bgp *bgp, struct vty *vty)
1588 {
1589 vty_out(vty, "Update groups created: %u\n",
1590 bgp->update_group_stats.updgrps_created);
1591 vty_out(vty, "Update groups deleted: %u\n",
1592 bgp->update_group_stats.updgrps_deleted);
1593 vty_out(vty, "Update subgroups created: %u\n",
1594 bgp->update_group_stats.subgrps_created);
1595 vty_out(vty, "Update subgroups deleted: %u\n",
1596 bgp->update_group_stats.subgrps_deleted);
1597 vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
1598 vty_out(vty, "Prune events: %u\n",
1599 bgp->update_group_stats.prune_events);
1600 vty_out(vty, "Merge events: %u\n",
1601 bgp->update_group_stats.merge_events);
1602 vty_out(vty, "Split events: %u\n",
1603 bgp->update_group_stats.split_events);
1604 vty_out(vty, "Update group switch events: %u\n",
1605 bgp->update_group_stats.updgrp_switch_events);
1606 vty_out(vty, "Peer route refreshes combined: %u\n",
1607 bgp->update_group_stats.peer_refreshes_combined);
1608 vty_out(vty, "Merge checks triggered: %u\n",
1609 bgp->update_group_stats.merge_checks_triggered);
1610 }
1611
1612 /*
1613 * update_group_adjust_peer
1614 */
1615 void update_group_adjust_peer(struct peer_af *paf)
1616 {
1617 struct update_group *updgrp;
1618 struct update_subgroup *subgrp, *old_subgrp;
1619 struct peer *peer;
1620
1621 if (!paf)
1622 return;
1623
1624 peer = PAF_PEER(paf);
1625 if (!peer_established(peer)) {
1626 return;
1627 }
1628
1629 if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
1630 return;
1631 }
1632
1633 if (!peer->afc_nego[paf->afi][paf->safi]) {
1634 return;
1635 }
1636
1637 updgrp = update_group_find(paf);
1638 if (!updgrp) {
1639 updgrp = update_group_create(paf);
1640 if (!updgrp) {
1641 flog_err(EC_BGP_UPDGRP_CREATE,
1642 "couldn't create update group for peer %s",
1643 paf->peer->host);
1644 return;
1645 }
1646 }
1647
1648 old_subgrp = paf->subgroup;
1649
1650 if (old_subgrp) {
1651
1652 /*
1653 * If the update group of the peer is unchanged, the peer can
1654 * stay
1655 * in its existing subgroup and we're done.
1656 */
1657 if (old_subgrp->update_group == updgrp)
1658 return;
1659
1660 /*
1661 * The peer is switching between update groups. Put it in its
1662 * own subgroup under the new update group.
1663 */
1664 update_subgroup_split_peer(paf, updgrp);
1665 return;
1666 }
1667
1668 subgrp = update_subgroup_find(updgrp, paf);
1669 if (!subgrp) {
1670 subgrp = update_subgroup_create(updgrp);
1671 if (!subgrp)
1672 return;
1673 }
1674
1675 update_subgroup_add_peer(subgrp, paf, 1);
1676 if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
1677 zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
1678 subgrp->id, paf->peer->host);
1679
1680 return;
1681 }
1682
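/*
 * update_group_adjust_soloness
 *
 * Apply or remove "solo" (lonesoul) status for a peer, or for every
 * member of its peer-group, re-announcing routes to established
 * peers as needed.
 */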
1683 int update_group_adjust_soloness(struct peer *peer, int set)
1684 {
1685 struct peer_group *group;
1686 struct listnode *node, *nnode;
1687
1688 if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
1689 peer_lonesoul_or_not(peer, set);
1690 if (peer->status == Established)
1691 bgp_announce_route_all(peer);
1692 } else {
1693 group = peer->group;
1694 for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
1695 peer_lonesoul_or_not(peer, set);
1696 if (peer->status == Established)
1697 bgp_announce_route_all(peer);
1698 }
1699 }
1700 return 0;
1701 }
1702
1703 /*
1704 * update_subgroup_rib
1705 */
1706 struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
1707 {
1708 struct bgp *bgp;
1709
1710 bgp = SUBGRP_INST(subgrp);
1711 if (!bgp)
1712 return NULL;
1713
1714 return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
1715 }
1716
1717 void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
1718 updgrp_walkcb cb, void *ctx)
1719 {
1720 struct updwalk_context wctx;
1721 int afid;
1722
1723 if (!bgp)
1724 return;
1725 afid = afindex(afi, safi);
1726 if (afid >= BGP_AF_MAX)
1727 return;
1728
1729 memset(&wctx, 0, sizeof(wctx));
1730 wctx.cb = cb;
1731 wctx.context = ctx;
1732
1733 if (bgp->update_groups[afid])
1734 hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
1735 }
1736
1737 void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
1738 {
1739 afi_t afi;
1740 safi_t safi;
1741
1742 FOREACH_AFI_SAFI (afi, safi) {
1743 update_group_af_walk(bgp, afi, safi, cb, ctx);
1744 }
1745 }
1746
1747 void update_group_periodic_merge(struct bgp *bgp)
1748 {
1749 char reason[] = "periodic merge check";
1750
1751 update_group_walk(bgp, update_group_periodic_merge_walkcb,
1752 (void *)reason);
1753 }
1754
1755 static int
1756 update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
1757 void *arg)
1758 {
1759 struct update_subgroup *subgrp;
1760 struct peer *peer;
1761 afi_t afi;
1762 safi_t safi;
1763
1764 UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
1765 peer = SUBGRP_PEER(subgrp);
1766 afi = SUBGRP_AFI(subgrp);
1767 safi = SUBGRP_SAFI(subgrp);
1768
1769 if (peer->default_rmap[afi][safi].name) {
1770 subgroup_default_originate(subgrp, 0);
1771 }
1772 }
1773
1774 return UPDWALK_CONTINUE;
1775 }
1776
1777 int update_group_refresh_default_originate_route_map(struct thread *thread)
1778 {
1779 struct bgp *bgp;
1780 char reason[] = "refresh default-originate route-map";
1781
1782 bgp = THREAD_ARG(thread);
1783 update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
1784 reason);
1785 THREAD_TIMER_OFF(bgp->t_rmap_def_originate_eval);
1786 bgp_unlock(bgp);
1787
1788 return (0);
1789 }
1790
1791 /*
1792 * peer_af_announce_route
1793 *
1794 * Refreshes routes out to a peer_af immediately.
1795 *
1796 * If the combine parameter is TRUE, then this function will try to
1797 * gather other peers in the subgroup for which a route announcement
1798 * is pending and efficiently announce routes to all of them.
1799 *
1800 * For now, the 'combine' option has an effect only if all peers in
1801 * the subgroup have a route announcement pending.
1802 */
1803 void peer_af_announce_route(struct peer_af *paf, int combine)
1804 {
1805 struct update_subgroup *subgrp;
1806 struct peer_af *cur_paf;
1807 int all_pending;
1808
1809 subgrp = paf->subgroup;
1810 all_pending = 0;
1811
1812 if (combine) {
1813 /*
1814 * If there are other peers in the old subgroup that also need
1815 * routes to be announced, pull them into the peer's new
1816 * subgroup.
1817 * Combine route announcement with other peers if possible.
1818 *
1819 * For now, we combine only if all peers in the subgroup have an
1820 * announcement pending.
1821 */
1822 all_pending = 1;
1823
1824 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1825 if (cur_paf == paf)
1826 continue;
1827
1828 if (cur_paf->t_announce_route)
1829 continue;
1830
1831 all_pending = 0;
1832 break;
1833 }
1834 }
1835 /*
1836 * Announce to the peer alone if we were not asked to combine peers,
1837 * or if some peers don't have a route announcement pending.
1838 */
1839 if (!combine || !all_pending) {
1840 update_subgroup_split_peer(paf, NULL);
1841 if (!paf->subgroup)
1842 return;
1843
1844 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1845 zlog_debug("u%" PRIu64 ":s%" PRIu64
1846 " %s announcing routes",
1847 subgrp->update_group->id, subgrp->id,
1848 paf->peer->host);
1849
1850 subgroup_announce_route(paf->subgroup);
1851 return;
1852 }
1853
1854 /*
1855 * We will announce routes to the entire subgroup.
1856 *
1857 * First stop refresh timers on all the other peers.
1858 */
1859 SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
1860 if (cur_paf == paf)
1861 continue;
1862
1863 bgp_stop_announce_route_timer(cur_paf);
1864 }
1865
1866 if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
1867 zlog_debug("u%" PRIu64 ":s%" PRIu64
1868 " announcing routes to %s, combined into %d peers",
1869 subgrp->update_group->id, subgrp->id,
1870 paf->peer->host, subgrp->peer_count);
1871
1872 subgroup_announce_route(subgrp);
1873
1874 SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
1875 subgrp->peer_count - 1);
1876 }
1877
1878 void subgroup_trigger_write(struct update_subgroup *subgrp)
1879 {
1880 struct peer_af *paf;
1881
1882 /*
1883 * For each peer in the subgroup, schedule a job to pull packets from
1884 * the subgroup output queue into their own output queue. This action
1885 * will trigger a write job on the I/O thread.
1886 */
1887 SUBGRP_FOREACH_PEER (subgrp, paf)
1888 if (paf->peer->status == Established)
1889 thread_add_timer_msec(
1890 bm->master, bgp_generate_updgrp_packets,
1891 paf->peer, 0,
1892 &paf->peer->t_generate_updgrp_packets);
1893 }
1894
1895 int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
1896 {
1897 UPDGRP_PEER_DBG_OFF(updgrp);
1898 return UPDWALK_CONTINUE;
1899 }
1900
1901 /* Return true if we should addpath encode NLRI to this peer */
1902 int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
1903 {
1904 return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV)
1905 && CHECK_FLAG(peer->af_cap[afi][safi],
1906 PEER_CAP_ADDPATH_AF_RX_RCV));
1907 }