]>
Commit | Line | Data |
---|---|---|
3f9c7369 DS |
1 | /** |
2 | * bgp_updgrp.c: BGP update group structures | |
3 | * | |
4 | * @copyright Copyright (C) 2014 Cumulus Networks, Inc. | |
5 | * | |
6 | * @author Avneesh Sachdev <avneesh@sproute.net> | |
7 | * @author Rajesh Varadarajan <rajesh@sproute.net> | |
8 | * @author Pradosh Mohapatra <pradosh@sproute.net> | |
9 | * | |
10 | * This file is part of GNU Zebra. | |
11 | * | |
12 | * GNU Zebra is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by the | |
14 | * Free Software Foundation; either version 2, or (at your option) any | |
15 | * later version. | |
16 | * | |
17 | * GNU Zebra is distributed in the hope that it will be useful, but | |
18 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
20 | * General Public License for more details. | |
21 | * | |
896014f4 DL |
22 | * You should have received a copy of the GNU General Public License along |
23 | * with this program; see the file COPYING; if not, write to the Free Software | |
24 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
3f9c7369 DS |
25 | */ |
26 | ||
27 | #include <zebra.h> | |
28 | ||
29 | #include "prefix.h" | |
30 | #include "thread.h" | |
31 | #include "buffer.h" | |
32 | #include "stream.h" | |
33 | #include "command.h" | |
34 | #include "sockunion.h" | |
35 | #include "network.h" | |
36 | #include "memory.h" | |
37 | #include "filter.h" | |
38 | #include "routemap.h" | |
3f9c7369 DS |
39 | #include "log.h" |
40 | #include "plist.h" | |
41 | #include "linklist.h" | |
42 | #include "workqueue.h" | |
43 | #include "hash.h" | |
44 | #include "jhash.h" | |
45 | #include "queue.h" | |
46 | ||
47 | #include "bgpd/bgpd.h" | |
48 | #include "bgpd/bgp_table.h" | |
49 | #include "bgpd/bgp_debug.h" | |
14454c9f | 50 | #include "bgpd/bgp_errors.h" |
3f9c7369 DS |
51 | #include "bgpd/bgp_fsm.h" |
52 | #include "bgpd/bgp_advertise.h" | |
53 | #include "bgpd/bgp_packet.h" | |
54 | #include "bgpd/bgp_updgrp.h" | |
55 | #include "bgpd/bgp_route.h" | |
56 | #include "bgpd/bgp_filter.h" | |
2fc102e1 | 57 | #include "bgpd/bgp_io.h" |
3f9c7369 DS |
58 | |
59 | /******************** | |
60 | * PRIVATE FUNCTIONS | |
61 | ********************/ | |
62 | ||
63 | /** | |
64 | * assign a unique ID to update group and subgroup. Mostly for display/ | |
65 | * debugging purposes. It's a 64-bit space - used leisurely without a | |
66 | * worry about its wrapping and about filling gaps. While at it, timestamp | |
67 | * the creation. | |
68 | */ | |
d62a17ae | 69 | static void update_group_checkin(struct update_group *updgrp) |
3f9c7369 | 70 | { |
d62a17ae | 71 | updgrp->id = ++bm->updgrp_idspace; |
72 | updgrp->uptime = bgp_clock(); | |
3f9c7369 DS |
73 | } |
74 | ||
d62a17ae | 75 | static void update_subgroup_checkin(struct update_subgroup *subgrp, |
76 | struct update_group *updgrp) | |
3f9c7369 | 77 | { |
d62a17ae | 78 | subgrp->id = ++bm->subgrp_idspace; |
79 | subgrp->uptime = bgp_clock(); | |
3f9c7369 DS |
80 | } |
81 | ||
ef56aee4 DA |
/*
 * Allocate the per-subgroup synchronisation state: the advertisement
 * FIFOs, the attribute hash, and the packet-construction work buffers.
 */
static void sync_init(struct update_subgroup *subgrp,
		      struct update_group *updgrp)
{
	struct peer *peer = UPDGRP_PEER(updgrp);

	subgrp->sync =
		XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
	bgp_adv_fifo_init(&subgrp->sync->update);
	bgp_adv_fifo_init(&subgrp->sync->withdraw);
	bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
	subgrp->hash =
		hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");

	/* We use a larger buffer for subgrp->work in the event that:
	 * - We RX a BGP_UPDATE where the attributes alone are just
	 *   under 4096 or 65535 (if Extended Message capability negotiated).
	 * - The user configures an outbound route-map that does many as-path
	 *   prepends or adds many communities.  At most they can have
	 *   CMD_ARGC_MAX args in a route-map so there is a finite limit on
	 *   how large they can make the attributes.
	 *
	 * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid
	 * bounds checking for every single attribute as we construct an
	 * UPDATE.
	 */
	subgrp->work = stream_new(peer->max_packet_size
				  + BGP_MAX_PACKET_SIZE_OVERFLOW);
	subgrp->scratch = stream_new(peer->max_packet_size);
}
113 | ||
d62a17ae | 114 | static void sync_delete(struct update_subgroup *subgrp) |
3f9c7369 | 115 | { |
0a22ddfb | 116 | XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync); |
d62a17ae | 117 | if (subgrp->hash) |
118 | hash_free(subgrp->hash); | |
119 | subgrp->hash = NULL; | |
120 | if (subgrp->work) | |
121 | stream_free(subgrp->work); | |
122 | subgrp->work = NULL; | |
123 | if (subgrp->scratch) | |
124 | stream_free(subgrp->scratch); | |
125 | subgrp->scratch = NULL; | |
3f9c7369 DS |
126 | } |
127 | ||
128 | /** | |
129 | * conf_copy | |
130 | * | |
131 | * copy only those fields that are relevant to update group match | |
132 | */ | |
static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
		      safi_t safi)
{
	struct bgp_filter *srcfilter;
	struct bgp_filter *dstfilter;

	srcfilter = &src->filter[afi][safi];
	dstfilter = &dst->filter[afi][safi];

	/* Scalar fields that participate in update-group matching. */
	dst->bgp = src->bgp;
	dst->sort = src->sort;
	dst->as = src->as;
	dst->v_routeadv = src->v_routeadv;
	dst->flags = src->flags;
	dst->af_flags[afi][safi] = src->af_flags[afi][safi];
	dst->pmax_out[afi][safi] = src->pmax_out[afi][safi];
	dst->max_packet_size = src->max_packet_size;
	/* Drop any host string left from a previous copy before duplicating.
	 * NOTE(review): assumes XFREE NULLs/accepts the stale pointer and
	 * that src->host is non-NULL for XSTRDUP — confirm against callers.
	 */
	XFREE(MTYPE_BGP_PEER_HOST, dst->host);

	dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
	dst->cap = src->cap;
	dst->af_cap[afi][safi] = src->af_cap[afi][safi];
	dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
	dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
	dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
	dst->local_as = src->local_as;
	dst->change_local_as = src->change_local_as;
	dst->shared_network = src->shared_network;
	memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));

	dst->group = src->group;

	/* Filter/route-map NAMES are deep-copied and therefore owned by dst
	 * (conf_release() must free them); the compiled map/list pointers
	 * are shared with src, not owned.
	 */
	if (src->default_rmap[afi][safi].name) {
		dst->default_rmap[afi][safi].name =
			XSTRDUP(MTYPE_ROUTE_MAP_NAME,
				src->default_rmap[afi][safi].name);
		dst->default_rmap[afi][safi].map =
			src->default_rmap[afi][safi].map;
	}

	if (DISTRIBUTE_OUT_NAME(srcfilter)) {
		DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
		DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
	}

	if (PREFIX_LIST_OUT_NAME(srcfilter)) {
		PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
		PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
	}

	if (FILTER_LIST_OUT_NAME(srcfilter)) {
		FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
		FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
	}

	if (ROUTE_MAP_OUT_NAME(srcfilter)) {
		ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
		ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
	}

	if (UNSUPPRESS_MAP_NAME(srcfilter)) {
		UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
		UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
	}

	if (ADVERTISE_MAP_NAME(srcfilter)) {
		ADVERTISE_MAP_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, ADVERTISE_MAP_NAME(srcfilter));
		ADVERTISE_MAP(dstfilter) = ADVERTISE_MAP(srcfilter);
		ADVERTISE_CONDITION(dstfilter) = ADVERTISE_CONDITION(srcfilter);
	}

	if (CONDITION_MAP_NAME(srcfilter)) {
		CONDITION_MAP_NAME(dstfilter) = XSTRDUP(
			MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter));
		CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter);
	}
}
216 | ||
217 | /** | |
6e919709 | 218 | * since we did a bunch of XSTRDUP's in conf_copy, time to free them up |
3f9c7369 | 219 | */ |
static void conf_release(struct peer *src, afi_t afi, safi_t safi)
{
	struct bgp_filter *srcfilter;

	srcfilter = &src->filter[afi][safi];

	/* Free exactly the strings conf_copy() XSTRDUP'd; the compiled
	 * map/list pointers were shared, so they are deliberately left alone.
	 */
	XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.aname);

	XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);

	XFREE(MTYPE_BGP_PEER_HOST, src->host);
}
244 | ||
d62a17ae | 245 | static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf) |
3f9c7369 | 246 | { |
d62a17ae | 247 | struct peer *src; |
248 | struct peer *dst; | |
3f9c7369 | 249 | |
d62a17ae | 250 | if (!updgrp || !paf) |
251 | return; | |
3f9c7369 | 252 | |
d62a17ae | 253 | src = paf->peer; |
254 | dst = updgrp->conf; | |
255 | if (!src || !dst) | |
256 | return; | |
3f9c7369 | 257 | |
d62a17ae | 258 | updgrp->afi = paf->afi; |
259 | updgrp->safi = paf->safi; | |
260 | updgrp->afid = paf->afid; | |
261 | updgrp->bgp = src->bgp; | |
3f9c7369 | 262 | |
d62a17ae | 263 | conf_copy(dst, src, paf->afi, paf->safi); |
3f9c7369 DS |
264 | } |
265 | ||
266 | /** | |
267 | * auxiliary functions to maintain the hash table. | |
268 | * - updgrp_hash_alloc - to create a new entry, passed to hash_get | |
269 | * - updgrp_hash_key_make - makes the key for update group search | |
270 | * - updgrp_hash_cmp - compare two update groups. | |
271 | */ | |
d62a17ae | 272 | static void *updgrp_hash_alloc(void *p) |
3f9c7369 | 273 | { |
d62a17ae | 274 | struct update_group *updgrp; |
275 | const struct update_group *in; | |
276 | ||
277 | in = (const struct update_group *)p; | |
278 | updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group)); | |
279 | memcpy(updgrp, in, sizeof(struct update_group)); | |
280 | updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer)); | |
281 | conf_copy(updgrp->conf, in->conf, in->afi, in->safi); | |
282 | return updgrp; | |
3f9c7369 DS |
283 | } |
284 | ||
285 | /** | |
286 | * The hash value for a peer is computed from the following variables: | |
287 | * v = f( | |
288 | * 1. IBGP (1) or EBGP (2) | |
289 | * 2. FLAGS based on configuration: | |
290 | * LOCAL_AS_NO_PREPEND | |
291 | * LOCAL_AS_REPLACE_AS | |
292 | * 3. AF_FLAGS based on configuration: | |
293 | * Refer to definition in bgp_updgrp.h | |
294 | * 4. (AF-independent) Capability flags: | |
295 | * AS4_RCV capability | |
296 | * 5. (AF-dependent) Capability flags: | |
297 | * ORF_PREFIX_SM_RCV (peer can send prefix ORF) | |
298 | * 6. MRAI | |
299 | * 7. peer-group name | |
300 | * 8. Outbound route-map name (neighbor route-map <> out) | |
301 | * 9. Outbound distribute-list name (neighbor distribute-list <> out) | |
302 | * 10. Outbound prefix-list name (neighbor prefix-list <> out) | |
303 | * 11. Outbound as-list name (neighbor filter-list <> out) | |
304 | * 12. Unsuppress map name (neighbor unsuppress-map <>) | |
305 | * 13. default rmap name (neighbor default-originate route-map <>) | |
306 | * 14. encoding both global and link-local nexthop? | |
307 | * 15. If peer is configured to be a lonesoul, peer ip address | |
308 | * 16. Local-as should match, if configured. | |
309 | * ) | |
310 | */ | |
static unsigned int updgrp_hash_key_make(const void *p)
{
	const struct update_group *updgrp;
	const struct peer *peer;
	const struct bgp_filter *filter;
	uint32_t flags;
	uint32_t key;
	afi_t afi;
	safi_t safi;

/* Jenkins-hash seeds; SEED2 is reserved for the "own update-group"
 * peers hashed by host name below. */
#define SEED1 999331
#define SEED2 2147483647

	updgrp = p;
	peer = updgrp->conf;
	afi = updgrp->afi;
	safi = updgrp->safi;
	flags = peer->af_flags[afi][safi];
	filter = &peer->filter[afi][safi];

	key = 0;

	/* Every field folded into the key here must also be compared in
	 * updgrp_hash_cmp(); otherwise equal confs may land in different
	 * buckets. */
	key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
	key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
	key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
	key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
	key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
	key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
			  key);
	key = jhash_1word(peer->v_routeadv, key);
	key = jhash_1word(peer->change_local_as, key);
	key = jhash_1word(peer->max_packet_size, key);

	if (peer->group)
		key = jhash_1word(jhash(peer->group->name,
					strlen(peer->group->name), SEED1),
				  key);

	/* Outbound policy object names (route-map/distribute/prefix/as-path
	 * lists, unsuppress- and advertise-maps, default-originate map). */
	if (filter->map[RMAP_OUT].name)
		key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
					strlen(filter->map[RMAP_OUT].name),
					SEED1),
				  key);

	if (filter->dlist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
					strlen(filter->dlist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->plist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
					strlen(filter->plist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->aslist[FILTER_OUT].name)
		key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
					strlen(filter->aslist[FILTER_OUT].name),
					SEED1),
				  key);

	if (filter->usmap.name)
		key = jhash_1word(jhash(filter->usmap.name,
					strlen(filter->usmap.name), SEED1),
				  key);

	if (filter->advmap.aname)
		key = jhash_1word(jhash(filter->advmap.aname,
					strlen(filter->advmap.aname), SEED1),
				  key);

	if (peer->default_rmap[afi][safi].name)
		key = jhash_1word(
			jhash(peer->default_rmap[afi][safi].name,
			      strlen(peer->default_rmap[afi][safi].name),
			      SEED1),
			key);

	/* If peer is on a shared network and is exchanging IPv6 prefixes,
	 * it needs to include link-local address. That's different from
	 * non-shared-network peers (nexthop encoded with 32 bytes vs 16
	 * bytes). We create different update groups to take care of that.
	 */
	key = jhash_1word(
		(peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
		key);

	/*
	 * There are certain peers that must get their own update-group:
	 * - lonesoul peers
	 * - peers that negotiated ORF
	 * - maximum-prefix-out is set
	 * Hashing the (unique) host name isolates them.
	 */
	if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
	    || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
	    || CHECK_FLAG(peer->af_cap[afi][safi],
			  PEER_CAP_ORF_PREFIX_SM_OLD_RCV)
	    || CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT))
		key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
				  key);

	return key;
}
415 | ||
74df8d6d | 416 | static bool updgrp_hash_cmp(const void *p1, const void *p2) |
3f9c7369 | 417 | { |
d62a17ae | 418 | const struct update_group *grp1; |
419 | const struct update_group *grp2; | |
420 | const struct peer *pe1; | |
421 | const struct peer *pe2; | |
422 | uint32_t flags1; | |
423 | uint32_t flags2; | |
424 | const struct bgp_filter *fl1; | |
425 | const struct bgp_filter *fl2; | |
426 | afi_t afi; | |
427 | safi_t safi; | |
428 | ||
429 | if (!p1 || !p2) | |
74df8d6d | 430 | return false; |
d62a17ae | 431 | |
432 | grp1 = p1; | |
433 | grp2 = p2; | |
434 | pe1 = grp1->conf; | |
435 | pe2 = grp2->conf; | |
436 | afi = grp1->afi; | |
437 | safi = grp1->safi; | |
438 | flags1 = pe1->af_flags[afi][safi]; | |
439 | flags2 = pe2->af_flags[afi][safi]; | |
440 | fl1 = &pe1->filter[afi][safi]; | |
441 | fl2 = &pe2->filter[afi][safi]; | |
442 | ||
443 | /* put EBGP and IBGP peers in different update groups */ | |
444 | if (pe1->sort != pe2->sort) | |
74df8d6d | 445 | return false; |
d62a17ae | 446 | |
447 | /* check peer flags */ | |
448 | if ((pe1->flags & PEER_UPDGRP_FLAGS) | |
449 | != (pe2->flags & PEER_UPDGRP_FLAGS)) | |
74df8d6d | 450 | return false; |
d62a17ae | 451 | |
452 | /* If there is 'local-as' configured, it should match. */ | |
453 | if (pe1->change_local_as != pe2->change_local_as) | |
74df8d6d | 454 | return false; |
d62a17ae | 455 | |
456 | /* flags like route reflector client */ | |
457 | if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS)) | |
74df8d6d | 458 | return false; |
d62a17ae | 459 | |
dcc68b5e | 460 | if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi]) |
b08047f8 | 461 | return false; |
dcc68b5e | 462 | |
d62a17ae | 463 | if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS) |
464 | != (pe2->cap & PEER_UPDGRP_CAP_FLAGS)) | |
74df8d6d | 465 | return false; |
d62a17ae | 466 | |
467 | if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS) | |
468 | != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)) | |
74df8d6d | 469 | return false; |
d62a17ae | 470 | |
471 | if (pe1->v_routeadv != pe2->v_routeadv) | |
74df8d6d | 472 | return false; |
d62a17ae | 473 | |
474 | if (pe1->group != pe2->group) | |
74df8d6d | 475 | return false; |
d62a17ae | 476 | |
477 | /* route-map names should be the same */ | |
478 | if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name) | |
479 | || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name) | |
480 | || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name | |
481 | && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name))) | |
74df8d6d | 482 | return false; |
d62a17ae | 483 | |
484 | if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name) | |
485 | || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name) | |
486 | || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name | |
487 | && strcmp(fl1->dlist[FILTER_OUT].name, | |
488 | fl2->dlist[FILTER_OUT].name))) | |
74df8d6d | 489 | return false; |
d62a17ae | 490 | |
491 | if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name) | |
492 | || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name) | |
493 | || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name | |
494 | && strcmp(fl1->plist[FILTER_OUT].name, | |
495 | fl2->plist[FILTER_OUT].name))) | |
74df8d6d | 496 | return false; |
d62a17ae | 497 | |
498 | if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name) | |
499 | || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name) | |
500 | || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name | |
501 | && strcmp(fl1->aslist[FILTER_OUT].name, | |
502 | fl2->aslist[FILTER_OUT].name))) | |
74df8d6d | 503 | return false; |
d62a17ae | 504 | |
505 | if ((fl1->usmap.name && !fl2->usmap.name) | |
506 | || (!fl1->usmap.name && fl2->usmap.name) | |
507 | || (fl1->usmap.name && fl2->usmap.name | |
508 | && strcmp(fl1->usmap.name, fl2->usmap.name))) | |
74df8d6d | 509 | return false; |
d62a17ae | 510 | |
7f7940e6 MK |
511 | if ((fl1->advmap.aname && !fl2->advmap.aname) |
512 | || (!fl1->advmap.aname && fl2->advmap.aname) | |
513 | || (fl1->advmap.aname && fl2->advmap.aname | |
514 | && strcmp(fl1->advmap.aname, fl2->advmap.aname))) | |
515 | return false; | |
516 | ||
d62a17ae | 517 | if ((pe1->default_rmap[afi][safi].name |
518 | && !pe2->default_rmap[afi][safi].name) | |
519 | || (!pe1->default_rmap[afi][safi].name | |
520 | && pe2->default_rmap[afi][safi].name) | |
521 | || (pe1->default_rmap[afi][safi].name | |
522 | && pe2->default_rmap[afi][safi].name | |
523 | && strcmp(pe1->default_rmap[afi][safi].name, | |
524 | pe2->default_rmap[afi][safi].name))) | |
74df8d6d | 525 | return false; |
d62a17ae | 526 | |
527 | if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network)) | |
74df8d6d | 528 | return false; |
d62a17ae | 529 | |
530 | if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL) | |
531 | || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) | |
532 | || CHECK_FLAG(pe1->af_cap[afi][safi], | |
533 | PEER_CAP_ORF_PREFIX_SM_OLD_RCV)) | |
534 | && !sockunion_same(&pe1->su, &pe2->su)) | |
74df8d6d | 535 | return false; |
d62a17ae | 536 | |
74df8d6d | 537 | return true; |
3f9c7369 DS |
538 | } |
539 | ||
d62a17ae | 540 | static void peer_lonesoul_or_not(struct peer *peer, int set) |
3f9c7369 | 541 | { |
d62a17ae | 542 | /* no change in status? */ |
543 | if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0)) | |
544 | return; | |
3f9c7369 | 545 | |
d62a17ae | 546 | if (set) |
547 | SET_FLAG(peer->flags, PEER_FLAG_LONESOUL); | |
548 | else | |
549 | UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL); | |
3f9c7369 | 550 | |
d62a17ae | 551 | update_group_adjust_peer_afs(peer); |
3f9c7369 DS |
552 | } |
553 | ||
554 | /* | |
555 | * subgroup_total_packets_enqueued | |
556 | * | |
557 | * Returns the total number of packets enqueued to a subgroup. | |
558 | */ | |
static unsigned int
subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
{
	struct bpacket *pkt;

	pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));

	/* The queue always holds at least the sentinel packet added at
	 * subgroup creation (bpacket_queue_add(..., NULL, NULL)), so the
	 * last packet's version number minus one is the count of real
	 * packets ever enqueued.
	 * NOTE(review): assumes bpacket versions start at 1 and increase
	 * monotonically — confirm in bgp_packet.c.
	 */
	return pkt->ver - 1;
}
568 | ||
/*
 * Walk callback for "show" of update groups: dump one update group (and
 * either all of its subgroups or only the one matching ctx->subgrp_id)
 * to the vty carried in the updwalk_context.
 */
static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
{
	struct updwalk_context *ctx = arg;
	struct vty *vty;
	struct update_subgroup *subgrp;
	struct peer_af *paf;
	struct bgp_filter *filter;
	struct peer *peer = UPDGRP_PEER(updgrp);
	int match = 0;

	if (!ctx)
		return CMD_SUCCESS;

	/* When a specific subgroup id was requested, skip update groups
	 * that do not contain it. */
	if (ctx->subgrp_id) {
		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
			if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
				continue;
			else {
				match = 1;
				break;
			}
		}
	} else {
		match = 1;
	}

	if (!match) {
		/* Since this routine is invoked from a walk, we cannot signal
		 * any */
		/* error here, can only return. */
		return CMD_SUCCESS;
	}

	vty = ctx->vty;

	vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
	vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
	filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
	if (filter->map[RMAP_OUT].name)
		vty_out(vty, " Outgoing route map: %s\n",
			filter->map[RMAP_OUT].name);
	vty_out(vty, " MRAI value (seconds): %d\n", updgrp->conf->v_routeadv);
	if (updgrp->conf->change_local_as)
		vty_out(vty, " Local AS %u%s%s\n",
			updgrp->conf->change_local_as,
			CHECK_FLAG(updgrp->conf->flags,
				   PEER_FLAG_LOCAL_AS_NO_PREPEND)
				? " no-prepend"
				: "",
			CHECK_FLAG(updgrp->conf->flags,
				   PEER_FLAG_LOCAL_AS_REPLACE_AS)
				? " replace-as"
				: "");

	/* Per-subgroup statistics. */
	UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
		if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
			continue;
		vty_out(vty, "\n");
		vty_out(vty, " Update-subgroup %" PRIu64 ":\n", subgrp->id);
		vty_out(vty, " Created: %s",
			timestamp_string(subgrp->uptime));

		if (subgrp->split_from.update_group_id
		    || subgrp->split_from.subgroup_id) {
			vty_out(vty, " Split from group id: %" PRIu64 "\n",
				subgrp->split_from.update_group_id);
			vty_out(vty,
				" Split from subgroup id: %" PRIu64 "\n",
				subgrp->split_from.subgroup_id);
		}

		vty_out(vty, " Join events: %u\n", subgrp->join_events);
		vty_out(vty, " Prune events: %u\n", subgrp->prune_events);
		vty_out(vty, " Merge events: %u\n", subgrp->merge_events);
		vty_out(vty, " Split events: %u\n", subgrp->split_events);
		vty_out(vty, " Update group switch events: %u\n",
			subgrp->updgrp_switch_events);
		vty_out(vty, " Peer refreshes combined: %u\n",
			subgrp->peer_refreshes_combined);
		vty_out(vty, " Merge checks triggered: %u\n",
			subgrp->merge_checks_triggered);
		vty_out(vty, " Coalesce Time: %u%s\n",
			(UPDGRP_INST(subgrp->update_group))->coalesce_time,
			subgrp->t_coalesce ? "(Running)" : "");
		vty_out(vty, " Version: %" PRIu64 "\n", subgrp->version);
		vty_out(vty, " Packet queue length: %d\n",
			bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
		vty_out(vty, " Total packets enqueued: %u\n",
			subgroup_total_packets_enqueued(subgrp));
		vty_out(vty, " Packet queue high watermark: %d\n",
			bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
		vty_out(vty, " Adj-out list count: %u\n", subgrp->adj_count);
		vty_out(vty, " Advertise list: %s\n",
			advertise_list_is_empty(subgrp) ? "empty"
							: "not empty");
		vty_out(vty, " Flags: %s\n",
			CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
				? "R"
				: "");
		if (peer)
			vty_out(vty, " Max packet size: %d\n",
				peer->max_packet_size);
		if (subgrp->peer_count > 0) {
			vty_out(vty, " Peers:\n");
			SUBGRP_FOREACH_PEER (subgrp, paf)
				vty_out(vty, " - %s\n", paf->peer->host);
		}
	}
	return UPDWALK_CONTINUE;
}
679 | ||
680 | /* | |
681 | * Helper function to show the packet queue for each subgroup of update group. | |
682 | * Will be constrained to a particular subgroup id if id !=0 | |
683 | */ | |
d62a17ae | 684 | static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp, |
685 | void *arg) | |
3f9c7369 | 686 | { |
d62a17ae | 687 | struct updwalk_context *ctx = arg; |
688 | struct update_subgroup *subgrp; | |
689 | struct vty *vty; | |
690 | ||
691 | vty = ctx->vty; | |
a2addae8 | 692 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
d62a17ae | 693 | if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id)) |
694 | continue; | |
695 | vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n", | |
696 | updgrp->id, subgrp->id); | |
697 | bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty); | |
698 | } | |
699 | return UPDWALK_CONTINUE; | |
3f9c7369 DS |
700 | } |
701 | ||
702 | /* | |
703 | * Show the packet queue for each subgroup of update group. Will be | |
704 | * constrained to a particular subgroup id if id !=0 | |
705 | */ | |
d62a17ae | 706 | void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi, |
707 | struct vty *vty, uint64_t id) | |
3f9c7369 | 708 | { |
d62a17ae | 709 | struct updwalk_context ctx; |
710 | ||
711 | memset(&ctx, 0, sizeof(ctx)); | |
712 | ctx.vty = vty; | |
713 | ctx.subgrp_id = id; | |
714 | ctx.flags = 0; | |
715 | update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb, | |
716 | &ctx); | |
3f9c7369 DS |
717 | } |
718 | ||
d62a17ae | 719 | static struct update_group *update_group_find(struct peer_af *paf) |
3f9c7369 | 720 | { |
d62a17ae | 721 | struct update_group *updgrp; |
722 | struct update_group tmp; | |
723 | struct peer tmp_conf; | |
3f9c7369 | 724 | |
d62a17ae | 725 | if (!peer_established(PAF_PEER(paf))) |
726 | return NULL; | |
3f9c7369 | 727 | |
d62a17ae | 728 | memset(&tmp, 0, sizeof(tmp)); |
729 | memset(&tmp_conf, 0, sizeof(tmp_conf)); | |
730 | tmp.conf = &tmp_conf; | |
731 | peer2_updgrp_copy(&tmp, paf); | |
3f9c7369 | 732 | |
d62a17ae | 733 | updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp); |
734 | conf_release(&tmp_conf, paf->afi, paf->safi); | |
735 | return updgrp; | |
3f9c7369 DS |
736 | } |
737 | ||
d62a17ae | 738 | static struct update_group *update_group_create(struct peer_af *paf) |
3f9c7369 | 739 | { |
d62a17ae | 740 | struct update_group *updgrp; |
741 | struct update_group tmp; | |
742 | struct peer tmp_conf; | |
3f9c7369 | 743 | |
d62a17ae | 744 | memset(&tmp, 0, sizeof(tmp)); |
745 | memset(&tmp_conf, 0, sizeof(tmp_conf)); | |
746 | tmp.conf = &tmp_conf; | |
747 | peer2_updgrp_copy(&tmp, paf); | |
3f9c7369 | 748 | |
d62a17ae | 749 | updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp, |
750 | updgrp_hash_alloc); | |
751 | if (!updgrp) | |
752 | return NULL; | |
753 | update_group_checkin(updgrp); | |
3f9c7369 | 754 | |
d62a17ae | 755 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
756 | zlog_debug("create update group %" PRIu64, updgrp->id); | |
3f9c7369 | 757 | |
d62a17ae | 758 | UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1; |
3f9c7369 | 759 | |
d62a17ae | 760 | conf_release(&tmp_conf, paf->afi, paf->safi); |
761 | return updgrp; | |
3f9c7369 DS |
762 | } |
763 | ||
/*
 * Remove an update group from its hash table and free everything it owns.
 */
static void update_group_delete(struct update_group *updgrp)
{
	if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
		zlog_debug("delete update group %" PRIu64, updgrp->id);

	UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;

	/* unlink from the hash before tearing down the conf it keys on */
	hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
	conf_release(updgrp->conf, updgrp->afi, updgrp->safi);

	/* NOTE(review): conf_release() already frees conf->host; this second
	 * XFREE looks safe only if XFREE NULLs the pointer — confirm. */
	XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);

	XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);

	XFREE(MTYPE_BGP_PEER, updgrp->conf);
	XFREE(MTYPE_BGP_UPDGRP, updgrp);
}
781 | ||
d62a17ae | 782 | static void update_group_add_subgroup(struct update_group *updgrp, |
783 | struct update_subgroup *subgrp) | |
3f9c7369 | 784 | { |
d62a17ae | 785 | if (!updgrp || !subgrp) |
786 | return; | |
3f9c7369 | 787 | |
d62a17ae | 788 | LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train); |
789 | subgrp->update_group = updgrp; | |
3f9c7369 DS |
790 | } |
791 | ||
d62a17ae | 792 | static void update_group_remove_subgroup(struct update_group *updgrp, |
793 | struct update_subgroup *subgrp) | |
3f9c7369 | 794 | { |
d62a17ae | 795 | if (!updgrp || !subgrp) |
796 | return; | |
3f9c7369 | 797 | |
d62a17ae | 798 | LIST_REMOVE(subgrp, updgrp_train); |
799 | subgrp->update_group = NULL; | |
800 | if (LIST_EMPTY(&(updgrp->subgrps))) | |
801 | update_group_delete(updgrp); | |
3f9c7369 DS |
802 | } |
803 | ||
804 | static struct update_subgroup * | |
d62a17ae | 805 | update_subgroup_create(struct update_group *updgrp) |
3f9c7369 | 806 | { |
d62a17ae | 807 | struct update_subgroup *subgrp; |
3f9c7369 | 808 | |
d62a17ae | 809 | subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup)); |
810 | update_subgroup_checkin(subgrp, updgrp); | |
811 | subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time; | |
ef56aee4 | 812 | sync_init(subgrp, updgrp); |
d62a17ae | 813 | bpacket_queue_init(SUBGRP_PKTQ(subgrp)); |
814 | bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL); | |
815 | TAILQ_INIT(&(subgrp->adjq)); | |
816 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) | |
817 | zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id, | |
818 | subgrp->id); | |
3f9c7369 | 819 | |
d62a17ae | 820 | update_group_add_subgroup(updgrp, subgrp); |
3f9c7369 | 821 | |
d62a17ae | 822 | UPDGRP_INCR_STAT(updgrp, subgrps_created); |
3f9c7369 | 823 | |
d62a17ae | 824 | return subgrp; |
3f9c7369 DS |
825 | } |
826 | ||
/*
 * update_subgroup_delete
 *
 * Tear down a subgroup and free it. Callers must have already emptied (or
 * never populated) the subgroup's peer list; see
 * update_subgroup_check_delete().
 */
static void update_subgroup_delete(struct update_subgroup *subgrp)
{
	if (!subgrp)
		return;

	if (subgrp->update_group)
		UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);

	/* Cancel pending timers before tearing state down. */
	THREAD_OFF(subgrp->t_merge_check);
	THREAD_OFF(subgrp->t_coalesce);

	/* Drop any encoded packets and the advertised-route state. */
	bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
	subgroup_clear_table(subgrp);

	sync_delete(subgrp);

	/* Log before unlinking: removing the subgroup may delete the parent
	 * update group as a side effect. */
	if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
		zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
			   subgrp->update_group->id, subgrp->id);

	update_group_remove_subgroup(subgrp->update_group, subgrp);

	XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
}
851 | ||
d62a17ae | 852 | void update_subgroup_inherit_info(struct update_subgroup *to, |
853 | struct update_subgroup *from) | |
3f9c7369 | 854 | { |
d62a17ae | 855 | if (!to || !from) |
856 | return; | |
3f9c7369 | 857 | |
d62a17ae | 858 | to->sflags = from->sflags; |
3f9c7369 DS |
859 | } |
860 | ||
861 | /* | |
862 | * update_subgroup_check_delete | |
863 | * | |
864 | * Delete a subgroup if it is ready to be deleted. | |
865 | * | |
2951a7a4 | 866 | * Returns true if the subgroup was deleted. |
3f9c7369 | 867 | */ |
3dc339cd | 868 | static bool update_subgroup_check_delete(struct update_subgroup *subgrp) |
3f9c7369 | 869 | { |
d62a17ae | 870 | if (!subgrp) |
3dc339cd | 871 | return false; |
3f9c7369 | 872 | |
d62a17ae | 873 | if (!LIST_EMPTY(&(subgrp->peers))) |
3dc339cd | 874 | return false; |
3f9c7369 | 875 | |
d62a17ae | 876 | update_subgroup_delete(subgrp); |
3f9c7369 | 877 | |
3dc339cd | 878 | return true; |
3f9c7369 DS |
879 | } |
880 | ||
881 | /* | |
882 | * update_subgroup_add_peer | |
883 | * | |
884 | * @param send_enqueued_packets If true all currently enqueued packets will | |
885 | * also be sent to the peer. | |
886 | */ | |
d62a17ae | 887 | static void update_subgroup_add_peer(struct update_subgroup *subgrp, |
888 | struct peer_af *paf, | |
889 | int send_enqueued_pkts) | |
3f9c7369 | 890 | { |
d62a17ae | 891 | struct bpacket *pkt; |
3f9c7369 | 892 | |
d62a17ae | 893 | if (!subgrp || !paf) |
894 | return; | |
3f9c7369 | 895 | |
d62a17ae | 896 | LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train); |
897 | paf->subgroup = subgrp; | |
898 | subgrp->peer_count++; | |
3f9c7369 | 899 | |
d62a17ae | 900 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
901 | UPDGRP_PEER_DBG_EN(subgrp->update_group); | |
902 | } | |
3f9c7369 | 903 | |
d62a17ae | 904 | SUBGRP_INCR_STAT(subgrp, join_events); |
3f9c7369 | 905 | |
d62a17ae | 906 | if (send_enqueued_pkts) { |
907 | pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp)); | |
908 | } else { | |
3f9c7369 | 909 | |
d62a17ae | 910 | /* |
911 | * Hang the peer off of the last, placeholder, packet in the | |
912 | * queue. This means it won't see any of the packets that are | |
913 | * currently the queue. | |
914 | */ | |
915 | pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp)); | |
916 | assert(pkt->buffer == NULL); | |
917 | } | |
3f9c7369 | 918 | |
d62a17ae | 919 | bpacket_add_peer(pkt, paf); |
3f9c7369 | 920 | |
7bfdba54 S |
921 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
922 | zlog_debug("peer %s added to subgroup s%" PRIu64, | |
923 | paf->peer->host, subgrp->id); | |
3f9c7369 DS |
924 | } |
925 | ||
926 | /* | |
927 | * update_subgroup_remove_peer_internal | |
928 | * | |
929 | * Internal function that removes a peer from a subgroup, but does not | |
930 | * delete the subgroup. A call to this function must almost always be | |
931 | * followed by a call to update_subgroup_check_delete(). | |
932 | * | |
933 | * @see update_subgroup_remove_peer | |
934 | */ | |
d62a17ae | 935 | static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp, |
936 | struct peer_af *paf) | |
3f9c7369 | 937 | { |
d3e51db0 | 938 | assert(subgrp && paf && subgrp->update_group); |
3f9c7369 | 939 | |
d62a17ae | 940 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
941 | UPDGRP_PEER_DBG_DIS(subgrp->update_group); | |
942 | } | |
3f9c7369 | 943 | |
d62a17ae | 944 | bpacket_queue_remove_peer(paf); |
945 | LIST_REMOVE(paf, subgrp_train); | |
946 | paf->subgroup = NULL; | |
947 | subgrp->peer_count--; | |
3f9c7369 | 948 | |
7bfdba54 S |
949 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
950 | zlog_debug("peer %s deleted from subgroup s%" | |
4882d296 | 951 | PRIu64 " peer cnt %d", |
7bfdba54 | 952 | paf->peer->host, subgrp->id, subgrp->peer_count); |
d62a17ae | 953 | SUBGRP_INCR_STAT(subgrp, prune_events); |
3f9c7369 DS |
954 | } |
955 | ||
956 | /* | |
957 | * update_subgroup_remove_peer | |
958 | */ | |
d62a17ae | 959 | void update_subgroup_remove_peer(struct update_subgroup *subgrp, |
960 | struct peer_af *paf) | |
3f9c7369 | 961 | { |
d62a17ae | 962 | if (!subgrp || !paf) |
963 | return; | |
3f9c7369 | 964 | |
d62a17ae | 965 | update_subgroup_remove_peer_internal(subgrp, paf); |
3f9c7369 | 966 | |
d62a17ae | 967 | if (update_subgroup_check_delete(subgrp)) |
968 | return; | |
3f9c7369 | 969 | |
d62a17ae | 970 | /* |
971 | * The deletion of the peer may have caused some packets to be | |
972 | * deleted from the subgroup packet queue. Check if the subgroup can | |
973 | * be merged now. | |
974 | */ | |
975 | update_subgroup_check_merge(subgrp, "removed peer from subgroup"); | |
3f9c7369 DS |
976 | } |
977 | ||
d62a17ae | 978 | static struct update_subgroup *update_subgroup_find(struct update_group *updgrp, |
979 | struct peer_af *paf) | |
3f9c7369 | 980 | { |
d62a17ae | 981 | struct update_subgroup *subgrp = NULL; |
982 | uint64_t version; | |
983 | ||
984 | if (paf->subgroup) { | |
985 | assert(0); | |
986 | return NULL; | |
987 | } else | |
988 | version = 0; | |
989 | ||
990 | if (!peer_established(PAF_PEER(paf))) | |
991 | return NULL; | |
992 | ||
a2addae8 | 993 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
d62a17ae | 994 | if (subgrp->version != version |
995 | || CHECK_FLAG(subgrp->sflags, | |
996 | SUBGRP_STATUS_DEFAULT_ORIGINATE)) | |
997 | continue; | |
998 | ||
999 | /* | |
1000 | * The version number is not meaningful on a subgroup that needs | |
1001 | * a refresh. | |
1002 | */ | |
1003 | if (update_subgroup_needs_refresh(subgrp)) | |
1004 | continue; | |
1005 | ||
1006 | break; | |
1007 | } | |
1008 | ||
1009 | return subgrp; | |
3f9c7369 DS |
1010 | } |
1011 | ||
1012 | /* | |
1013 | * update_subgroup_ready_for_merge | |
1014 | * | |
2951a7a4 | 1015 | * Returns true if this subgroup is in a state that allows it to be |
3f9c7369 DS |
1016 | * merged into another subgroup. |
1017 | */ | |
3dc339cd | 1018 | static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp) |
3f9c7369 DS |
1019 | { |
1020 | ||
d62a17ae | 1021 | /* |
1022 | * Not ready if there are any encoded packets waiting to be written | |
1023 | * out to peers. | |
1024 | */ | |
1025 | if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp))) | |
3dc339cd | 1026 | return false; |
d62a17ae | 1027 | |
1028 | /* | |
1029 | * Not ready if there enqueued updates waiting to be encoded. | |
1030 | */ | |
1031 | if (!advertise_list_is_empty(subgrp)) | |
3dc339cd | 1032 | return false; |
d62a17ae | 1033 | |
1034 | /* | |
1035 | * Don't attempt to merge a subgroup that needs a refresh. For one, | |
1036 | * we can't determine if the adj_out of such a group matches that of | |
1037 | * another group. | |
1038 | */ | |
1039 | if (update_subgroup_needs_refresh(subgrp)) | |
3dc339cd | 1040 | return false; |
d62a17ae | 1041 | |
3dc339cd | 1042 | return true; |
3f9c7369 DS |
1043 | } |
1044 | ||
1045 | /* | |
1046 | * update_subgrp_can_merge_into | |
1047 | * | |
2951a7a4 | 1048 | * Returns true if the first subgroup can merge into the second |
3f9c7369 DS |
1049 | * subgroup. |
1050 | */ | |
d62a17ae | 1051 | static int update_subgroup_can_merge_into(struct update_subgroup *subgrp, |
1052 | struct update_subgroup *target) | |
3f9c7369 DS |
1053 | { |
1054 | ||
d62a17ae | 1055 | if (subgrp == target) |
1056 | return 0; | |
3f9c7369 | 1057 | |
d62a17ae | 1058 | /* |
1059 | * Both must have processed the BRIB to the same point in order to | |
1060 | * be merged. | |
1061 | */ | |
1062 | if (subgrp->version != target->version) | |
1063 | return 0; | |
3f9c7369 | 1064 | |
d62a17ae | 1065 | if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) |
1066 | != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)) | |
1067 | return 0; | |
f910ef58 | 1068 | |
d62a17ae | 1069 | if (subgrp->adj_count != target->adj_count) |
1070 | return 0; | |
3f9c7369 | 1071 | |
d62a17ae | 1072 | return update_subgroup_ready_for_merge(target); |
3f9c7369 DS |
1073 | } |
1074 | ||
1075 | /* | |
1076 | * update_subgroup_merge | |
1077 | * | |
1078 | * Merge the first subgroup into the second one. | |
1079 | */ | |
d62a17ae | 1080 | static void update_subgroup_merge(struct update_subgroup *subgrp, |
1081 | struct update_subgroup *target, | |
1082 | const char *reason) | |
3f9c7369 | 1083 | { |
d62a17ae | 1084 | struct peer_af *paf; |
1085 | int result; | |
1086 | int peer_count; | |
3f9c7369 | 1087 | |
d62a17ae | 1088 | assert(subgrp->adj_count == target->adj_count); |
3f9c7369 | 1089 | |
d62a17ae | 1090 | peer_count = subgrp->peer_count; |
3f9c7369 | 1091 | |
d62a17ae | 1092 | while (1) { |
1093 | paf = LIST_FIRST(&subgrp->peers); | |
1094 | if (!paf) | |
1095 | break; | |
3f9c7369 | 1096 | |
d62a17ae | 1097 | update_subgroup_remove_peer_internal(subgrp, paf); |
3f9c7369 | 1098 | |
d62a17ae | 1099 | /* |
1100 | * Add the peer to the target subgroup, while making sure that | |
1101 | * any currently enqueued packets won't be sent to it. Enqueued | |
1102 | * packets could, for example, result in an unnecessary withdraw | |
1103 | * followed by an advertise. | |
1104 | */ | |
1105 | update_subgroup_add_peer(target, paf, 0); | |
1106 | } | |
3f9c7369 | 1107 | |
d62a17ae | 1108 | SUBGRP_INCR_STAT(target, merge_events); |
3f9c7369 | 1109 | |
d62a17ae | 1110 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
6cde4b45 | 1111 | zlog_debug("u%" PRIu64 ":s%" PRIu64" (%d peers) merged into u%" PRIu64 ":s%" PRIu64", trigger: %s", |
d62a17ae | 1112 | subgrp->update_group->id, subgrp->id, peer_count, |
1113 | target->update_group->id, target->id, | |
1114 | reason ? reason : "unknown"); | |
3f9c7369 | 1115 | |
d62a17ae | 1116 | result = update_subgroup_check_delete(subgrp); |
1117 | assert(result); | |
3f9c7369 DS |
1118 | } |
1119 | ||
1120 | /* | |
1121 | * update_subgroup_check_merge | |
1122 | * | |
1123 | * Merge this subgroup into another subgroup if possible. | |
1124 | * | |
2951a7a4 | 1125 | * Returns true if the subgroup has been merged. The subgroup pointer |
3f9c7369 DS |
1126 | * should not be accessed in this case. |
1127 | */ | |
3dc339cd DA |
1128 | bool update_subgroup_check_merge(struct update_subgroup *subgrp, |
1129 | const char *reason) | |
3f9c7369 | 1130 | { |
d62a17ae | 1131 | struct update_subgroup *target; |
3f9c7369 | 1132 | |
d62a17ae | 1133 | if (!update_subgroup_ready_for_merge(subgrp)) |
3dc339cd | 1134 | return false; |
3f9c7369 | 1135 | |
d62a17ae | 1136 | /* |
1137 | * Look for a subgroup to merge into. | |
1138 | */ | |
a2addae8 | 1139 | UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) { |
d62a17ae | 1140 | if (update_subgroup_can_merge_into(subgrp, target)) |
1141 | break; | |
1142 | } | |
3f9c7369 | 1143 | |
d62a17ae | 1144 | if (!target) |
3dc339cd | 1145 | return false; |
3f9c7369 | 1146 | |
d62a17ae | 1147 | update_subgroup_merge(subgrp, target, reason); |
3dc339cd | 1148 | return true; |
3f9c7369 DS |
1149 | } |
1150 | ||
d62a17ae | 1151 | /* |
9d303b37 DL |
1152 | * update_subgroup_merge_check_thread_cb |
1153 | */ | |
d62a17ae | 1154 | static int update_subgroup_merge_check_thread_cb(struct thread *thread) |
3f9c7369 | 1155 | { |
d62a17ae | 1156 | struct update_subgroup *subgrp; |
3f9c7369 | 1157 | |
d62a17ae | 1158 | subgrp = THREAD_ARG(thread); |
3f9c7369 | 1159 | |
d62a17ae | 1160 | subgrp->t_merge_check = NULL; |
3f9c7369 | 1161 | |
d62a17ae | 1162 | update_subgroup_check_merge(subgrp, "triggered merge check"); |
1163 | return 0; | |
3f9c7369 DS |
1164 | } |
1165 | ||
1166 | /* | |
1167 | * update_subgroup_trigger_merge_check | |
1168 | * | |
1169 | * Triggers a call to update_subgroup_check_merge() on a clean context. | |
1170 | * | |
1171 | * @param force If true, the merge check will be triggered even if the | |
1172 | * subgroup doesn't currently look ready for a merge. | |
1173 | * | |
2951a7a4 | 1174 | * Returns true if a merge check will be performed shortly. |
3f9c7369 | 1175 | */ |
3dc339cd DA |
1176 | bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp, |
1177 | int force) | |
3f9c7369 | 1178 | { |
d62a17ae | 1179 | if (subgrp->t_merge_check) |
3dc339cd | 1180 | return true; |
3f9c7369 | 1181 | |
d62a17ae | 1182 | if (!force && !update_subgroup_ready_for_merge(subgrp)) |
3dc339cd | 1183 | return false; |
3f9c7369 | 1184 | |
d62a17ae | 1185 | subgrp->t_merge_check = NULL; |
1186 | thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb, | |
1187 | subgrp, 0, &subgrp->t_merge_check); | |
3f9c7369 | 1188 | |
d62a17ae | 1189 | SUBGRP_INCR_STAT(subgrp, merge_checks_triggered); |
3f9c7369 | 1190 | |
3dc339cd | 1191 | return true; |
3f9c7369 DS |
1192 | } |
1193 | ||
1194 | /* | |
1195 | * update_subgroup_copy_adj_out | |
1196 | * | |
1197 | * Helper function that clones the adj out (state about advertised | |
1198 | * routes) from one subgroup to another. It assumes that the adj out | |
1199 | * of the target subgroup is empty. | |
1200 | */ | |
d62a17ae | 1201 | static void update_subgroup_copy_adj_out(struct update_subgroup *source, |
1202 | struct update_subgroup *dest) | |
3f9c7369 | 1203 | { |
d62a17ae | 1204 | struct bgp_adj_out *aout, *aout_copy; |
1205 | ||
a2addae8 | 1206 | SUBGRP_FOREACH_ADJ (source, aout) { |
d62a17ae | 1207 | /* |
1208 | * Copy the adj out. | |
1209 | */ | |
9bcb3eef DS |
1210 | aout_copy = bgp_adj_out_alloc(dest, aout->dest, |
1211 | aout->addpath_tx_id); | |
d62a17ae | 1212 | aout_copy->attr = |
7c87afac | 1213 | aout->attr ? bgp_attr_intern(aout->attr) : NULL; |
d62a17ae | 1214 | } |
0ab7b206 AD |
1215 | |
1216 | dest->scount = source->scount; | |
3f9c7369 DS |
1217 | } |
1218 | ||
1219 | /* | |
1220 | * update_subgroup_copy_packets | |
1221 | * | |
1222 | * Copy packets after and including the given packet to the subgroup | |
1223 | * 'dest'. | |
1224 | * | |
1225 | * Returns the number of packets copied. | |
1226 | */ | |
d62a17ae | 1227 | static int update_subgroup_copy_packets(struct update_subgroup *dest, |
1228 | struct bpacket *pkt) | |
3f9c7369 | 1229 | { |
d62a17ae | 1230 | int count; |
1231 | ||
1232 | count = 0; | |
1233 | while (pkt && pkt->buffer) { | |
1234 | bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer), | |
1235 | &pkt->arr); | |
1236 | count++; | |
1237 | pkt = bpacket_next(pkt); | |
1238 | } | |
3f9c7369 | 1239 | |
d62a17ae | 1240 | return count; |
3f9c7369 DS |
1241 | } |
1242 | ||
3dc339cd DA |
1243 | static bool updgrp_prefix_list_update(struct update_group *updgrp, |
1244 | const char *name) | |
3f9c7369 | 1245 | { |
d62a17ae | 1246 | struct peer *peer; |
1247 | struct bgp_filter *filter; | |
1248 | ||
1249 | peer = UPDGRP_PEER(updgrp); | |
1250 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; | |
1251 | ||
1252 | if (PREFIX_LIST_OUT_NAME(filter) | |
1253 | && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) { | |
1254 | PREFIX_LIST_OUT(filter) = prefix_list_lookup( | |
1255 | UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter)); | |
3dc339cd | 1256 | return true; |
d62a17ae | 1257 | } |
3dc339cd | 1258 | return false; |
3f9c7369 DS |
1259 | } |
1260 | ||
3dc339cd DA |
1261 | static bool updgrp_filter_list_update(struct update_group *updgrp, |
1262 | const char *name) | |
3f9c7369 | 1263 | { |
d62a17ae | 1264 | struct peer *peer; |
1265 | struct bgp_filter *filter; | |
1266 | ||
1267 | peer = UPDGRP_PEER(updgrp); | |
1268 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; | |
1269 | ||
1270 | if (FILTER_LIST_OUT_NAME(filter) | |
1271 | && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) { | |
1272 | FILTER_LIST_OUT(filter) = | |
1273 | as_list_lookup(FILTER_LIST_OUT_NAME(filter)); | |
3dc339cd | 1274 | return true; |
d62a17ae | 1275 | } |
3dc339cd | 1276 | return false; |
3f9c7369 DS |
1277 | } |
1278 | ||
3dc339cd DA |
1279 | static bool updgrp_distribute_list_update(struct update_group *updgrp, |
1280 | const char *name) | |
3f9c7369 | 1281 | { |
d62a17ae | 1282 | struct peer *peer; |
1283 | struct bgp_filter *filter; | |
1284 | ||
1285 | peer = UPDGRP_PEER(updgrp); | |
1286 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; | |
1287 | ||
1288 | if (DISTRIBUTE_OUT_NAME(filter) | |
1289 | && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) { | |
1290 | DISTRIBUTE_OUT(filter) = access_list_lookup( | |
1291 | UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter)); | |
3dc339cd | 1292 | return true; |
d62a17ae | 1293 | } |
3dc339cd | 1294 | return false; |
3f9c7369 DS |
1295 | } |
1296 | ||
d62a17ae | 1297 | static int updgrp_route_map_update(struct update_group *updgrp, |
1298 | const char *name, int *def_rmap_changed) | |
3f9c7369 | 1299 | { |
d62a17ae | 1300 | struct peer *peer; |
1301 | struct bgp_filter *filter; | |
1302 | int changed = 0; | |
1303 | afi_t afi; | |
1304 | safi_t safi; | |
1305 | ||
1306 | peer = UPDGRP_PEER(updgrp); | |
1307 | afi = UPDGRP_AFI(updgrp); | |
1308 | safi = UPDGRP_SAFI(updgrp); | |
1309 | filter = &peer->filter[afi][safi]; | |
1310 | ||
1311 | if (ROUTE_MAP_OUT_NAME(filter) | |
1312 | && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) { | |
1313 | ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name); | |
1314 | ||
1315 | changed = 1; | |
1316 | } | |
1317 | ||
1318 | if (UNSUPPRESS_MAP_NAME(filter) | |
1319 | && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) { | |
1320 | UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name); | |
1321 | changed = 1; | |
1322 | } | |
1323 | ||
1324 | /* process default-originate route-map */ | |
1325 | if (peer->default_rmap[afi][safi].name | |
1326 | && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) { | |
1327 | peer->default_rmap[afi][safi].map = | |
1328 | route_map_lookup_by_name(name); | |
1329 | if (def_rmap_changed) | |
1330 | *def_rmap_changed = 1; | |
1331 | } | |
1332 | return changed; | |
3f9c7369 DS |
1333 | } |
1334 | ||
1335 | /* | |
1336 | * hash iteration callback function to process a policy change for an | |
1337 | * update group. Check if the changed policy matches the updgrp's | |
1338 | * outbound route-map or unsuppress-map or default-originate map or | |
1339 | * filter-list or prefix-list or distribute-list. | |
1340 | * Trigger update generation accordingly. | |
1341 | */ | |
d62a17ae | 1342 | static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg) |
3f9c7369 | 1343 | { |
d62a17ae | 1344 | struct updwalk_context *ctx = arg; |
1345 | struct update_subgroup *subgrp; | |
1346 | int changed = 0; | |
1347 | int def_changed = 0; | |
1348 | ||
1349 | if (!updgrp || !ctx || !ctx->policy_name) | |
1350 | return UPDWALK_CONTINUE; | |
1351 | ||
1352 | switch (ctx->policy_type) { | |
1353 | case BGP_POLICY_ROUTE_MAP: | |
1354 | changed = updgrp_route_map_update(updgrp, ctx->policy_name, | |
1355 | &def_changed); | |
1356 | break; | |
1357 | case BGP_POLICY_FILTER_LIST: | |
1358 | changed = updgrp_filter_list_update(updgrp, ctx->policy_name); | |
1359 | break; | |
1360 | case BGP_POLICY_PREFIX_LIST: | |
1361 | changed = updgrp_prefix_list_update(updgrp, ctx->policy_name); | |
1362 | break; | |
1363 | case BGP_POLICY_DISTRIBUTE_LIST: | |
1364 | changed = | |
1365 | updgrp_distribute_list_update(updgrp, ctx->policy_name); | |
1366 | break; | |
1367 | default: | |
1368 | break; | |
1369 | } | |
1370 | ||
1371 | /* If not doing route update, return after updating "config" */ | |
1372 | if (!ctx->policy_route_update) | |
1373 | return UPDWALK_CONTINUE; | |
1374 | ||
1375 | /* If nothing has changed, return after updating "config" */ | |
1376 | if (!changed && !def_changed) | |
1377 | return UPDWALK_CONTINUE; | |
1378 | ||
1379 | /* | |
1380 | * If something has changed, at the beginning of a route-map | |
1381 | * modification | |
1382 | * event, mark each subgroup's needs-refresh bit. For one, it signals to | |
1383 | * whoever that the subgroup needs a refresh. Second, it prevents | |
1384 | * premature | |
1385 | * merge of this subgroup with another before a complete (outbound) | |
1386 | * refresh. | |
1387 | */ | |
1388 | if (ctx->policy_event_start_flag) { | |
a2addae8 | 1389 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
d62a17ae | 1390 | update_subgroup_set_needs_refresh(subgrp, 1); |
1391 | } | |
1392 | return UPDWALK_CONTINUE; | |
1393 | } | |
1394 | ||
a2addae8 | 1395 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
2adac256 DA |
1396 | /* Avoid supressing duplicate routes later |
1397 | * when processing in subgroup_announce_table(). | |
1398 | */ | |
1399 | SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES); | |
1400 | ||
d62a17ae | 1401 | if (changed) { |
1402 | if (bgp_debug_update(NULL, NULL, updgrp, 0)) | |
1403 | zlog_debug( | |
6cde4b45 | 1404 | "u%" PRIu64 ":s%" PRIu64" announcing routes upon policy %s (type %d) change", |
d62a17ae | 1405 | updgrp->id, subgrp->id, |
1406 | ctx->policy_name, ctx->policy_type); | |
1407 | subgroup_announce_route(subgrp); | |
1408 | } | |
1409 | if (def_changed) { | |
1410 | if (bgp_debug_update(NULL, NULL, updgrp, 0)) | |
1411 | zlog_debug( | |
6cde4b45 | 1412 | "u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change", |
d62a17ae | 1413 | updgrp->id, subgrp->id, |
1414 | ctx->policy_name); | |
1415 | subgroup_default_originate(subgrp, 0); | |
1416 | } | |
1417 | update_subgroup_set_needs_refresh(subgrp, 0); | |
1418 | } | |
1419 | return UPDWALK_CONTINUE; | |
3f9c7369 DS |
1420 | } |
1421 | ||
e3b78da8 | 1422 | static int update_group_walkcb(struct hash_bucket *bucket, void *arg) |
3f9c7369 | 1423 | { |
e3b78da8 | 1424 | struct update_group *updgrp = bucket->data; |
d62a17ae | 1425 | struct updwalk_context *wctx = arg; |
1426 | int ret = (*wctx->cb)(updgrp, wctx->context); | |
1427 | return ret; | |
3f9c7369 DS |
1428 | } |
1429 | ||
d62a17ae | 1430 | static int update_group_periodic_merge_walkcb(struct update_group *updgrp, |
1431 | void *arg) | |
3f9c7369 | 1432 | { |
d62a17ae | 1433 | struct update_subgroup *subgrp; |
1434 | struct update_subgroup *tmp_subgrp; | |
1435 | const char *reason = arg; | |
3f9c7369 | 1436 | |
a2addae8 RW |
1437 | UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp) |
1438 | update_subgroup_check_merge(subgrp, reason); | |
d62a17ae | 1439 | return UPDWALK_CONTINUE; |
3f9c7369 DS |
1440 | } |
1441 | ||
1442 | /******************** | |
1443 | * PUBLIC FUNCTIONS | |
1444 | ********************/ | |
1445 | ||
1446 | /* | |
1447 | * trigger function when a policy (route-map/filter-list/prefix-list/ | |
1448 | * distribute-list etc.) content changes. Go through all the | |
1449 | * update groups and process the change. | |
1450 | * | |
1451 | * bgp: the bgp instance | |
1452 | * ptype: the type of policy that got modified, see bgpd.h | |
1453 | * pname: name of the policy | |
1454 | * route_update: flag to control if an automatic update generation should | |
1455 | * occur | |
1456 | * start_event: flag that indicates if it's the beginning of the change. | |
1457 | * Esp. when the user is changing the content interactively | |
1458 | * over multiple statements. Useful to set dirty flag on | |
1459 | * update groups. | |
1460 | */ | |
d62a17ae | 1461 | void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype, |
1462 | const char *pname, int route_update, | |
1463 | int start_event) | |
3f9c7369 | 1464 | { |
d62a17ae | 1465 | struct updwalk_context ctx; |
3f9c7369 | 1466 | |
d62a17ae | 1467 | memset(&ctx, 0, sizeof(ctx)); |
1468 | ctx.policy_type = ptype; | |
1469 | ctx.policy_name = pname; | |
1470 | ctx.policy_route_update = route_update; | |
1471 | ctx.policy_event_start_flag = start_event; | |
1472 | ctx.flags = 0; | |
3f9c7369 | 1473 | |
d62a17ae | 1474 | update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx); |
3f9c7369 DS |
1475 | } |
1476 | ||
1477 | /* | |
1478 | * update_subgroup_split_peer | |
1479 | * | |
1480 | * Ensure that the given peer is in a subgroup of its own in the | |
1481 | * specified update group. | |
1482 | */ | |
d62a17ae | 1483 | void update_subgroup_split_peer(struct peer_af *paf, |
1484 | struct update_group *updgrp) | |
3f9c7369 | 1485 | { |
d62a17ae | 1486 | struct update_subgroup *old_subgrp, *subgrp; |
1487 | uint64_t old_id; | |
1488 | ||
1489 | ||
1490 | old_subgrp = paf->subgroup; | |
1491 | ||
1492 | if (!updgrp) | |
1493 | updgrp = old_subgrp->update_group; | |
1494 | ||
1495 | /* | |
1496 | * If the peer is alone in its subgroup, reuse the existing | |
1497 | * subgroup. | |
1498 | */ | |
1499 | if (old_subgrp->peer_count == 1) { | |
1500 | if (updgrp == old_subgrp->update_group) | |
1501 | return; | |
1502 | ||
1503 | subgrp = old_subgrp; | |
1504 | old_id = old_subgrp->update_group->id; | |
1505 | ||
1506 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { | |
1507 | UPDGRP_PEER_DBG_DIS(old_subgrp->update_group); | |
1508 | } | |
1509 | ||
1510 | update_group_remove_subgroup(old_subgrp->update_group, | |
1511 | old_subgrp); | |
1512 | update_group_add_subgroup(updgrp, subgrp); | |
1513 | ||
1514 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { | |
1515 | UPDGRP_PEER_DBG_EN(updgrp); | |
1516 | } | |
1517 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) | |
6cde4b45 | 1518 | zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s moved to u%" PRIu64 ":s%" PRIu64, |
d62a17ae | 1519 | old_id, subgrp->id, paf->peer->host, |
1520 | updgrp->id, subgrp->id); | |
1521 | ||
1522 | /* | |
1523 | * The state of the subgroup (adj_out, advs, packet queue etc) | |
1524 | * is consistent internally, but may not be identical to other | |
1525 | * subgroups in the new update group even if the version number | |
1526 | * matches up. Make sure a full refresh is done before the | |
1527 | * subgroup is merged with another. | |
1528 | */ | |
1529 | update_subgroup_set_needs_refresh(subgrp, 1); | |
1530 | ||
1531 | SUBGRP_INCR_STAT(subgrp, updgrp_switch_events); | |
1532 | return; | |
1533 | } | |
3f9c7369 | 1534 | |
d62a17ae | 1535 | /* |
1536 | * Create a new subgroup under the specified update group, and copy | |
1537 | * over relevant state to it. | |
1538 | */ | |
1539 | subgrp = update_subgroup_create(updgrp); | |
1540 | update_subgroup_inherit_info(subgrp, old_subgrp); | |
1541 | ||
1542 | subgrp->split_from.update_group_id = old_subgrp->update_group->id; | |
1543 | subgrp->split_from.subgroup_id = old_subgrp->id; | |
1544 | ||
1545 | /* | |
1546 | * Copy out relevant state from the old subgroup. | |
1547 | */ | |
1548 | update_subgroup_copy_adj_out(paf->subgroup, subgrp); | |
1549 | update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send); | |
1550 | ||
1551 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) | |
6cde4b45 | 1552 | zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s split and moved into u%" PRIu64":s%" PRIu64, |
d62a17ae | 1553 | paf->subgroup->update_group->id, paf->subgroup->id, |
1554 | paf->peer->host, updgrp->id, subgrp->id); | |
1555 | ||
1556 | SUBGRP_INCR_STAT(paf->subgroup, split_events); | |
1557 | ||
1558 | /* | |
1559 | * Since queued advs were left behind, this new subgroup needs a | |
1560 | * refresh. | |
1561 | */ | |
1562 | update_subgroup_set_needs_refresh(subgrp, 1); | |
1563 | ||
1564 | /* | |
1565 | * Remove peer from old subgroup, and add it to the new one. | |
1566 | */ | |
1567 | update_subgroup_remove_peer(paf->subgroup, paf); | |
1568 | ||
1569 | update_subgroup_add_peer(subgrp, paf, 1); | |
3f9c7369 DS |
1570 | } |
1571 | ||
d62a17ae | 1572 | void update_bgp_group_init(struct bgp *bgp) |
3f9c7369 | 1573 | { |
d62a17ae | 1574 | int afid; |
3f9c7369 | 1575 | |
a2addae8 | 1576 | AF_FOREACH (afid) |
3f65c5b1 | 1577 | bgp->update_groups[afid] = |
996c9314 | 1578 | hash_create(updgrp_hash_key_make, updgrp_hash_cmp, |
3f65c5b1 | 1579 | "BGP Update Group Hash"); |
3f9c7369 DS |
1580 | } |
1581 | ||
d62a17ae | 1582 | void update_bgp_group_free(struct bgp *bgp) |
3d68677e | 1583 | { |
d62a17ae | 1584 | int afid; |
1585 | ||
a2addae8 | 1586 | AF_FOREACH (afid) { |
d62a17ae | 1587 | if (bgp->update_groups[afid]) { |
1588 | hash_free(bgp->update_groups[afid]); | |
1589 | bgp->update_groups[afid] = NULL; | |
1590 | } | |
1591 | } | |
3d68677e DS |
1592 | } |
1593 | ||
d62a17ae | 1594 | void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty, |
1595 | uint64_t subgrp_id) | |
3f9c7369 | 1596 | { |
d62a17ae | 1597 | struct updwalk_context ctx; |
1598 | memset(&ctx, 0, sizeof(ctx)); | |
1599 | ctx.vty = vty; | |
1600 | ctx.subgrp_id = subgrp_id; | |
8fe8a7f6 | 1601 | |
d62a17ae | 1602 | update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx); |
3f9c7369 DS |
1603 | } |
1604 | ||
1605 | /* | |
1606 | * update_group_show_stats | |
1607 | * | |
1608 | * Show global statistics about update groups. | |
1609 | */ | |
d62a17ae | 1610 | void update_group_show_stats(struct bgp *bgp, struct vty *vty) |
3f9c7369 | 1611 | { |
d62a17ae | 1612 | vty_out(vty, "Update groups created: %u\n", |
1613 | bgp->update_group_stats.updgrps_created); | |
1614 | vty_out(vty, "Update groups deleted: %u\n", | |
1615 | bgp->update_group_stats.updgrps_deleted); | |
1616 | vty_out(vty, "Update subgroups created: %u\n", | |
1617 | bgp->update_group_stats.subgrps_created); | |
1618 | vty_out(vty, "Update subgroups deleted: %u\n", | |
1619 | bgp->update_group_stats.subgrps_deleted); | |
1620 | vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events); | |
1621 | vty_out(vty, "Prune events: %u\n", | |
1622 | bgp->update_group_stats.prune_events); | |
1623 | vty_out(vty, "Merge events: %u\n", | |
1624 | bgp->update_group_stats.merge_events); | |
1625 | vty_out(vty, "Split events: %u\n", | |
1626 | bgp->update_group_stats.split_events); | |
1627 | vty_out(vty, "Update group switch events: %u\n", | |
1628 | bgp->update_group_stats.updgrp_switch_events); | |
1629 | vty_out(vty, "Peer route refreshes combined: %u\n", | |
1630 | bgp->update_group_stats.peer_refreshes_combined); | |
1631 | vty_out(vty, "Merge checks triggered: %u\n", | |
1632 | bgp->update_group_stats.merge_checks_triggered); | |
3f9c7369 DS |
1633 | } |
1634 | ||
1635 | /* | |
1636 | * update_group_adjust_peer | |
1637 | */ | |
d62a17ae | 1638 | void update_group_adjust_peer(struct peer_af *paf) |
3f9c7369 | 1639 | { |
d62a17ae | 1640 | struct update_group *updgrp; |
1641 | struct update_subgroup *subgrp, *old_subgrp; | |
1642 | struct peer *peer; | |
1643 | ||
1644 | if (!paf) | |
1645 | return; | |
1646 | ||
1647 | peer = PAF_PEER(paf); | |
1648 | if (!peer_established(peer)) { | |
1649 | return; | |
1650 | } | |
1651 | ||
1652 | if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) { | |
1653 | return; | |
3f9c7369 | 1654 | } |
3f9c7369 | 1655 | |
d62a17ae | 1656 | if (!peer->afc_nego[paf->afi][paf->safi]) { |
1657 | return; | |
1658 | } | |
3f9c7369 | 1659 | |
d62a17ae | 1660 | updgrp = update_group_find(paf); |
1661 | if (!updgrp) { | |
1662 | updgrp = update_group_create(paf); | |
1663 | if (!updgrp) { | |
e50f7cfd | 1664 | flog_err(EC_BGP_UPDGRP_CREATE, |
1c50c1c0 QY |
1665 | "couldn't create update group for peer %s", |
1666 | paf->peer->host); | |
d62a17ae | 1667 | return; |
1668 | } | |
1669 | } | |
3f9c7369 | 1670 | |
d62a17ae | 1671 | old_subgrp = paf->subgroup; |
3f9c7369 | 1672 | |
d62a17ae | 1673 | if (old_subgrp) { |
3f9c7369 | 1674 | |
d62a17ae | 1675 | /* |
1676 | * If the update group of the peer is unchanged, the peer can | |
1677 | * stay | |
1678 | * in its existing subgroup and we're done. | |
1679 | */ | |
1680 | if (old_subgrp->update_group == updgrp) | |
1681 | return; | |
1682 | ||
1683 | /* | |
1684 | * The peer is switching between update groups. Put it in its | |
1685 | * own subgroup under the new update group. | |
1686 | */ | |
1687 | update_subgroup_split_peer(paf, updgrp); | |
1688 | return; | |
1689 | } | |
1690 | ||
1691 | subgrp = update_subgroup_find(updgrp, paf); | |
1692 | if (!subgrp) { | |
1693 | subgrp = update_subgroup_create(updgrp); | |
1694 | if (!subgrp) | |
1695 | return; | |
1696 | } | |
3f9c7369 | 1697 | |
d62a17ae | 1698 | update_subgroup_add_peer(subgrp, paf, 1); |
1699 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) | |
1700 | zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id, | |
1701 | subgrp->id, paf->peer->host); | |
1702 | ||
1703 | return; | |
3f9c7369 DS |
1704 | } |
1705 | ||
d62a17ae | 1706 | int update_group_adjust_soloness(struct peer *peer, int set) |
3f9c7369 | 1707 | { |
d62a17ae | 1708 | struct peer_group *group; |
1709 | struct listnode *node, *nnode; | |
1710 | ||
1711 | if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { | |
1712 | peer_lonesoul_or_not(peer, set); | |
feb17238 | 1713 | if (peer_established(peer)) |
d62a17ae | 1714 | bgp_announce_route_all(peer); |
1715 | } else { | |
1716 | group = peer->group; | |
1717 | for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) { | |
1718 | peer_lonesoul_or_not(peer, set); | |
feb17238 | 1719 | if (peer_established(peer)) |
d62a17ae | 1720 | bgp_announce_route_all(peer); |
1721 | } | |
1722 | } | |
1723 | return 0; | |
3f9c7369 DS |
1724 | } |
1725 | ||
1726 | /* | |
1727 | * update_subgroup_rib | |
1728 | */ | |
d62a17ae | 1729 | struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp) |
3f9c7369 | 1730 | { |
d62a17ae | 1731 | struct bgp *bgp; |
3f9c7369 | 1732 | |
d62a17ae | 1733 | bgp = SUBGRP_INST(subgrp); |
1734 | if (!bgp) | |
1735 | return NULL; | |
3f9c7369 | 1736 | |
d62a17ae | 1737 | return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)]; |
3f9c7369 DS |
1738 | } |
1739 | ||
d62a17ae | 1740 | void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi, |
1741 | updgrp_walkcb cb, void *ctx) | |
3f9c7369 | 1742 | { |
d62a17ae | 1743 | struct updwalk_context wctx; |
1744 | int afid; | |
3f9c7369 | 1745 | |
d62a17ae | 1746 | if (!bgp) |
1747 | return; | |
1748 | afid = afindex(afi, safi); | |
1749 | if (afid >= BGP_AF_MAX) | |
1750 | return; | |
3f9c7369 | 1751 | |
d62a17ae | 1752 | memset(&wctx, 0, sizeof(wctx)); |
1753 | wctx.cb = cb; | |
1754 | wctx.context = ctx; | |
0de4848d | 1755 | |
d62a17ae | 1756 | if (bgp->update_groups[afid]) |
1757 | hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx); | |
3f9c7369 DS |
1758 | } |
1759 | ||
d62a17ae | 1760 | void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx) |
3f9c7369 | 1761 | { |
d62a17ae | 1762 | afi_t afi; |
1763 | safi_t safi; | |
3f9c7369 | 1764 | |
a2addae8 | 1765 | FOREACH_AFI_SAFI (afi, safi) { |
d62a17ae | 1766 | update_group_af_walk(bgp, afi, safi, cb, ctx); |
1767 | } | |
3f9c7369 DS |
1768 | } |
1769 | ||
d62a17ae | 1770 | void update_group_periodic_merge(struct bgp *bgp) |
3f9c7369 | 1771 | { |
d62a17ae | 1772 | char reason[] = "periodic merge check"; |
3f9c7369 | 1773 | |
d62a17ae | 1774 | update_group_walk(bgp, update_group_periodic_merge_walkcb, |
1775 | (void *)reason); | |
3f9c7369 DS |
1776 | } |
1777 | ||
0de4848d DS |
1778 | static int |
1779 | update_group_default_originate_route_map_walkcb(struct update_group *updgrp, | |
d62a17ae | 1780 | void *arg) |
0de4848d | 1781 | { |
d62a17ae | 1782 | struct update_subgroup *subgrp; |
1783 | struct peer *peer; | |
1784 | afi_t afi; | |
1785 | safi_t safi; | |
1786 | ||
a2addae8 | 1787 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
d62a17ae | 1788 | peer = SUBGRP_PEER(subgrp); |
1789 | afi = SUBGRP_AFI(subgrp); | |
1790 | safi = SUBGRP_SAFI(subgrp); | |
1791 | ||
1792 | if (peer->default_rmap[afi][safi].name) { | |
1793 | subgroup_default_originate(subgrp, 0); | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | return UPDWALK_CONTINUE; | |
0de4848d DS |
1798 | } |
1799 | ||
d62a17ae | 1800 | int update_group_refresh_default_originate_route_map(struct thread *thread) |
0de4848d | 1801 | { |
d62a17ae | 1802 | struct bgp *bgp; |
1803 | char reason[] = "refresh default-originate route-map"; | |
0de4848d | 1804 | |
d62a17ae | 1805 | bgp = THREAD_ARG(thread); |
1806 | update_group_walk(bgp, update_group_default_originate_route_map_walkcb, | |
1807 | reason); | |
50478845 | 1808 | thread_cancel(&bgp->t_rmap_def_originate_eval); |
d62a17ae | 1809 | bgp_unlock(bgp); |
ffd0c037 | 1810 | |
95f7965d | 1811 | return 0; |
0de4848d DS |
1812 | } |
1813 | ||
/*
 * peer_af_announce_route
 *
 * Refreshes routes out to a peer_af immediately.
 *
 * If the combine parameter is true, then this function will try to
 * gather other peers in the subgroup for which a route announcement
 * is pending and efficiently announce routes to all of them.
 *
 * For now, the 'combine' option has an effect only if all peers in
 * the subgroup have a route announcement pending.
 */
d62a17ae | 1826 | void peer_af_announce_route(struct peer_af *paf, int combine) |
3f9c7369 | 1827 | { |
d62a17ae | 1828 | struct update_subgroup *subgrp; |
1829 | struct peer_af *cur_paf; | |
1830 | int all_pending; | |
1831 | ||
1832 | subgrp = paf->subgroup; | |
1833 | all_pending = 0; | |
1834 | ||
1835 | if (combine) { | |
1836 | /* | |
1837 | * If there are other peers in the old subgroup that also need | |
1838 | * routes to be announced, pull them into the peer's new | |
1839 | * subgroup. | |
1840 | * Combine route announcement with other peers if possible. | |
1841 | * | |
1842 | * For now, we combine only if all peers in the subgroup have an | |
1843 | * announcement pending. | |
1844 | */ | |
1845 | all_pending = 1; | |
1846 | ||
a2addae8 | 1847 | SUBGRP_FOREACH_PEER (subgrp, cur_paf) { |
d62a17ae | 1848 | if (cur_paf == paf) |
1849 | continue; | |
1850 | ||
1851 | if (cur_paf->t_announce_route) | |
1852 | continue; | |
1853 | ||
1854 | all_pending = 0; | |
1855 | break; | |
1856 | } | |
1857 | } | |
1858 | /* | |
1859 | * Announce to the peer alone if we were not asked to combine peers, | |
1860 | * or if some peers don't have a route annoucement pending. | |
1861 | */ | |
1862 | if (!combine || !all_pending) { | |
1863 | update_subgroup_split_peer(paf, NULL); | |
7bfdba54 | 1864 | subgrp = paf->subgroup; |
d62a17ae | 1865 | |
7bfdba54 | 1866 | assert(subgrp && subgrp->update_group); |
d62a17ae | 1867 | if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0)) |
6cde4b45 | 1868 | zlog_debug("u%" PRIu64 ":s%" PRIu64" %s announcing routes", |
d62a17ae | 1869 | subgrp->update_group->id, subgrp->id, |
1870 | paf->peer->host); | |
1871 | ||
1872 | subgroup_announce_route(paf->subgroup); | |
1873 | return; | |
3f9c7369 | 1874 | } |
3f9c7369 | 1875 | |
d62a17ae | 1876 | /* |
1877 | * We will announce routes the entire subgroup. | |
1878 | * | |
1879 | * First stop refresh timers on all the other peers. | |
1880 | */ | |
a2addae8 | 1881 | SUBGRP_FOREACH_PEER (subgrp, cur_paf) { |
d62a17ae | 1882 | if (cur_paf == paf) |
1883 | continue; | |
3f9c7369 | 1884 | |
d62a17ae | 1885 | bgp_stop_announce_route_timer(cur_paf); |
1886 | } | |
3f9c7369 | 1887 | |
d62a17ae | 1888 | if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0)) |
6cde4b45 | 1889 | zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes to %s, combined into %d peers", |
d62a17ae | 1890 | subgrp->update_group->id, subgrp->id, |
1891 | paf->peer->host, subgrp->peer_count); | |
3f9c7369 | 1892 | |
d62a17ae | 1893 | subgroup_announce_route(subgrp); |
3f9c7369 | 1894 | |
d62a17ae | 1895 | SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined, |
1896 | subgrp->peer_count - 1); | |
3f9c7369 DS |
1897 | } |
1898 | ||
2fc102e1 QY |
1899 | void subgroup_trigger_write(struct update_subgroup *subgrp) |
1900 | { | |
1901 | struct peer_af *paf; | |
1902 | ||
becedef6 QY |
1903 | /* |
1904 | * For each peer in the subgroup, schedule a job to pull packets from | |
1905 | * the subgroup output queue into their own output queue. This action | |
1906 | * will trigger a write job on the I/O thread. | |
1907 | */ | |
996c9314 | 1908 | SUBGRP_FOREACH_PEER (subgrp, paf) |
feb17238 | 1909 | if (peer_established(paf->peer)) |
996c9314 LB |
1910 | thread_add_timer_msec( |
1911 | bm->master, bgp_generate_updgrp_packets, | |
1912 | paf->peer, 0, | |
1913 | &paf->peer->t_generate_updgrp_packets); | |
2fc102e1 QY |
1914 | } |
1915 | ||
/*
 * update_group_clear_update_dbg
 *
 * Walk callback: turn off per-peer update debugging on an update
 * group.  The arg parameter is unused; always continues the walk.
 */
int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
{
	UPDGRP_PEER_DBG_OFF(updgrp);
	return UPDWALK_CONTINUE;
}
adbac85e | 1921 | |
06370dac | 1922 | /* Return true if we should addpath encode NLRI to this peer */ |
d62a17ae | 1923 | int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi) |
adbac85e | 1924 | { |
d62a17ae | 1925 | return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV) |
1926 | && CHECK_FLAG(peer->af_cap[afi][safi], | |
1927 | PEER_CAP_ADDPATH_AF_RX_RCV)); | |
adbac85e | 1928 | } |