]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_updgrp_packet.c
Merge pull request #7217 from AnuradhaKaruppiah/fix-es-del-regression
[mirror_frr.git] / bgpd / bgp_updgrp_packet.c
1 /**
2 * bgp_updgrp_packet.c: BGP update group packet handling routines
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "queue.h"
45 #include "mpls.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_debug.h"
49 #include "bgpd/bgp_errors.h"
50 #include "bgpd/bgp_fsm.h"
51 #include "bgpd/bgp_route.h"
52 #include "bgpd/bgp_packet.h"
53 #include "bgpd/bgp_advertise.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_nexthop.h"
56 #include "bgpd/bgp_nht.h"
57 #include "bgpd/bgp_mplsvpn.h"
58 #include "bgpd/bgp_label.h"
59 #include "bgpd/bgp_addpath.h"
60
61 /********************
62 * PRIVATE FUNCTIONS
63 ********************/
64
65 /********************
66 * PUBLIC FUNCTIONS
67 ********************/
68 struct bpacket *bpacket_alloc(void)
69 {
70 struct bpacket *pkt;
71
72 pkt = XCALLOC(MTYPE_BGP_PACKET, sizeof(struct bpacket));
73
74 return pkt;
75 }
76
77 void bpacket_free(struct bpacket *pkt)
78 {
79 if (pkt->buffer)
80 stream_free(pkt->buffer);
81 pkt->buffer = NULL;
82 XFREE(MTYPE_BGP_PACKET, pkt);
83 }
84
/* Initialize the (empty) packet train of a bpacket queue. */
void bpacket_queue_init(struct bpacket_queue *q)
{
	TAILQ_INIT(&(q->pkts));
}
89
/*
 * bpacket_queue_sanity_check
 *
 * Debug-only consistency check on a packet queue. The body is
 * compiled out (#if 0), so this is currently a no-op; when enabled it
 * verifies that the last packet is the buffer-less sentinel and that
 * curr_count matches the actual number of packets on the train.
 */
void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__))
				* q)
{
#if 0
	struct bpacket *pkt;

	pkt = bpacket_queue_last (q);
	assert (pkt);
	assert (!pkt->buffer);

	/*
	 * Make sure the count of packets is correct.
	 */
	int num_pkts = 0;

	pkt = bpacket_queue_first (q);
	while (pkt)
	{
		num_pkts++;

		if (num_pkts > q->curr_count)
			assert (0);

		pkt = TAILQ_NEXT (pkt, pkt_train);
	}

	assert (num_pkts == q->curr_count);
#endif
}
122
123 /*
124 * bpacket_queue_add_packet
125 *
126 * Internal function of bpacket_queue - and adds a
127 * packet entry to the end of the list.
128 *
129 * Users of bpacket_queue should use bpacket_queue_add instead.
130 */
131 static void bpacket_queue_add_packet(struct bpacket_queue *q,
132 struct bpacket *pkt)
133 {
134 struct bpacket *last_pkt;
135
136 if (TAILQ_EMPTY(&(q->pkts)))
137 TAILQ_INSERT_TAIL(&(q->pkts), pkt, pkt_train);
138 else {
139 last_pkt = bpacket_queue_last(q);
140 TAILQ_INSERT_AFTER(&(q->pkts), last_pkt, pkt, pkt_train);
141 }
142 q->curr_count++;
143 if (q->hwm_count < q->curr_count)
144 q->hwm_count = q->curr_count;
145 }
146
/*
 * Adds a packet to the bpacket_queue.
 *
 * The stream passed is consumed by this function. So, the caller should
 * not free or use the stream after
 * invoking this function.
 *
 * The queue keeps a trailing buffer-less "sentinel" packet: new data is
 * written into the current sentinel and a fresh sentinel (with the next
 * version number) is appended behind it.
 */
struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
				  struct bpacket_attr_vec_arr *vecarrp)
{
	struct bpacket *pkt;
	struct bpacket *last_pkt;


	pkt = bpacket_alloc();
	if (TAILQ_EMPTY(&(q->pkts))) {
		/* First insertion: the new packet carries the data itself. */
		pkt->ver = 1;
		pkt->buffer = s;
		if (vecarrp)
			memcpy(&pkt->arr, vecarrp,
			       sizeof(struct bpacket_attr_vec_arr));
		else
			bpacket_attr_vec_arr_reset(&pkt->arr);
		bpacket_queue_add_packet(q, pkt);
		bpacket_queue_sanity_check(q);
		return pkt;
	}

	/*
	 * Fill in the new information into the current sentinel and create a
	 * new sentinel.
	 */
	bpacket_queue_sanity_check(q);
	last_pkt = bpacket_queue_last(q);
	assert(last_pkt->buffer == NULL);
	last_pkt->buffer = s;
	if (vecarrp)
		memcpy(&last_pkt->arr, vecarrp,
		       sizeof(struct bpacket_attr_vec_arr));
	else
		bpacket_attr_vec_arr_reset(&last_pkt->arr);

	/* The new sentinel gets the next version number. */
	pkt->ver = last_pkt->ver;
	pkt->ver++;
	bpacket_queue_add_packet(q, pkt);

	bpacket_queue_sanity_check(q);
	return last_pkt;
}
196
197 struct bpacket *bpacket_queue_first(struct bpacket_queue *q)
198 {
199 return (TAILQ_FIRST(&(q->pkts)));
200 }
201
202 struct bpacket *bpacket_queue_last(struct bpacket_queue *q)
203 {
204 return TAILQ_LAST(&(q->pkts), pkt_queue);
205 }
206
207 struct bpacket *bpacket_queue_remove(struct bpacket_queue *q)
208 {
209 struct bpacket *first;
210
211 first = bpacket_queue_first(q);
212 if (first) {
213 TAILQ_REMOVE(&(q->pkts), first, pkt_train);
214 q->curr_count--;
215 }
216 return first;
217 }
218
/*
 * Number of data packets on the queue; the trailing buffer-less
 * sentinel packet is excluded, hence the "- 1".
 */
unsigned int bpacket_queue_length(struct bpacket_queue *q)
{
	return q->curr_count - 1;
}
223
/*
 * High-water mark of data packets seen on the queue; excludes the
 * sentinel packet, hence the "- 1".
 */
unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q)
{
	return q->hwm_count - 1;
}
228
229 bool bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q)
230 {
231 if (q->curr_count >= bgp->default_subgroup_pkt_queue_max)
232 return true;
233 return false;
234 }
235
236 void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf)
237 {
238 if (!pkt || !paf)
239 return;
240
241 LIST_INSERT_HEAD(&(pkt->peers), paf, pkt_train);
242 paf->next_pkt_to_send = pkt;
243 }
244
/*
 * bpacket_queue_cleanup
 *
 * Drain the queue, freeing every packet on it.
 */
void bpacket_queue_cleanup(struct bpacket_queue *q)
{
	struct bpacket *pkt;

	for (pkt = bpacket_queue_remove(q); pkt; pkt = bpacket_queue_remove(q))
		bpacket_free(pkt);
}
256
257 /*
258 * bpacket_queue_compact
259 *
260 * Delete packets that do not need to be transmitted to any peer from
261 * the queue.
262 *
263 * @return the number of packets deleted.
264 */
265 static int bpacket_queue_compact(struct bpacket_queue *q)
266 {
267 int num_deleted;
268 struct bpacket *pkt, *removed_pkt;
269
270 num_deleted = 0;
271
272 while (1) {
273 pkt = bpacket_queue_first(q);
274 if (!pkt)
275 break;
276
277 /*
278 * Don't delete the sentinel.
279 */
280 if (!pkt->buffer)
281 break;
282
283 if (!LIST_EMPTY(&(pkt->peers)))
284 break;
285
286 removed_pkt = bpacket_queue_remove(q);
287 assert(pkt == removed_pkt);
288 bpacket_free(removed_pkt);
289
290 num_deleted++;
291 }
292
293 bpacket_queue_sanity_check(q);
294 return num_deleted;
295 }
296
/*
 * Move a peer from the packet it just transmitted onto the next packet
 * in the train, then compact the queue; if compaction freed anything,
 * check whether this peer's subgroup can now be merged elsewhere.
 */
void bpacket_queue_advance_peer(struct peer_af *paf)
{
	struct bpacket *pkt;
	struct bpacket *old_pkt;

	old_pkt = paf->next_pkt_to_send;
	if (old_pkt->buffer == NULL)
		/* Already at end of list */
		return;

	LIST_REMOVE(paf, pkt_train);
	pkt = TAILQ_NEXT(old_pkt, pkt_train);
	bpacket_add_peer(pkt, paf);

	if (!bpacket_queue_compact(PAF_PKTQ(paf)))
		return;

	/*
	 * Deleted one or more packets. Check if we can now merge this
	 * peer's subgroup into another subgroup.
	 */
	update_subgroup_check_merge(paf->subgroup, "advanced peer in queue");
}
320
321 /*
322 * bpacket_queue_remove_peer
323 *
324 * Remove the peer from the packet queue of the subgroup it belongs
325 * to.
326 */
327 void bpacket_queue_remove_peer(struct peer_af *paf)
328 {
329 struct bpacket_queue *q;
330
331 q = PAF_PKTQ(paf);
332 assert(q);
333
334 LIST_REMOVE(paf, pkt_train);
335 paf->next_pkt_to_send = NULL;
336
337 bpacket_queue_compact(q);
338 }
339
340 unsigned int bpacket_queue_virtual_length(struct peer_af *paf)
341 {
342 struct bpacket *pkt;
343 struct bpacket *last;
344 struct bpacket_queue *q;
345
346 pkt = paf->next_pkt_to_send;
347 if (!pkt || (pkt->buffer == NULL))
348 /* Already at end of list */
349 return 0;
350
351 q = PAF_PKTQ(paf);
352 if (TAILQ_EMPTY(&(q->pkts)))
353 return 0;
354
355 last = TAILQ_LAST(&(q->pkts), pkt_queue);
356 if (last->ver >= pkt->ver)
357 return last->ver - pkt->ver;
358
359 /* sequence # rolled over */
360 return (UINT_MAX - pkt->ver + 1) + last->ver;
361 }
362
363 /*
364 * Dump the bpacket queue
365 */
366 void bpacket_queue_show_vty(struct bpacket_queue *q, struct vty *vty)
367 {
368 struct bpacket *pkt;
369 struct peer_af *paf;
370
371 pkt = bpacket_queue_first(q);
372 while (pkt) {
373 vty_out(vty, " Packet %p ver %u buffer %p\n", pkt, pkt->ver,
374 pkt->buffer);
375
376 LIST_FOREACH (paf, &(pkt->peers), pkt_train) {
377 vty_out(vty, " - %s\n", paf->peer->host);
378 }
379 pkt = bpacket_next(pkt);
380 }
381 return;
382 }
383
/*
 * Duplicate a queued packet and rewrite the next-hop field(s) in the
 * copy as appropriate for the given peer. Returns the duplicated
 * stream (owned by the caller), or NULL on an invalid nexthop length.
 */
struct stream *bpacket_reformat_for_peer(struct bpacket *pkt,
					 struct peer_af *paf)
{
	struct stream *s = NULL;
	bpacket_attr_vec *vec;
	struct peer *peer;
	char buf[BUFSIZ];
	char buf2[BUFSIZ];
	struct bgp_filter *filter;

	s = stream_dup(pkt->buffer);
	peer = PAF_PEER(paf);

	vec = &pkt->arr.entries[BGP_ATTR_VEC_NH];

	/* If the nexthop was not flagged for rewrite, send the copy as-is. */
	if (!CHECK_FLAG(vec->flags, BPKT_ATTRVEC_FLAGS_UPDATED))
		return s;

	uint8_t nhlen;
	afi_t nhafi;
	int route_map_sets_nh;

	/* vec->offset points at the nexthop length octet in the stream. */
	nhlen = stream_getc_from(s, vec->offset);
	filter = &peer->filter[paf->afi][paf->safi];

	if (peer_cap_enhe(peer, paf->afi, paf->safi))
		nhafi = AFI_IP6;
	else
		nhafi = BGP_NEXTHOP_AFI_FROM_NHLEN(nhlen);

	if (nhafi == AFI_IP) {
		struct in_addr v4nh, *mod_v4nh;
		int nh_modified = 0;
		size_t offset_nh = vec->offset + 1;

		route_map_sets_nh =
			(CHECK_FLAG(vec->flags,
				    BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED)
			 || CHECK_FLAG(
				 vec->flags,
				 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));

		switch (nhlen) {
		case BGP_ATTR_NHLEN_IPV4:
			break;
		case BGP_ATTR_NHLEN_VPNV4:
			/* Skip the 8-byte Route Distinguisher. */
			offset_nh += 8;
			break;
		default:
			/* TODO: handle IPv6 nexthops */
			flog_warn(
				EC_BGP_INVALID_NEXTHOP_LENGTH,
				"%s: %s: invalid MP nexthop length (AFI IP): %u",
				__func__, peer->host, nhlen);
			stream_free(s);
			return NULL;
		}

		stream_get_from(&v4nh, s, offset_nh, IPV4_MAX_BYTELEN);
		mod_v4nh = &v4nh;

		/*
		 * If route-map has set the nexthop, that is normally
		 * used; if it is specified as peer-address, the peering
		 * address is picked up. Otherwise, if NH is unavailable
		 * from attribute, the peering addr is picked up; the
		 * "NH unavailable" case also covers next-hop-self and
		 * some other scenarios - see subgroup_announce_check().
		 * In all other cases, use the nexthop carried in the
		 * attribute unless it is EBGP non-multiaccess and there
		 * is no next-hop-unchanged setting or the peer is EBGP
		 * and the route-map that changed the next-hop value
		 * was applied inbound rather than outbound. Updates to
		 * an EBGP peer should only modify the next-hop if it
		 * was set in an outbound route-map to that peer.
		 * Note: It is assumed route-map cannot set the nexthop
		 * to an invalid value.
		 */
		if (route_map_sets_nh
		    && ((peer->sort != BGP_PEER_EBGP)
			|| ROUTE_MAP_OUT(filter))) {
			if (CHECK_FLAG(
				    vec->flags,
				    BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
				mod_v4nh = &peer->nexthop.v4;
				nh_modified = 1;
			}
		} else if (v4nh.s_addr == INADDR_ANY) {
			mod_v4nh = &peer->nexthop.v4;
			nh_modified = 1;
		} else if (peer->sort == BGP_PEER_EBGP
			   && (bgp_multiaccess_check_v4(v4nh, peer) == 0)
			   && !CHECK_FLAG(vec->flags,
					  BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
			   && !peer_af_flag_check(
				   peer, paf->afi, paf->safi,
				   PEER_FLAG_NEXTHOP_UNCHANGED)) {
			/* NOTE: not handling case where NH has new AFI
			 */
			mod_v4nh = &peer->nexthop.v4;
			nh_modified = 1;
		}

		if (nh_modified) /* allow for VPN RD */
			stream_put_in_addr_at(s, offset_nh, mod_v4nh);

		if (bgp_debug_update(peer, NULL, NULL, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " %s send UPDATE w/ nexthop %pI4%s",
				   PAF_SUBGRP(paf)->update_group->id,
				   PAF_SUBGRP(paf)->id, peer->host, mod_v4nh,
				   (nhlen == BGP_ATTR_NHLEN_VPNV4 ? " and RD"
								  : ""));
	} else if (nhafi == AFI_IP6) {
		struct in6_addr v6nhglobal, *mod_v6nhg;
		struct in6_addr v6nhlocal, *mod_v6nhl;
		int gnh_modified, lnh_modified;
		size_t offset_nhglobal = vec->offset + 1;
		size_t offset_nhlocal = vec->offset + 1;

		gnh_modified = lnh_modified = 0;
		mod_v6nhg = &v6nhglobal;
		mod_v6nhl = &v6nhlocal;

		route_map_sets_nh =
			(CHECK_FLAG(vec->flags,
				    BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED)
			 || CHECK_FLAG(
				 vec->flags,
				 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));

		/*
		 * The logic here is rather similar to that for IPv4, the
		 * additional work being to handle 1 or 2 nexthops.
		 * Also, 3rd party nexthop is not propagated for EBGP
		 * right now.
		 */
		switch (nhlen) {
		case BGP_ATTR_NHLEN_IPV6_GLOBAL:
			break;
		case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
			/* Link-local follows the 16-byte global nexthop. */
			offset_nhlocal += IPV6_MAX_BYTELEN;
			break;
		case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
			/* Skip the 8-byte Route Distinguisher. */
			offset_nhglobal += 8;
			break;
		case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
			/* Two RDs plus the global nexthop precede the LL. */
			offset_nhglobal += 8;
			offset_nhlocal += 8 * 2 + IPV6_MAX_BYTELEN;
			break;
		default:
			/* TODO: handle IPv4 nexthops */
			flog_warn(
				EC_BGP_INVALID_NEXTHOP_LENGTH,
				"%s: %s: invalid MP nexthop length (AFI IP6): %u",
				__func__, peer->host, nhlen);
			stream_free(s);
			return NULL;
		}

		stream_get_from(&v6nhglobal, s, offset_nhglobal,
				IPV6_MAX_BYTELEN);

		/*
		 * Updates to an EBGP peer should only modify the
		 * next-hop if it was set in an outbound route-map
		 * to that peer.
		 */
		if (route_map_sets_nh
		    && ((peer->sort != BGP_PEER_EBGP)
			|| ROUTE_MAP_OUT(filter))) {
			if (CHECK_FLAG(
				    vec->flags,
				    BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
				mod_v6nhg = &peer->nexthop.v6_global;
				gnh_modified = 1;
			}
		} else if (IN6_IS_ADDR_UNSPECIFIED(&v6nhglobal)) {
			mod_v6nhg = &peer->nexthop.v6_global;
			gnh_modified = 1;
		} else if ((peer->sort == BGP_PEER_EBGP)
			   && (!bgp_multiaccess_check_v6(v6nhglobal, peer))
			   && !CHECK_FLAG(vec->flags,
					  BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
			   && !peer_af_flag_check(
				   peer, nhafi, paf->safi,
				   PEER_FLAG_NEXTHOP_UNCHANGED)) {
			/* NOTE: not handling case where NH has new AFI
			 */
			mod_v6nhg = &peer->nexthop.v6_global;
			gnh_modified = 1;
		}

		/* Fall back to a v4-mapped v6 address when no global NH. */
		if (IN6_IS_ADDR_UNSPECIFIED(mod_v6nhg)) {
			if (peer->nexthop.v4.s_addr) {
				ipv4_to_ipv4_mapped_ipv6(mod_v6nhg,
							 peer->nexthop.v4);
			}
		}

		if (IS_MAPPED_IPV6(&peer->nexthop.v6_global)) {
			mod_v6nhg = &peer->nexthop.v6_global;
			gnh_modified = 1;
		}

		if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
		    || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
			stream_get_from(&v6nhlocal, s, offset_nhlocal,
					IPV6_MAX_BYTELEN);
			if (IN6_IS_ADDR_UNSPECIFIED(&v6nhlocal)) {
				mod_v6nhl = &peer->nexthop.v6_local;
				lnh_modified = 1;
			}
		}

		if (gnh_modified)
			stream_put_in6_addr_at(s, offset_nhglobal, mod_v6nhg);
		if (lnh_modified)
			stream_put_in6_addr_at(s, offset_nhlocal, mod_v6nhl);

		if (bgp_debug_update(peer, NULL, NULL, 0)) {
			if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
			    || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
				zlog_debug(
					"u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ mp_nexthops %s, %s%s",
					PAF_SUBGRP(paf)->update_group->id,
					PAF_SUBGRP(paf)->id, peer->host,
					inet_ntop(AF_INET6, mod_v6nhg, buf,
						  BUFSIZ),
					inet_ntop(AF_INET6, mod_v6nhl, buf2,
						  BUFSIZ),
					(nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL
						 ? " and RD"
						 : ""));
			else
				zlog_debug("u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ mp_nexthop %s%s",
					   PAF_SUBGRP(paf)->update_group->id,
					   PAF_SUBGRP(paf)->id, peer->host,
					   inet_ntop(AF_INET6, mod_v6nhg, buf,
						     BUFSIZ),
					   (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL
						    ? " and RD"
						    : ""));
		}
	} else if (paf->afi == AFI_L2VPN) {
		struct in_addr v4nh, *mod_v4nh;
		int nh_modified = 0;

		stream_get_from(&v4nh, s, vec->offset + 1, 4);
		mod_v4nh = &v4nh;

		/* No route-map changes allowed for EVPN nexthops. */
		if (v4nh.s_addr == INADDR_ANY) {
			mod_v4nh = &peer->nexthop.v4;
			nh_modified = 1;
		}

		if (nh_modified)
			stream_put_in_addr_at(s, vec->offset + 1, mod_v4nh);

		if (bgp_debug_update(peer, NULL, NULL, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64
				   " %s send UPDATE w/ nexthop %pI4",
				   PAF_SUBGRP(paf)->update_group->id,
				   PAF_SUBGRP(paf)->id, peer->host, mod_v4nh);
	}

	return s;
}
653
654 /*
655 * Update the vecarr offsets to go beyond 'pos' bytes, i.e. add 'pos'
656 * to each offset.
657 */
658 static void bpacket_attr_vec_arr_update(struct bpacket_attr_vec_arr *vecarr,
659 size_t pos)
660 {
661 int i;
662
663 if (!vecarr)
664 return;
665
666 for (i = 0; i < BGP_ATTR_VEC_MAX; i++)
667 vecarr->entries[i].offset += pos;
668 }
669
670 /*
671 * Return if there are packets to build for this subgroup.
672 */
673 bool subgroup_packets_to_build(struct update_subgroup *subgrp)
674 {
675 struct bgp_advertise *adv;
676
677 if (!subgrp)
678 return false;
679
680 adv = bgp_adv_fifo_first(&subgrp->sync->withdraw);
681 if (adv)
682 return true;
683
684 adv = bgp_adv_fifo_first(&subgrp->sync->update);
685 if (adv)
686 return true;
687
688 return false;
689 }
690
/*
 * Make BGP update packet.
 *
 * Drains reachable advertisements from the subgroup's update FIFO into
 * the subgroup's work/scratch streams (main packet + MP_REACH_NLRI),
 * stopping when the packet is full, and enqueues the result on the
 * subgroup's packet queue. Returns the queued bpacket, or NULL when
 * there was nothing to send or the queue is full.
 */
struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
{
	struct bpacket_attr_vec_arr vecarr;
	struct bpacket *pkt;
	struct peer *peer;
	struct stream *s;
	struct stream *snlri;
	struct stream *packet;
	struct bgp_adj_out *adj;
	struct bgp_advertise *adv;
	struct bgp_dest *dest = NULL;
	struct bgp_path_info *path = NULL;
	bgp_size_t total_attr_len = 0;
	unsigned long attrlen_pos = 0;
	size_t mpattrlen_pos = 0;
	size_t mpattr_pos = 0;
	afi_t afi;
	safi_t safi;
	int space_remaining = 0;
	int space_needed = 0;
	char send_attr_str[BUFSIZ];
	int send_attr_printed = 0;
	int num_pfx = 0;
	int addpath_encode = 0;
	int addpath_overhead = 0;
	uint32_t addpath_tx_id = 0;
	struct prefix_rd *prd = NULL;
	mpls_label_t label = MPLS_INVALID_LABEL, *label_pnt = NULL;
	uint32_t num_labels = 0;

	if (!subgrp)
		return NULL;

	if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
		return NULL;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	s = subgrp->work;
	stream_reset(s);
	snlri = subgrp->scratch;
	stream_reset(snlri);

	bpacket_attr_vec_arr_reset(&vecarr);

	addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
	addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;

	adv = bgp_adv_fifo_first(&subgrp->sync->update);
	while (adv) {
		const struct prefix *dest_p;

		assert(adv->dest);
		dest = adv->dest;
		dest_p = bgp_dest_get_prefix(dest);
		adj = adv->adj;
		addpath_tx_id = adj->addpath_tx_id;
		path = adv->pathi;

		/* Check if we need to add a prefix to the packet if
		 * maximum-prefix-out is set for the peer.
		 */
		if (CHECK_FLAG(peer->af_flags[afi][safi],
			       PEER_FLAG_MAX_PREFIX_OUT)
		    && subgrp->scount >= peer->pmax_out[afi][safi]) {
			if (BGP_DEBUG(update, UPDATE_OUT)
			    || BGP_DEBUG(update, UPDATE_PREFIX)) {
				zlog_debug(
					"%s reached maximum prefix to be send (%u)",
					peer->host, peer->pmax_out[afi][safi]);
			}
			goto next;
		}

		space_remaining = STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
				  - BGP_MAX_PACKET_SIZE_OVERFLOW;
		space_needed =
			BGP_NLRI_LENGTH + addpath_overhead
			+ bgp_packet_mpattr_prefix_size(afi, safi, dest_p);

		/* When remaining space can't include NLRI and it's length. */
		if (space_remaining < space_needed)
			break;

		/* If packet is empty, set attribute. */
		if (stream_empty(s)) {
			struct peer *from = NULL;

			if (path)
				from = path->peer;

			/* 1: Write the BGP message header - 16 bytes marker, 2
			 * bytes length,
			 * one byte message type.
			 */
			bgp_packet_set_marker(s, BGP_MSG_UPDATE);

			/* 2: withdrawn routes length */
			stream_putw(s, 0);

			/* 3: total attributes length - attrlen_pos stores the
			 * position */
			attrlen_pos = stream_get_endp(s);
			stream_putw(s, 0);

			/* 4: if there is MP_REACH_NLRI attribute, that should
			 * be the first
			 * attribute, according to
			 * draft-ietf-idr-error-handling. Save the
			 * position.
			 */
			mpattr_pos = stream_get_endp(s);

			/* 5: Encode all the attributes, except MP_REACH_NLRI
			 * attr. */
			total_attr_len = bgp_packet_attribute(
				NULL, peer, s, adv->baa->attr, &vecarr, NULL,
				afi, safi, from, NULL, NULL, 0, 0, 0);

			space_remaining =
				STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
				- BGP_MAX_PACKET_SIZE_OVERFLOW;
			space_needed = BGP_NLRI_LENGTH + addpath_overhead
				       + bgp_packet_mpattr_prefix_size(
					       afi, safi, dest_p);

			/* If the attributes alone do not leave any room for
			 * NLRI then
			 * return */
			if (space_remaining < space_needed) {
				flog_err(
					EC_BGP_UPDGRP_ATTR_LEN,
					"u%" PRIu64 ":s%" PRIu64" attributes too long, cannot send UPDATE",
					subgrp->update_group->id, subgrp->id);

				/* Flush the FIFO update queue */
				while (adv)
					adv = bgp_advertise_clean_subgroup(
						subgrp, adj);
				return NULL;
			}

			if (BGP_DEBUG(update, UPDATE_OUT)
			    || BGP_DEBUG(update, UPDATE_PREFIX)) {
				memset(send_attr_str, 0, BUFSIZ);
				send_attr_printed = 0;
				bgp_dump_attr(adv->baa->attr, send_attr_str,
					      sizeof(send_attr_str));
			}
		}

		if ((afi == AFI_IP && safi == SAFI_UNICAST)
		    && !peer_cap_enhe(peer, afi, safi))
			stream_put_prefix_addpath(s, dest_p, addpath_encode,
						  addpath_tx_id);
		else {
			/* Encode the prefix in MP_REACH_NLRI attribute */
			if (dest->pdest)
				prd = (struct prefix_rd *)bgp_dest_get_prefix(
					dest->pdest);

			if (safi == SAFI_LABELED_UNICAST) {
				label = bgp_adv_label(dest, path, peer, afi,
						      safi);
				label_pnt = &label;
				num_labels = 1;
			} else if (path && path->extra) {
				label_pnt = &path->extra->label[0];
				num_labels = path->extra->num_labels;
			}

			if (stream_empty(snlri))
				mpattrlen_pos = bgp_packet_mpattr_start(
					snlri, peer, afi, safi, &vecarr,
					adv->baa->attr);

			bgp_packet_mpattr_prefix(snlri, afi, safi, dest_p, prd,
						 label_pnt, num_labels,
						 addpath_encode, addpath_tx_id,
						 adv->baa->attr);
		}

		num_pfx++;

		if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
			char pfx_buf[BGP_PRD_PATH_STRLEN];

			if (!send_attr_printed) {
				zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE w/ attr: %s",
					   subgrp->update_group->id, subgrp->id,
					   send_attr_str);
				if (!stream_empty(snlri)) {
					iana_afi_t pkt_afi;
					iana_safi_t pkt_safi;

					pkt_afi = afi_int2iana(afi);
					pkt_safi = safi_int2iana(safi);
					zlog_debug(
						"u%" PRIu64 ":s%" PRIu64" send MP_REACH for afi/safi %d/%d",
						subgrp->update_group->id,
						subgrp->id, pkt_afi, pkt_safi);
				}

				send_attr_printed = 1;
			}

			bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p,
						label_pnt, num_labels,
						addpath_encode, addpath_tx_id,
						pfx_buf, sizeof(pfx_buf));
			zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s",
				   subgrp->update_group->id, subgrp->id,
				   pfx_buf);
		}

		/* Synchnorize attribute. */
		if (adj->attr)
			bgp_attr_unintern(&adj->attr);
		else
			subgrp->scount++;

		adj->attr = bgp_attr_intern(adv->baa->attr);
	next:
		adv = bgp_advertise_clean_subgroup(subgrp, adj);
	}

	if (!stream_empty(s)) {
		if (!stream_empty(snlri)) {
			bgp_packet_mpattr_end(snlri, mpattrlen_pos);
			total_attr_len += stream_get_endp(snlri);
		}

		/* set the total attribute length correctly */
		stream_putw_at(s, attrlen_pos, total_attr_len);

		if (!stream_empty(snlri)) {
			packet = stream_dupcat(s, snlri, mpattr_pos);
			bpacket_attr_vec_arr_update(&vecarr, mpattr_pos);
		} else
			packet = stream_dup(s);
		bgp_packet_set_size(packet);
		if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE len %zd numpfx %d",
				   subgrp->update_group->id, subgrp->id,
				   (stream_get_endp(packet)
				    - stream_get_getp(packet)),
				   num_pfx);
		pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), packet, &vecarr);
		stream_reset(s);
		stream_reset(snlri);
		return pkt;
	}
	return NULL;
}
947
/* Make BGP withdraw packet.
 *
 * Drains the subgroup's withdraw FIFO into a single UPDATE message
 * (plain withdrawn-routes section for IPv4 unicast, MP_UNREACH_NLRI
 * otherwise) and enqueues it. Returns the queued bpacket, or NULL when
 * there was nothing to withdraw or the queue is full.
 */
/* For ipv4 unicast:
   16-octet marker | 2-octet length | 1-octet type |
   2-octet withdrawn route length | withdrawn prefixes | 2-octet attrlen (=0)
*/
/* For other afi/safis:
   16-octet marker | 2-octet length | 1-octet type |
   2-octet withdrawn route length (=0) | 2-octet attrlen |
   mp_unreach attr type | attr len | afi | safi | withdrawn prefixes
*/
struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
{
	struct bpacket *pkt;
	struct stream *s;
	struct bgp_adj_out *adj;
	struct bgp_advertise *adv;
	struct peer *peer;
	struct bgp_dest *dest;
	bgp_size_t unfeasible_len;
	bgp_size_t total_attr_len;
	size_t mp_start = 0;
	size_t attrlen_pos = 0;
	size_t mplen_pos = 0;
	uint8_t first_time = 1;
	afi_t afi;
	safi_t safi;
	int space_remaining = 0;
	int space_needed = 0;
	int num_pfx = 0;
	int addpath_encode = 0;
	int addpath_overhead = 0;
	uint32_t addpath_tx_id = 0;
	const struct prefix_rd *prd = NULL;


	if (!subgrp)
		return NULL;

	if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
		return NULL;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	s = subgrp->work;
	stream_reset(s);
	addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
	addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;

	while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) {
		const struct prefix *dest_p;

		assert(adv->dest);
		adj = adv->adj;
		dest = adv->dest;
		dest_p = bgp_dest_get_prefix(dest);
		addpath_tx_id = adj->addpath_tx_id;

		space_remaining =
			STREAM_WRITEABLE(s) - BGP_MAX_PACKET_SIZE_OVERFLOW;
		space_needed =
			BGP_NLRI_LENGTH + addpath_overhead + BGP_TOTAL_ATTR_LEN
			+ bgp_packet_mpattr_prefix_size(afi, safi, dest_p);

		/* Packet full: leave the rest for the next packet. */
		if (space_remaining < space_needed)
			break;

		if (stream_empty(s)) {
			bgp_packet_set_marker(s, BGP_MSG_UPDATE);
			stream_putw(s, 0); /* unfeasible routes length */
		} else
			first_time = 0;

		if (afi == AFI_IP && safi == SAFI_UNICAST
		    && !peer_cap_enhe(peer, afi, safi))
			stream_put_prefix_addpath(s, dest_p, addpath_encode,
						  addpath_tx_id);
		else {
			if (dest->pdest)
				prd = (struct prefix_rd *)bgp_dest_get_prefix(
					dest->pdest);

			/* If first time, format the MP_UNREACH header
			 */
			if (first_time) {
				iana_afi_t pkt_afi;
				iana_safi_t pkt_safi;

				pkt_afi = afi_int2iana(afi);
				pkt_safi = safi_int2iana(safi);

				attrlen_pos = stream_get_endp(s);
				/* total attr length = 0 for now.
				 * reevaluate later */
				stream_putw(s, 0);
				mp_start = stream_get_endp(s);
				mplen_pos = bgp_packet_mpunreach_start(s, afi,
								       safi);
				if (bgp_debug_update(NULL, NULL,
						     subgrp->update_group, 0))
					zlog_debug(
						"u%" PRIu64 ":s%" PRIu64" send MP_UNREACH for afi/safi %d/%d",
						subgrp->update_group->id,
						subgrp->id, pkt_afi, pkt_safi);
			}

			bgp_packet_mpunreach_prefix(s, dest_p, afi, safi, prd,
						    NULL, 0, addpath_encode,
						    addpath_tx_id, NULL);
		}

		num_pfx++;

		if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
			char pfx_buf[BGP_PRD_PATH_STRLEN];

			bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p, NULL, 0,
						addpath_encode, addpath_tx_id,
						pfx_buf, sizeof(pfx_buf));
			zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE %s -- unreachable",
				   subgrp->update_group->id, subgrp->id,
				   pfx_buf);
		}

		subgrp->scount--;

		bgp_adj_out_remove_subgroup(dest, adj, subgrp);
		bgp_dest_unlock_node(dest);
	}

	if (!stream_empty(s)) {
		if (afi == AFI_IP && safi == SAFI_UNICAST
		    && !peer_cap_enhe(peer, afi, safi)) {
			/* Backfill the withdrawn-routes length and close with
			 * a zero total-attribute length. */
			unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
					 - BGP_UNFEASIBLE_LEN;
			stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
			stream_putw(s, 0);
		} else {
			/* Set the mp_unreach attr's length */
			bgp_packet_mpunreach_end(s, mplen_pos);

			/* Set total path attribute length. */
			total_attr_len = stream_get_endp(s) - mp_start;
			stream_putw_at(s, attrlen_pos, total_attr_len);
		}
		bgp_packet_set_size(s);
		if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE (withdraw) len %zd numpfx %d",
				   subgrp->update_group->id, subgrp->id,
				   (stream_get_endp(s) - stream_get_getp(s)),
				   num_pfx);
		pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), stream_dup(s),
					NULL);
		stream_reset(s);
		return pkt;
	}

	return NULL;
}
1107
/*
 * Build and enqueue an UPDATE advertising the default route (0.0.0.0/0
 * or ::/0 for the subgroup's AFI) with the given attributes, then
 * trigger a write for the subgroup.
 */
void subgroup_default_update_packet(struct update_subgroup *subgrp,
				    struct attr *attr, struct peer *from)
{
	struct stream *s;
	struct peer *peer;
	struct prefix p;
	unsigned long pos;
	bgp_size_t total_attr_len;
	afi_t afi;
	safi_t safi;
	struct bpacket_attr_vec_arr vecarr;
	int addpath_encode = 0;

	if (DISABLE_BGP_ANNOUNCE)
		return;

	if (!subgrp)
		return;

	peer = SUBGRP_PEER(subgrp);
	afi = SUBGRP_AFI(subgrp);
	safi = SUBGRP_SAFI(subgrp);
	bpacket_attr_vec_arr_reset(&vecarr);
	addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);

	/* Zero-length prefix in the subgroup's address family. */
	memset(&p, 0, sizeof(p));
	p.family = afi2family(afi);
	p.prefixlen = 0;

	/* Logging the attribute. */
	if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
		char attrstr[BUFSIZ];
		/* ' with addpath ID '          17
		 * max strlen of uint32       + 10
		 * +/- (just in case)         +  1
		 * null terminator            +  1
		 * ============================ 29 */
		char tx_id_buf[30];

		attrstr[0] = '\0';

		bgp_dump_attr(attr, attrstr, sizeof(attrstr));

		if (addpath_encode)
			snprintf(tx_id_buf, sizeof(tx_id_buf),
				 " with addpath ID %u",
				 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
		else
			tx_id_buf[0] = '\0';

		zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %pFX%s %s",
			   (SUBGRP_UPDGRP(subgrp))->id, subgrp->id, &p,
			   tx_id_buf, attrstr);
	}

	s = stream_new(BGP_MAX_PACKET_SIZE);

	/* Make BGP update packet. */
	bgp_packet_set_marker(s, BGP_MSG_UPDATE);

	/* Unfeasible Routes Length. */
	stream_putw(s, 0);

	/* Make place for total attribute length. */
	pos = stream_get_endp(s);
	stream_putw(s, 0);
	total_attr_len = bgp_packet_attribute(
		NULL, peer, s, attr, &vecarr, &p, afi, safi, from, NULL, NULL,
		0, addpath_encode, BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);

	/* Set Total Path Attribute Length. */
	stream_putw_at(s, pos, total_attr_len);

	/* NLRI set. */
	if (p.family == AF_INET && safi == SAFI_UNICAST
	    && !peer_cap_enhe(peer, afi, safi))
		stream_put_prefix_addpath(
			s, &p, addpath_encode,
			BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);

	/* Set size. */
	bgp_packet_set_size(s);

	/* Queue consumes the stream; do not use s afterwards. */
	(void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
	subgroup_trigger_write(subgrp);
}
1194
1195 void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
1196 {
1197 struct peer *peer;
1198 struct stream *s;
1199 struct prefix p;
1200 unsigned long attrlen_pos = 0;
1201 unsigned long cp;
1202 bgp_size_t unfeasible_len;
1203 bgp_size_t total_attr_len = 0;
1204 size_t mp_start = 0;
1205 size_t mplen_pos = 0;
1206 afi_t afi;
1207 safi_t safi;
1208 int addpath_encode = 0;
1209
1210 if (DISABLE_BGP_ANNOUNCE)
1211 return;
1212
1213 peer = SUBGRP_PEER(subgrp);
1214 afi = SUBGRP_AFI(subgrp);
1215 safi = SUBGRP_SAFI(subgrp);
1216 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1217
1218 memset(&p, 0, sizeof(p));
1219 p.family = afi2family(afi);
1220 p.prefixlen = 0;
1221
1222 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1223 /* ' with addpath ID ' 17
1224 * max strlen of uint32 + 10
1225 * +/- (just in case) + 1
1226 * null terminator + 1
1227 * ============================ 29 */
1228 char tx_id_buf[30];
1229
1230 if (addpath_encode)
1231 snprintf(tx_id_buf, sizeof(tx_id_buf),
1232 " with addpath ID %u",
1233 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1234
1235 zlog_debug("u%" PRIu64 ":s%" PRIu64
1236 " send UPDATE %pFX%s -- unreachable",
1237 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id, &p,
1238 tx_id_buf);
1239 }
1240
1241 s = stream_new(BGP_MAX_PACKET_SIZE);
1242
1243 /* Make BGP update packet. */
1244 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1245
1246 /* Unfeasible Routes Length. */;
1247 cp = stream_get_endp(s);
1248 stream_putw(s, 0);
1249
1250 /* Withdrawn Routes. */
1251 if (p.family == AF_INET && safi == SAFI_UNICAST
1252 && !peer_cap_enhe(peer, afi, safi)) {
1253 stream_put_prefix_addpath(
1254 s, &p, addpath_encode,
1255 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1256
1257 unfeasible_len = stream_get_endp(s) - cp - 2;
1258
1259 /* Set unfeasible len. */
1260 stream_putw_at(s, cp, unfeasible_len);
1261
1262 /* Set total path attribute length. */
1263 stream_putw(s, 0);
1264 } else {
1265 attrlen_pos = stream_get_endp(s);
1266 stream_putw(s, 0);
1267 mp_start = stream_get_endp(s);
1268 mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
1269 bgp_packet_mpunreach_prefix(
1270 s, &p, afi, safi, NULL, NULL, 0, addpath_encode,
1271 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE, NULL);
1272
1273 /* Set the mp_unreach attr's length */
1274 bgp_packet_mpunreach_end(s, mplen_pos);
1275
1276 /* Set total path attribute length. */
1277 total_attr_len = stream_get_endp(s) - mp_start;
1278 stream_putw_at(s, attrlen_pos, total_attr_len);
1279 }
1280
1281 bgp_packet_set_size(s);
1282
1283 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
1284 subgroup_trigger_write(subgrp);
1285 }
1286
1287 static void
1288 bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr,
1289 bpacket_attr_vec_type type,
1290 struct attr *attr)
1291 {
1292 if (CHECK_FLAG(attr->rmap_change_flags,
1293 BATTR_RMAP_NEXTHOP_PEER_ADDRESS))
1294 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1295 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS);
1296
1297 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_REFLECTED))
1298 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1299 BPKT_ATTRVEC_FLAGS_REFLECTED);
1300
1301 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED))
1302 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1303 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED);
1304
1305 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_IPV4_NHOP_CHANGED))
1306 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1307 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED);
1308
1309 if (CHECK_FLAG(attr->rmap_change_flags,
1310 BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED))
1311 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1312 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED);
1313
1314 if (CHECK_FLAG(attr->rmap_change_flags,
1315 BATTR_RMAP_IPV6_LL_NHOP_CHANGED))
1316 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1317 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_LNH_CHANGED);
1318 }
1319
1320 /* Reset the Attributes vector array. The vector array is used to override
1321 * certain output parameters in the packet for a particular peer
1322 */
1323 void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr)
1324 {
1325 int i;
1326
1327 if (!vecarr)
1328 return;
1329
1330 i = 0;
1331 while (i < BGP_ATTR_VEC_MAX) {
1332 vecarr->entries[i].flags = 0;
1333 vecarr->entries[i].offset = 0;
1334 i++;
1335 }
1336 }
1337
1338 /* Setup a particular node entry in the vecarr */
1339 void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr,
1340 bpacket_attr_vec_type type, struct stream *s,
1341 struct attr *attr)
1342 {
1343 if (!vecarr)
1344 return;
1345 assert(type < BGP_ATTR_VEC_MAX);
1346
1347 SET_FLAG(vecarr->entries[type].flags, BPKT_ATTRVEC_FLAGS_UPDATED);
1348 vecarr->entries[type].offset = stream_get_endp(s);
1349 if (attr)
1350 bpacket_vec_arr_inherit_attr_flags(vecarr, type, attr);
1351 }