1 /**
2 * bgp_updgrp_packet.c: BGP update group packet handling routines
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "queue.h"
45 #include "mpls.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_debug.h"
49 #include "bgpd/bgp_errors.h"
50 #include "bgpd/bgp_fsm.h"
51 #include "bgpd/bgp_route.h"
52 #include "bgpd/bgp_packet.h"
53 #include "bgpd/bgp_advertise.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_nexthop.h"
56 #include "bgpd/bgp_nht.h"
57 #include "bgpd/bgp_mplsvpn.h"
58 #include "bgpd/bgp_label.h"
59 #include "bgpd/bgp_addpath.h"
60
61 /********************
62 * PRIVATE FUNCTIONS
63 ********************/
64
65 /********************
66 * PUBLIC FUNCTIONS
67 ********************/
68 struct bpacket *bpacket_alloc(void)
69 {
70 struct bpacket *pkt;
71
72 pkt = XCALLOC(MTYPE_BGP_PACKET, sizeof(struct bpacket));
73
74 return pkt;
75 }
76
77 void bpacket_free(struct bpacket *pkt)
78 {
79 if (pkt->buffer)
80 stream_free(pkt->buffer);
81 pkt->buffer = NULL;
82 XFREE(MTYPE_BGP_PACKET, pkt);
83 }
84
85 void bpacket_queue_init(struct bpacket_queue *q)
86 {
87 TAILQ_INIT(&(q->pkts));
88 }
89
90 /*
91 * bpacket_queue_sanity_check
92 */
93 void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__))
94 * q)
95 {
96 #if 0
97 struct bpacket *pkt;
98
99 pkt = bpacket_queue_last (q);
100 assert (pkt);
101 assert (!pkt->buffer);
102
103 /*
104 * Make sure the count of packets is correct.
105 */
106 int num_pkts = 0;
107
108 pkt = bpacket_queue_first (q);
109 while (pkt)
110 {
111 num_pkts++;
112
113 if (num_pkts > q->curr_count)
114 assert (0);
115
116 pkt = TAILQ_NEXT (pkt, pkt_train);
117 }
118
119 assert (num_pkts == q->curr_count);
120 #endif
121 }
122
123 /*
124 * bpacket_queue_add_packet
125 *
126 * Internal function of bpacket_queue; adds a
127 * packet entry to the end of the list.
128 *
129 * Users of bpacket_queue should use bpacket_queue_add instead.
130 */
131 static void bpacket_queue_add_packet(struct bpacket_queue *q,
132 struct bpacket *pkt)
133 {
134 struct bpacket *last_pkt;
135
136 if (TAILQ_EMPTY(&(q->pkts)))
137 TAILQ_INSERT_TAIL(&(q->pkts), pkt, pkt_train);
138 else {
139 last_pkt = bpacket_queue_last(q);
140 TAILQ_INSERT_AFTER(&(q->pkts), last_pkt, pkt, pkt_train);
141 }
142 q->curr_count++;
143 if (q->hwm_count < q->curr_count)
144 q->hwm_count = q->curr_count;
145 }
146
147 /*
148 * Adds a packet to the bpacket_queue.
149 *
150 * The stream passed in is consumed by this function, so the caller should
151 * not free or use the stream after
152 * invoking this function.
153 */
154 struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
155 struct bpacket_attr_vec_arr *vecarrp)
156 {
157 struct bpacket *pkt;
158 struct bpacket *last_pkt;
159
160
161 pkt = bpacket_alloc();
162 if (TAILQ_EMPTY(&(q->pkts))) {
163 pkt->ver = 1;
164 pkt->buffer = s;
165 if (vecarrp)
166 memcpy(&pkt->arr, vecarrp,
167 sizeof(struct bpacket_attr_vec_arr));
168 else
169 bpacket_attr_vec_arr_reset(&pkt->arr);
170 bpacket_queue_add_packet(q, pkt);
171 bpacket_queue_sanity_check(q);
172 return pkt;
173 }
174
175 /*
176 * Fill in the new information into the current sentinel and create a
177 * new sentinel.
178 */
179 bpacket_queue_sanity_check(q);
180 last_pkt = bpacket_queue_last(q);
181 assert(last_pkt->buffer == NULL);
182 last_pkt->buffer = s;
183 if (vecarrp)
184 memcpy(&last_pkt->arr, vecarrp,
185 sizeof(struct bpacket_attr_vec_arr));
186 else
187 bpacket_attr_vec_arr_reset(&last_pkt->arr);
188
189 pkt->ver = last_pkt->ver;
190 pkt->ver++;
191 bpacket_queue_add_packet(q, pkt);
192
193 bpacket_queue_sanity_check(q);
194 return last_pkt;
195 }
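/*
 * Illustrative note (not upstream documentation): the queue is expected to
 * end with an empty "sentinel" bpacket (buffer == NULL), created by an
 * initial bpacket_queue_add() with a NULL stream. Each later add fills the
 * current sentinel and appends a fresh one, e.g.
 *
 *   [sentinel v1]                -> bpacket_queue_add(q, s1, ...)
 *   [s1 v1][sentinel v2]         -> bpacket_queue_add(q, s2, ...)
 *   [s1 v1][s2 v2][sentinel v3]
 *
 * Peers track their position with peer_af->next_pkt_to_send; a peer parked
 * on the sentinel has nothing left to send.
 */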
196
197 struct bpacket *bpacket_queue_first(struct bpacket_queue *q)
198 {
199 return (TAILQ_FIRST(&(q->pkts)));
200 }
201
202 struct bpacket *bpacket_queue_last(struct bpacket_queue *q)
203 {
204 return TAILQ_LAST(&(q->pkts), pkt_queue);
205 }
206
207 struct bpacket *bpacket_queue_remove(struct bpacket_queue *q)
208 {
209 struct bpacket *first;
210
211 first = bpacket_queue_first(q);
212 if (first) {
213 TAILQ_REMOVE(&(q->pkts), first, pkt_train);
214 q->curr_count--;
215 }
216 return first;
217 }
218
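/* The counts below exclude the trailing sentinel packet, hence the -1. */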
219 unsigned int bpacket_queue_length(struct bpacket_queue *q)
220 {
221 return q->curr_count - 1;
222 }
223
224 unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q)
225 {
226 return q->hwm_count - 1;
227 }
228
229 bool bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q)
230 {
231 if (q->curr_count >= bgp->default_subgroup_pkt_queue_max)
232 return true;
233 return false;
234 }
235
236 void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf)
237 {
238 if (!pkt || !paf)
239 return;
240
241 LIST_INSERT_HEAD(&(pkt->peers), paf, pkt_train);
242 paf->next_pkt_to_send = pkt;
243 }
244
245 /*
246 * bpacket_queue_cleanup
247 */
248 void bpacket_queue_cleanup(struct bpacket_queue *q)
249 {
250 struct bpacket *pkt;
251
252 while ((pkt = bpacket_queue_remove(q))) {
253 bpacket_free(pkt);
254 }
255 }
256
257 /*
258 * bpacket_queue_compact
259 *
260 * Delete packets that do not need to be transmitted to any peer from
261 * the queue.
262 *
263 * @return the number of packets deleted.
264 */
265 static int bpacket_queue_compact(struct bpacket_queue *q)
266 {
267 int num_deleted;
268 struct bpacket *pkt, *removed_pkt;
269
270 num_deleted = 0;
271
272 while (1) {
273 pkt = bpacket_queue_first(q);
274 if (!pkt)
275 break;
276
277 /*
278 * Don't delete the sentinel.
279 */
280 if (!pkt->buffer)
281 break;
282
283 if (!LIST_EMPTY(&(pkt->peers)))
284 break;
285
286 removed_pkt = bpacket_queue_remove(q);
287 assert(pkt == removed_pkt);
288 bpacket_free(removed_pkt);
289
290 num_deleted++;
291 }
292
293 bpacket_queue_sanity_check(q);
294 return num_deleted;
295 }
296
297 void bpacket_queue_advance_peer(struct peer_af *paf)
298 {
299 struct bpacket *pkt;
300 struct bpacket *old_pkt;
301
302 old_pkt = paf->next_pkt_to_send;
303 if (old_pkt->buffer == NULL)
304 /* Already at end of list */
305 return;
306
307 LIST_REMOVE(paf, pkt_train);
308 pkt = TAILQ_NEXT(old_pkt, pkt_train);
309 bpacket_add_peer(pkt, paf);
310
311 if (!bpacket_queue_compact(PAF_PKTQ(paf)))
312 return;
313
314 /*
315 * Deleted one or more packets. Check if we can now merge this
316 * peer's subgroup into another subgroup.
317 */
318 update_subgroup_check_merge(paf->subgroup, "advanced peer in queue");
319 }
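/*
 * Illustrative flow (hypothetical caller, not code from this file): the
 * write side typically walks the packet train per peer roughly like
 *
 *   pkt = paf->next_pkt_to_send;
 *   s = bpacket_reformat_for_peer(pkt, paf);
 *   ... hand 's' to the peer's output ...
 *   bpacket_queue_advance_peer(paf);
 *
 * Once every peer_af has advanced past a packet, it has no attached peers
 * left and bpacket_queue_compact() frees it.
 */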
320
321 /*
322 * bpacket_queue_remove_peer
323 *
324 * Remove the peer from the packet queue of the subgroup it belongs
325 * to.
326 */
327 void bpacket_queue_remove_peer(struct peer_af *paf)
328 {
329 struct bpacket_queue *q;
330
331 q = PAF_PKTQ(paf);
332 assert(q);
333
334 LIST_REMOVE(paf, pkt_train);
335 paf->next_pkt_to_send = NULL;
336
337 bpacket_queue_compact(q);
338 }
339
340 unsigned int bpacket_queue_virtual_length(struct peer_af *paf)
341 {
342 struct bpacket *pkt;
343 struct bpacket *last;
344 struct bpacket_queue *q;
345
346 pkt = paf->next_pkt_to_send;
347 if (!pkt || (pkt->buffer == NULL))
348 /* Already at end of list */
349 return 0;
350
351 q = PAF_PKTQ(paf);
352 if (TAILQ_EMPTY(&(q->pkts)))
353 return 0;
354
355 last = TAILQ_LAST(&(q->pkts), pkt_queue);
356 if (last->ver >= pkt->ver)
357 return last->ver - pkt->ver;
358
359 /* sequence # rolled over */
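/* e.g. pkt->ver == UINT_MAX - 1, last->ver == 2 -> virtual length 4 */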
360 return (UINT_MAX - pkt->ver + 1) + last->ver;
361 }
362
363 /*
364 * Dump the bpacket queue
365 */
366 void bpacket_queue_show_vty(struct bpacket_queue *q, struct vty *vty)
367 {
368 struct bpacket *pkt;
369 struct peer_af *paf;
370
371 pkt = bpacket_queue_first(q);
372 while (pkt) {
373 vty_out(vty, " Packet %p ver %u buffer %p\n", pkt, pkt->ver,
374 pkt->buffer);
375
376 LIST_FOREACH (paf, &(pkt->peers), pkt_train) {
377 vty_out(vty, " - %s\n", paf->peer->host);
378 }
379 pkt = bpacket_next(pkt);
380 }
381 return;
382 }
383
384 struct stream *bpacket_reformat_for_peer(struct bpacket *pkt,
385 struct peer_af *paf)
386 {
387 struct stream *s = NULL;
388 bpacket_attr_vec *vec;
389 struct peer *peer;
390 char buf[BUFSIZ];
391 char buf2[BUFSIZ];
392 struct bgp_filter *filter;
393
394 s = stream_dup(pkt->buffer);
395 peer = PAF_PEER(paf);
396
397 vec = &pkt->arr.entries[BGP_ATTR_VEC_NH];
398
399 if (!CHECK_FLAG(vec->flags, BPKT_ATTRVEC_FLAGS_UPDATED))
400 return s;
401
402 uint8_t nhlen;
403 afi_t nhafi;
404 int route_map_sets_nh;
405
406 nhlen = stream_getc_from(s, vec->offset);
407 filter = &peer->filter[paf->afi][paf->safi];
408
409 if (peer_cap_enhe(peer, paf->afi, paf->safi))
410 nhafi = AFI_IP6;
411 else
412 nhafi = BGP_NEXTHOP_AFI_FROM_NHLEN(nhlen);
413
414 if (nhafi == AFI_IP) {
415 struct in_addr v4nh, *mod_v4nh;
416 int nh_modified = 0;
417 size_t offset_nh = vec->offset + 1;
418
419 route_map_sets_nh =
420 (CHECK_FLAG(vec->flags,
421 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED)
422 || CHECK_FLAG(
423 vec->flags,
424 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
425
426 switch (nhlen) {
427 case BGP_ATTR_NHLEN_IPV4:
428 break;
429 case BGP_ATTR_NHLEN_VPNV4:
430 offset_nh += 8;
431 break;
432 default:
433 /* TODO: handle IPv6 nexthops */
434 flog_warn(
435 EC_BGP_INVALID_NEXTHOP_LENGTH,
436 "%s: %s: invalid MP nexthop length (AFI IP): %u",
437 __func__, peer->host, nhlen);
438 stream_free(s);
439 return NULL;
440 }
441
442 stream_get_from(&v4nh, s, offset_nh, IPV4_MAX_BYTELEN);
443 mod_v4nh = &v4nh;
444
445 /*
446 * If route-map has set the nexthop, that is normally
447 * used; if it is specified as peer-address, the peering
448 * address is picked up. Otherwise, if NH is unavailable
449 * from attribute, the peering addr is picked up; the
450 * "NH unavailable" case also covers next-hop-self and
451 * some other scenarios - see subgroup_announce_check().
452 * In all other cases, use the nexthop carried in the
453 * attribute unless it is EBGP non-multiaccess and there
454 * is no next-hop-unchanged setting or the peer is EBGP
455 * and the route-map that changed the next-hop value
456 * was applied inbound rather than outbound. Updates to
457 * an EBGP peer should only modify the next-hop if it
458 * was set in an outbound route-map to that peer.
459 * Note: It is assumed route-map cannot set the nexthop
460 * to an invalid value.
461 */
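/*
 * For instance (derived from the conditions below): an IBGP peer with an
 * outbound "set ip next-hop peer-address" route-map gets the peering
 * address written over the nexthop, whereas an EBGP peer that shares a
 * subnet with the attribute's nexthop (multiaccess) keeps the third-party
 * nexthop untouched.
 */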
462 if (route_map_sets_nh
463 && ((peer->sort != BGP_PEER_EBGP)
464 || ROUTE_MAP_OUT(filter))) {
465 if (CHECK_FLAG(
466 vec->flags,
467 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
468 mod_v4nh = &peer->nexthop.v4;
469 nh_modified = 1;
470 }
471 } else if (v4nh.s_addr == INADDR_ANY) {
472 mod_v4nh = &peer->nexthop.v4;
473 nh_modified = 1;
474 } else if (peer->sort == BGP_PEER_EBGP
475 && (bgp_multiaccess_check_v4(v4nh, peer) == 0)
476 && !CHECK_FLAG(vec->flags,
477 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
478 && !peer_af_flag_check(
479 peer, paf->afi, paf->safi,
480 PEER_FLAG_NEXTHOP_UNCHANGED)) {
481 /* NOTE: not handling case where NH has new AFI
482 */
483 mod_v4nh = &peer->nexthop.v4;
484 nh_modified = 1;
485 }
486
487 if (nh_modified) /* allow for VPN RD */
488 stream_put_in_addr_at(s, offset_nh, mod_v4nh);
489
490 if (bgp_debug_update(peer, NULL, NULL, 0))
491 zlog_debug("u%" PRIu64 ":s%" PRIu64
492 " %s send UPDATE w/ nexthop %s%s",
493 PAF_SUBGRP(paf)->update_group->id,
494 PAF_SUBGRP(paf)->id, peer->host,
495 inet_ntoa(*mod_v4nh),
496 (nhlen == BGP_ATTR_NHLEN_VPNV4 ? " and RD"
497 : ""));
498 } else if (nhafi == AFI_IP6) {
499 struct in6_addr v6nhglobal, *mod_v6nhg;
500 struct in6_addr v6nhlocal, *mod_v6nhl;
501 int gnh_modified, lnh_modified;
502 size_t offset_nhglobal = vec->offset + 1;
503 size_t offset_nhlocal = vec->offset + 1;
504
505 gnh_modified = lnh_modified = 0;
506 mod_v6nhg = &v6nhglobal;
507 mod_v6nhl = &v6nhlocal;
508
509 route_map_sets_nh =
510 (CHECK_FLAG(vec->flags,
511 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED)
512 || CHECK_FLAG(
513 vec->flags,
514 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
515
516 /*
517 * The logic here is rather similar to that for IPv4, the
518 * additional work being to handle 1 or 2 nexthops.
519 * Also, 3rd party nexthop is not propagated for EBGP
520 * right now.
521 */
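/*
 * Offset arithmetic, for reference: with BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL
 * the nexthop field is RD (8) + global (16) + RD (8) + link-local (16), so
 * the global address starts 8 bytes in and the link-local address
 * 8 * 2 + 16 = 32 bytes in, matching the offsets below.
 */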
522 switch (nhlen) {
523 case BGP_ATTR_NHLEN_IPV6_GLOBAL:
524 break;
525 case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
526 offset_nhlocal += IPV6_MAX_BYTELEN;
527 break;
528 case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
529 offset_nhglobal += 8;
530 break;
531 case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
532 offset_nhglobal += 8;
533 offset_nhlocal += 8 * 2 + IPV6_MAX_BYTELEN;
534 break;
535 default:
536 /* TODO: handle IPv4 nexthops */
537 flog_warn(
538 EC_BGP_INVALID_NEXTHOP_LENGTH,
539 "%s: %s: invalid MP nexthop length (AFI IP6): %u",
540 __func__, peer->host, nhlen);
541 stream_free(s);
542 return NULL;
543 }
544
545 stream_get_from(&v6nhglobal, s, offset_nhglobal,
546 IPV6_MAX_BYTELEN);
547
548 /*
549 * Updates to an EBGP peer should only modify the
550 * next-hop if it was set in an outbound route-map
551 * to that peer.
552 */
553 if (route_map_sets_nh
554 && ((peer->sort != BGP_PEER_EBGP)
555 || ROUTE_MAP_OUT(filter))) {
556 if (CHECK_FLAG(
557 vec->flags,
558 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
559 mod_v6nhg = &peer->nexthop.v6_global;
560 gnh_modified = 1;
561 }
562 } else if (IN6_IS_ADDR_UNSPECIFIED(&v6nhglobal)) {
563 mod_v6nhg = &peer->nexthop.v6_global;
564 gnh_modified = 1;
565 } else if ((peer->sort == BGP_PEER_EBGP)
566 && (!bgp_multiaccess_check_v6(v6nhglobal, peer))
567 && !CHECK_FLAG(vec->flags,
568 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
569 && !peer_af_flag_check(
570 peer, nhafi, paf->safi,
571 PEER_FLAG_NEXTHOP_UNCHANGED)) {
572 /* NOTE: not handling case where NH has new AFI
573 */
574 mod_v6nhg = &peer->nexthop.v6_global;
575 gnh_modified = 1;
576 }
577
578 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
579 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
580 stream_get_from(&v6nhlocal, s, offset_nhlocal,
581 IPV6_MAX_BYTELEN);
582 if (IN6_IS_ADDR_UNSPECIFIED(&v6nhlocal)) {
583 mod_v6nhl = &peer->nexthop.v6_local;
584 lnh_modified = 1;
585 }
586 }
587
588 if (gnh_modified)
589 stream_put_in6_addr_at(s, offset_nhglobal, mod_v6nhg);
590 if (lnh_modified)
591 stream_put_in6_addr_at(s, offset_nhlocal, mod_v6nhl);
592
593 if (bgp_debug_update(peer, NULL, NULL, 0)) {
594 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
595 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
596 zlog_debug(
597 "u%" PRIu64 ":s%" PRIu64
598 " %s send UPDATE w/ mp_nexthops %s, %s%s",
599 PAF_SUBGRP(paf)->update_group->id,
600 PAF_SUBGRP(paf)->id, peer->host,
601 inet_ntop(AF_INET6, mod_v6nhg, buf,
602 BUFSIZ),
603 inet_ntop(AF_INET6, mod_v6nhl, buf2,
604 BUFSIZ),
605 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL
606 ? " and RD"
607 : ""));
608 else
609 zlog_debug("u%" PRIu64 ":s%" PRIu64
610 " %s send UPDATE w/ mp_nexthop %s%s",
611 PAF_SUBGRP(paf)->update_group->id,
612 PAF_SUBGRP(paf)->id, peer->host,
613 inet_ntop(AF_INET6, mod_v6nhg, buf,
614 BUFSIZ),
615 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL
616 ? " and RD"
617 : ""));
618 }
619 } else if (paf->afi == AFI_L2VPN) {
620 struct in_addr v4nh, *mod_v4nh;
621 int nh_modified = 0;
622
623 stream_get_from(&v4nh, s, vec->offset + 1, 4);
624 mod_v4nh = &v4nh;
625
626 /* No route-map changes allowed for EVPN nexthops. */
627 if (v4nh.s_addr == INADDR_ANY) {
628 mod_v4nh = &peer->nexthop.v4;
629 nh_modified = 1;
630 }
631
632 if (nh_modified)
633 stream_put_in_addr_at(s, vec->offset + 1, mod_v4nh);
634
635 if (bgp_debug_update(peer, NULL, NULL, 0))
636 zlog_debug("u%" PRIu64 ":s%" PRIu64
637 " %s send UPDATE w/ nexthop %s",
638 PAF_SUBGRP(paf)->update_group->id,
639 PAF_SUBGRP(paf)->id, peer->host,
640 inet_ntoa(*mod_v4nh));
641 }
642
643 return s;
644 }
645
646 /*
647 * Update the vecarr offsets to go beyond 'pos' bytes, i.e. add 'pos'
648 * to each offset.
649 */
650 static void bpacket_attr_vec_arr_update(struct bpacket_attr_vec_arr *vecarr,
651 size_t pos)
652 {
653 int i;
654
655 if (!vecarr)
656 return;
657
658 for (i = 0; i < BGP_ATTR_VEC_MAX; i++)
659 vecarr->entries[i].offset += pos;
660 }
661
662 /*
663 * Return true if there are packets to build for this subgroup.
664 */
665 bool subgroup_packets_to_build(struct update_subgroup *subgrp)
666 {
667 struct bgp_advertise *adv;
668
669 if (!subgrp)
670 return false;
671
672 adv = bgp_adv_fifo_first(&subgrp->sync->withdraw);
673 if (adv)
674 return true;
675
676 adv = bgp_adv_fifo_first(&subgrp->sync->update);
677 if (adv)
678 return true;
679
680 return false;
681 }
682
683 /* Make BGP update packet. */
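/* Rough layout of the UPDATE built here (informational; see also the note
   before subgroup_withdraw_packet). For ipv4 unicast without extended
   nexthop encoding:
   16-octet marker | 2-octet length | 1-octet type |
   2-octet withdrawn route length (=0) | 2-octet attrlen |
   path attributes | NLRI
*/
/* For other afi/safis:
   16-octet marker | 2-octet length | 1-octet type |
   2-octet withdrawn route length (=0) | 2-octet attrlen |
   MP_REACH_NLRI attr (carrying the prefixes) | remaining path attributes
*/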
684 struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
685 {
686 struct bpacket_attr_vec_arr vecarr;
687 struct bpacket *pkt;
688 struct peer *peer;
689 struct stream *s;
690 struct stream *snlri;
691 struct stream *packet;
692 struct bgp_adj_out *adj;
693 struct bgp_advertise *adv;
694 struct bgp_dest *dest = NULL;
695 struct bgp_path_info *path = NULL;
696 bgp_size_t total_attr_len = 0;
697 unsigned long attrlen_pos = 0;
698 size_t mpattrlen_pos = 0;
699 size_t mpattr_pos = 0;
700 afi_t afi;
701 safi_t safi;
702 int space_remaining = 0;
703 int space_needed = 0;
704 char send_attr_str[BUFSIZ];
705 int send_attr_printed = 0;
706 int num_pfx = 0;
707 int addpath_encode = 0;
708 int addpath_overhead = 0;
709 uint32_t addpath_tx_id = 0;
710 struct prefix_rd *prd = NULL;
711 mpls_label_t label = MPLS_INVALID_LABEL, *label_pnt = NULL;
712 uint32_t num_labels = 0;
713
714 if (!subgrp)
715 return NULL;
716
717 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
718 return NULL;
719
720 peer = SUBGRP_PEER(subgrp);
721 afi = SUBGRP_AFI(subgrp);
722 safi = SUBGRP_SAFI(subgrp);
723 s = subgrp->work;
724 stream_reset(s);
725 snlri = subgrp->scratch;
726 stream_reset(snlri);
727
728 bpacket_attr_vec_arr_reset(&vecarr);
729
730 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
731 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
732
733 adv = bgp_adv_fifo_first(&subgrp->sync->update);
734 while (adv) {
735 const struct prefix *dest_p;
736
737 assert(adv->dest);
738 dest = adv->dest;
739 dest_p = bgp_dest_get_prefix(dest);
740 adj = adv->adj;
741 addpath_tx_id = adj->addpath_tx_id;
742 path = adv->pathi;
743
744 /* Don't add the prefix to the packet if maximum-prefix-out is set
745 * for the peer and the limit has already been reached.
746 */
747 if (CHECK_FLAG(peer->af_flags[afi][safi],
748 PEER_FLAG_MAX_PREFIX_OUT)
749 && subgrp->scount >= peer->pmax_out[afi][safi]) {
750 if (BGP_DEBUG(update, UPDATE_OUT)
751 || BGP_DEBUG(update, UPDATE_PREFIX)) {
752 zlog_debug(
753 "%s reached maximum prefix to be send (%" PRIu32
754 ")",
755 peer->host, peer->pmax_out[afi][safi]);
756 }
757 goto next;
758 }
759
760 space_remaining = STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
761 - BGP_MAX_PACKET_SIZE_OVERFLOW;
762 space_needed =
763 BGP_NLRI_LENGTH + addpath_overhead
764 + bgp_packet_mpattr_prefix_size(afi, safi, dest_p);
765
766 /* When remaining space can't include NLRI and its length. */
767 if (space_remaining < space_needed)
768 break;
769
770 /* If packet is empty, set attribute. */
771 if (stream_empty(s)) {
772 struct peer *from = NULL;
773
774 if (path)
775 from = path->peer;
776
777 /* 1: Write the BGP message header - 16 bytes marker, 2
778 * bytes length,
779 * one byte message type.
780 */
781 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
782
783 /* 2: withdrawn routes length */
784 stream_putw(s, 0);
785
786 /* 3: total attributes length - attrlen_pos stores the
787 * position */
788 attrlen_pos = stream_get_endp(s);
789 stream_putw(s, 0);
790
791 /* 4: if there is an MP_REACH_NLRI attribute, it should
792 * be the first
793 * attribute, according to
794 * draft-ietf-idr-error-handling. Save the
795 * position.
796 */
797 mpattr_pos = stream_get_endp(s);
798
799 /* 5: Encode all the attributes, except MP_REACH_NLRI
800 * attr. */
801 total_attr_len = bgp_packet_attribute(
802 NULL, peer, s, adv->baa->attr, &vecarr, NULL,
803 afi, safi, from, NULL, NULL, 0, 0, 0);
804
805 space_remaining =
806 STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
807 - BGP_MAX_PACKET_SIZE_OVERFLOW;
808 space_needed = BGP_NLRI_LENGTH + addpath_overhead
809 + bgp_packet_mpattr_prefix_size(
810 afi, safi, dest_p);
811
812 /* If the attributes alone do not leave any room for
813 * NLRI then
814 * return */
815 if (space_remaining < space_needed) {
816 flog_err(
817 EC_BGP_UPDGRP_ATTR_LEN,
818 "u%" PRIu64 ":s%" PRIu64
819 " attributes too long, cannot send UPDATE",
820 subgrp->update_group->id, subgrp->id);
821
822 /* Flush the FIFO update queue */
823 while (adv)
824 adv = bgp_advertise_clean_subgroup(
825 subgrp, adj);
826 return NULL;
827 }
828
829 if (BGP_DEBUG(update, UPDATE_OUT)
830 || BGP_DEBUG(update, UPDATE_PREFIX)) {
831 memset(send_attr_str, 0, BUFSIZ);
832 send_attr_printed = 0;
833 bgp_dump_attr(adv->baa->attr, send_attr_str,
834 BUFSIZ);
835 }
836 }
837
838 if ((afi == AFI_IP && safi == SAFI_UNICAST)
839 && !peer_cap_enhe(peer, afi, safi))
840 stream_put_prefix_addpath(s, dest_p, addpath_encode,
841 addpath_tx_id);
842 else {
843 /* Encode the prefix in MP_REACH_NLRI attribute */
844 if (dest->pdest)
845 prd = (struct prefix_rd *)bgp_dest_get_prefix(
846 dest->pdest);
847
848 if (safi == SAFI_LABELED_UNICAST) {
849 label = bgp_adv_label(dest, path, peer, afi,
850 safi);
851 label_pnt = &label;
852 num_labels = 1;
853 } else if (path && path->extra) {
854 label_pnt = &path->extra->label[0];
855 num_labels = path->extra->num_labels;
856 }
857
858 if (stream_empty(snlri))
859 mpattrlen_pos = bgp_packet_mpattr_start(
860 snlri, peer, afi, safi, &vecarr,
861 adv->baa->attr);
862
863 bgp_packet_mpattr_prefix(snlri, afi, safi, dest_p, prd,
864 label_pnt, num_labels,
865 addpath_encode, addpath_tx_id,
866 adv->baa->attr);
867 }
868
869 num_pfx++;
870
871 if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
872 char pfx_buf[BGP_PRD_PATH_STRLEN];
873
874 if (!send_attr_printed) {
875 zlog_debug("u%" PRIu64 ":s%" PRIu64
876 " send UPDATE w/ attr: %s",
877 subgrp->update_group->id, subgrp->id,
878 send_attr_str);
879 if (!stream_empty(snlri)) {
880 iana_afi_t pkt_afi;
881 iana_safi_t pkt_safi;
882
883 pkt_afi = afi_int2iana(afi);
884 pkt_safi = safi_int2iana(safi);
885 zlog_debug(
886 "u%" PRIu64 ":s%" PRIu64
887 " send MP_REACH for afi/safi %d/%d",
888 subgrp->update_group->id,
889 subgrp->id, pkt_afi, pkt_safi);
890 }
891
892 send_attr_printed = 1;
893 }
894
895 bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p,
896 label_pnt, num_labels,
897 addpath_encode, addpath_tx_id,
898 pfx_buf, sizeof(pfx_buf));
899 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s",
900 subgrp->update_group->id, subgrp->id,
901 pfx_buf);
902 }
903
904 /* Synchronize attribute. */
905 if (adj->attr)
906 bgp_attr_unintern(&adj->attr);
907 else
908 subgrp->scount++;
909
910 adj->attr = bgp_attr_intern(adv->baa->attr);
911 next:
912 adv = bgp_advertise_clean_subgroup(subgrp, adj);
913 }
914
915 if (!stream_empty(s)) {
916 if (!stream_empty(snlri)) {
917 bgp_packet_mpattr_end(snlri, mpattrlen_pos);
918 total_attr_len += stream_get_endp(snlri);
919 }
920
921 /* set the total attribute length correctly */
922 stream_putw_at(s, attrlen_pos, total_attr_len);
923
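/* If there is MP NLRI, splice the MP_REACH_NLRI attribute built in
 * 'snlri' into 's' at mpattr_pos, i.e. right after the attribute-length
 * field, so that it comes first among the attributes, and shift the
 * recorded nexthop vector offsets by the same amount.
 */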
924 if (!stream_empty(snlri)) {
925 packet = stream_dupcat(s, snlri, mpattr_pos);
926 bpacket_attr_vec_arr_update(&vecarr, mpattr_pos);
927 } else
928 packet = stream_dup(s);
929 bgp_packet_set_size(packet);
930 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
931 zlog_debug("u%" PRIu64 ":s%" PRIu64
932 " send UPDATE len %zd numpfx %d",
933 subgrp->update_group->id, subgrp->id,
934 (stream_get_endp(packet)
935 - stream_get_getp(packet)),
936 num_pfx);
937 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), packet, &vecarr);
938 stream_reset(s);
939 stream_reset(snlri);
940 return pkt;
941 }
942 return NULL;
943 }
944
945 /* Make BGP withdraw packet. */
946 /* For ipv4 unicast:
947 16-octet marker | 2-octet length | 1-octet type |
948 2-octet withdrawn route length | withdrawn prefixes | 2-octet attrlen (=0)
949 */
950 /* For other afi/safis:
951 16-octet marker | 2-octet length | 1-octet type |
952 2-octet withdrawn route length (=0) | 2-octet attrlen |
953 mp_unreach attr type | attr len | afi | safi | withdrawn prefixes
954 */
955 struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
956 {
957 struct bpacket *pkt;
958 struct stream *s;
959 struct bgp_adj_out *adj;
960 struct bgp_advertise *adv;
961 struct peer *peer;
962 struct bgp_dest *dest;
963 bgp_size_t unfeasible_len;
964 bgp_size_t total_attr_len;
965 size_t mp_start = 0;
966 size_t attrlen_pos = 0;
967 size_t mplen_pos = 0;
968 uint8_t first_time = 1;
969 afi_t afi;
970 safi_t safi;
971 int space_remaining = 0;
972 int space_needed = 0;
973 int num_pfx = 0;
974 int addpath_encode = 0;
975 int addpath_overhead = 0;
976 uint32_t addpath_tx_id = 0;
977 const struct prefix_rd *prd = NULL;
978
979
980 if (!subgrp)
981 return NULL;
982
983 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
984 return NULL;
985
986 peer = SUBGRP_PEER(subgrp);
987 afi = SUBGRP_AFI(subgrp);
988 safi = SUBGRP_SAFI(subgrp);
989 s = subgrp->work;
990 stream_reset(s);
991 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
992 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
993
994 while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) {
995 const struct prefix *dest_p;
996
997 assert(adv->dest);
998 adj = adv->adj;
999 dest = adv->dest;
1000 dest_p = bgp_dest_get_prefix(dest);
1001 addpath_tx_id = adj->addpath_tx_id;
1002
1003 space_remaining =
1004 STREAM_WRITEABLE(s) - BGP_MAX_PACKET_SIZE_OVERFLOW;
1005 space_needed =
1006 BGP_NLRI_LENGTH + addpath_overhead + BGP_TOTAL_ATTR_LEN
1007 + bgp_packet_mpattr_prefix_size(afi, safi, dest_p);
1008
1009 if (space_remaining < space_needed)
1010 break;
1011
1012 if (stream_empty(s)) {
1013 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1014 stream_putw(s, 0); /* unfeasible routes length */
1015 } else
1016 first_time = 0;
1017
1018 if (afi == AFI_IP && safi == SAFI_UNICAST
1019 && !peer_cap_enhe(peer, afi, safi))
1020 stream_put_prefix_addpath(s, dest_p, addpath_encode,
1021 addpath_tx_id);
1022 else {
1023 if (dest->pdest)
1024 prd = (struct prefix_rd *)bgp_dest_get_prefix(
1025 dest->pdest);
1026
1027 /* If first time, format the MP_UNREACH header
1028 */
1029 if (first_time) {
1030 iana_afi_t pkt_afi;
1031 iana_safi_t pkt_safi;
1032
1033 pkt_afi = afi_int2iana(afi);
1034 pkt_safi = safi_int2iana(safi);
1035
1036 attrlen_pos = stream_get_endp(s);
1037 /* total attr length = 0 for now.
1038 * reevaluate later */
1039 stream_putw(s, 0);
1040 mp_start = stream_get_endp(s);
1041 mplen_pos = bgp_packet_mpunreach_start(s, afi,
1042 safi);
1043 if (bgp_debug_update(NULL, NULL,
1044 subgrp->update_group, 0))
1045 zlog_debug(
1046 "u%" PRIu64 ":s%" PRIu64
1047 " send MP_UNREACH for afi/safi %d/%d",
1048 subgrp->update_group->id,
1049 subgrp->id, pkt_afi, pkt_safi);
1050 }
1051
1052 bgp_packet_mpunreach_prefix(s, dest_p, afi, safi, prd,
1053 NULL, 0, addpath_encode,
1054 addpath_tx_id, NULL);
1055 }
1056
1057 num_pfx++;
1058
1059 if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
1060 char pfx_buf[BGP_PRD_PATH_STRLEN];
1061
1062 bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p, NULL, 0,
1063 addpath_encode, addpath_tx_id,
1064 pfx_buf, sizeof(pfx_buf));
1065 zlog_debug("u%" PRIu64 ":s%" PRIu64
1066 " send UPDATE %s -- unreachable",
1067 subgrp->update_group->id, subgrp->id,
1068 pfx_buf);
1069 }
1070
1071 subgrp->scount--;
1072
1073 bgp_adj_out_remove_subgroup(dest, adj, subgrp);
1074 bgp_dest_unlock_node(dest);
1075 }
1076
1077 if (!stream_empty(s)) {
1078 if (afi == AFI_IP && safi == SAFI_UNICAST
1079 && !peer_cap_enhe(peer, afi, safi)) {
1080 unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
1081 - BGP_UNFEASIBLE_LEN;
1082 stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
1083 stream_putw(s, 0);
1084 } else {
1085 /* Set the mp_unreach attr's length */
1086 bgp_packet_mpunreach_end(s, mplen_pos);
1087
1088 /* Set total path attribute length. */
1089 total_attr_len = stream_get_endp(s) - mp_start;
1090 stream_putw_at(s, attrlen_pos, total_attr_len);
1091 }
1092 bgp_packet_set_size(s);
1093 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1094 zlog_debug("u%" PRIu64 ":s%" PRIu64
1095 " send UPDATE (withdraw) len %zd numpfx %d",
1096 subgrp->update_group->id, subgrp->id,
1097 (stream_get_endp(s) - stream_get_getp(s)),
1098 num_pfx);
1099 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), stream_dup(s),
1100 NULL);
1101 stream_reset(s);
1102 return pkt;
1103 }
1104
1105 return NULL;
1106 }
1107
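/*
 * Build and enqueue an UPDATE for the default route (prefix length 0),
 * typically driven by "neighbor ... default-originate".
 */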
1108 void subgroup_default_update_packet(struct update_subgroup *subgrp,
1109 struct attr *attr, struct peer *from)
1110 {
1111 struct stream *s;
1112 struct peer *peer;
1113 struct prefix p;
1114 unsigned long pos;
1115 bgp_size_t total_attr_len;
1116 afi_t afi;
1117 safi_t safi;
1118 struct bpacket_attr_vec_arr vecarr;
1119 int addpath_encode = 0;
1120
1121 if (DISABLE_BGP_ANNOUNCE)
1122 return;
1123
1124 if (!subgrp)
1125 return;
1126
1127 peer = SUBGRP_PEER(subgrp);
1128 afi = SUBGRP_AFI(subgrp);
1129 safi = SUBGRP_SAFI(subgrp);
1130 bpacket_attr_vec_arr_reset(&vecarr);
1131 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1132
1133 memset(&p, 0, sizeof(p));
1134 p.family = afi2family(afi);
1135 p.prefixlen = 0;
1136
1137 /* Logging the attribute. */
1138 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1139 char attrstr[BUFSIZ];
1140 char buf[PREFIX_STRLEN];
1141 /* ' with addpath ID ' 17
1142 * max strlen of uint32 + 10
1143 * +/- (just in case) + 1
1144 * null terminator + 1
1145 * ============================ 29 */
1146 char tx_id_buf[30];
1147
1148 attrstr[0] = '\0';
1149
1150 bgp_dump_attr(attr, attrstr, BUFSIZ);
1151
1152 if (addpath_encode)
1153 snprintf(tx_id_buf, sizeof(tx_id_buf),
1154 " with addpath ID %u",
1155 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1156 else
1157 tx_id_buf[0] = '\0';
1158
1159 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s%s %s",
1160 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1161 prefix2str(&p, buf, sizeof(buf)), tx_id_buf,
1162 attrstr);
1163 }
1164
1165 s = stream_new(BGP_MAX_PACKET_SIZE);
1166
1167 /* Make BGP update packet. */
1168 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1169
1170 /* Unfeasible Routes Length. */
1171 stream_putw(s, 0);
1172
1173 /* Make place for total attribute length. */
1174 pos = stream_get_endp(s);
1175 stream_putw(s, 0);
1176 total_attr_len = bgp_packet_attribute(
1177 NULL, peer, s, attr, &vecarr, &p, afi, safi, from, NULL, NULL,
1178 0, addpath_encode, BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1179
1180 /* Set Total Path Attribute Length. */
1181 stream_putw_at(s, pos, total_attr_len);
1182
1183 /* NLRI set. */
1184 if (p.family == AF_INET && safi == SAFI_UNICAST
1185 && !peer_cap_enhe(peer, afi, safi))
1186 stream_put_prefix_addpath(
1187 s, &p, addpath_encode,
1188 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1189
1190 /* Set size. */
1191 bgp_packet_set_size(s);
1192
1193 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
1194 subgroup_trigger_write(subgrp);
1195 }
1196
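/*
 * Build and enqueue an UPDATE withdrawing the default route previously
 * advertised via subgroup_default_update_packet().
 */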
1197 void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
1198 {
1199 struct peer *peer;
1200 struct stream *s;
1201 struct prefix p;
1202 unsigned long attrlen_pos = 0;
1203 unsigned long cp;
1204 bgp_size_t unfeasible_len;
1205 bgp_size_t total_attr_len = 0;
1206 size_t mp_start = 0;
1207 size_t mplen_pos = 0;
1208 afi_t afi;
1209 safi_t safi;
1210 int addpath_encode = 0;
1211
1212 if (DISABLE_BGP_ANNOUNCE)
1213 return;
1214
1215 peer = SUBGRP_PEER(subgrp);
1216 afi = SUBGRP_AFI(subgrp);
1217 safi = SUBGRP_SAFI(subgrp);
1218 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1219
1220 memset(&p, 0, sizeof(p));
1221 p.family = afi2family(afi);
1222 p.prefixlen = 0;
1223
1224 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1225 char buf[PREFIX_STRLEN];
1226 /* ' with addpath ID ' 17
1227 * max strlen of uint32 + 10
1228 * +/- (just in case) + 1
1229 * null terminator + 1
1230 * ============================ 29 */
1231 char tx_id_buf[30] = {0}; /* stays empty unless addpath is in use */
1232
1233 if (addpath_encode)
1234 snprintf(tx_id_buf, sizeof(tx_id_buf),
1235 " with addpath ID %u",
1236 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1237
1238 zlog_debug("u%" PRIu64 ":s%" PRIu64
1239 " send UPDATE %s%s -- unreachable",
1240 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1241 prefix2str(&p, buf, sizeof(buf)), tx_id_buf);
1242 }
1243
1244 s = stream_new(BGP_MAX_PACKET_SIZE);
1245
1246 /* Make BGP update packet. */
1247 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1248
1249 /* Unfeasible Routes Length. */
1250 cp = stream_get_endp(s);
1251 stream_putw(s, 0);
1252
1253 /* Withdrawn Routes. */
1254 if (p.family == AF_INET && safi == SAFI_UNICAST
1255 && !peer_cap_enhe(peer, afi, safi)) {
1256 stream_put_prefix_addpath(
1257 s, &p, addpath_encode,
1258 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1259
1260 unfeasible_len = stream_get_endp(s) - cp - 2;
1261
1262 /* Set unfeasible len. */
1263 stream_putw_at(s, cp, unfeasible_len);
1264
1265 /* Set total path attribute length. */
1266 stream_putw(s, 0);
1267 } else {
1268 attrlen_pos = stream_get_endp(s);
1269 stream_putw(s, 0);
1270 mp_start = stream_get_endp(s);
1271 mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
1272 bgp_packet_mpunreach_prefix(
1273 s, &p, afi, safi, NULL, NULL, 0, addpath_encode,
1274 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE, NULL);
1275
1276 /* Set the mp_unreach attr's length */
1277 bgp_packet_mpunreach_end(s, mplen_pos);
1278
1279 /* Set total path attribute length. */
1280 total_attr_len = stream_get_endp(s) - mp_start;
1281 stream_putw_at(s, attrlen_pos, total_attr_len);
1282 }
1283
1284 bgp_packet_set_size(s);
1285
1286 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
1287 subgroup_trigger_write(subgrp);
1288 }
1289
1290 static void
1291 bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr,
1292 bpacket_attr_vec_type type,
1293 struct attr *attr)
1294 {
1295 if (CHECK_FLAG(attr->rmap_change_flags,
1296 BATTR_RMAP_NEXTHOP_PEER_ADDRESS))
1297 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1298 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS);
1299
1300 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_REFLECTED))
1301 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1302 BPKT_ATTRVEC_FLAGS_REFLECTED);
1303
1304 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED))
1305 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1306 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED);
1307
1308 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_IPV4_NHOP_CHANGED))
1309 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1310 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED);
1311
1312 if (CHECK_FLAG(attr->rmap_change_flags,
1313 BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED))
1314 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1315 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED);
1316
1317 if (CHECK_FLAG(attr->rmap_change_flags,
1318 BATTR_RMAP_IPV6_LL_NHOP_CHANGED))
1319 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1320 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_LNH_CHANGED);
1321 }
1322
1323 /* Reset the Attributes vector array. The vector array is used to override
1324 * certain output parameters in the packet for a particular peer
1325 */
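/*
 * Informational note: the attribute-encoding code this file calls (e.g.
 * bgp_packet_attribute() / bgp_packet_mpattr_start()) records the nexthop
 * attribute's position via bpacket_attr_vec_arr_set_vec(), and
 * bpacket_reformat_for_peer() later rewrites the nexthop in place at that
 * offset for each peer sharing the packet.
 */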
1326 void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr)
1327 {
1328 int i;
1329
1330 if (!vecarr)
1331 return;
1332
1333 i = 0;
1334 while (i < BGP_ATTR_VEC_MAX) {
1335 vecarr->entries[i].flags = 0;
1336 vecarr->entries[i].offset = 0;
1337 i++;
1338 }
1339 }
1340
1341 /* Setup a particular node entry in the vecarr */
1342 void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr,
1343 bpacket_attr_vec_type type, struct stream *s,
1344 struct attr *attr)
1345 {
1346 if (!vecarr)
1347 return;
1348 assert(type < BGP_ATTR_VEC_MAX);
1349
1350 SET_FLAG(vecarr->entries[type].flags, BPKT_ATTRVEC_FLAGS_UPDATED);
1351 vecarr->entries[type].offset = stream_get_endp(s);
1352 if (attr)
1353 bpacket_vec_arr_inherit_attr_flags(vecarr, type, attr);
1354 }