1 /**
2 * bgp_updgrp_packet.c: BGP update group packet handling routines
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "queue.h"
45 #include "mpls.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_debug.h"
49 #include "bgpd/bgp_errors.h"
50 #include "bgpd/bgp_fsm.h"
51 #include "bgpd/bgp_route.h"
52 #include "bgpd/bgp_packet.h"
53 #include "bgpd/bgp_advertise.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_nexthop.h"
56 #include "bgpd/bgp_nht.h"
57 #include "bgpd/bgp_mplsvpn.h"
58 #include "bgpd/bgp_label.h"
59 #include "bgpd/bgp_addpath.h"
60
61 /********************
62 * PRIVATE FUNCTIONS
63 ********************/
64
65 /********************
66 * PUBLIC FUNCTIONS
67 ********************/
68 struct bpacket *bpacket_alloc(void)
69 {
70 struct bpacket *pkt;
71
72 pkt = XCALLOC(MTYPE_BGP_PACKET, sizeof(struct bpacket));
73
74 return pkt;
75 }
76
77 void bpacket_free(struct bpacket *pkt)
78 {
79 if (pkt->buffer)
80 stream_free(pkt->buffer);
81 pkt->buffer = NULL;
82 XFREE(MTYPE_BGP_PACKET, pkt);
83 }
84
85 void bpacket_queue_init(struct bpacket_queue *q)
86 {
87 TAILQ_INIT(&(q->pkts));
88 }
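
/*
 * Note on queue layout, as implemented by the routines below: the queue
 * always ends in a "sentinel" bpacket whose buffer is NULL.  Each peer_af
 * that still has data to receive is linked onto exactly one bpacket via
 * pkt_train, with paf->next_pkt_to_send pointing at that packet.  Every
 * bpacket carries a monotonically increasing version number, which lets
 * bpacket_queue_virtual_length() estimate how far behind a peer is.
 */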
89
90 /*
91 * bpacket_queue_sanity_check
92 */
93 void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__))
94 * q)
95 {
96 #if 0
97 struct bpacket *pkt;
98
99 pkt = bpacket_queue_last (q);
100 assert (pkt);
101 assert (!pkt->buffer);
102
103 /*
104 * Make sure the count of packets is correct.
105 */
106 int num_pkts = 0;
107
108 pkt = bpacket_queue_first (q);
109 while (pkt)
110 {
111 num_pkts++;
112
113 if (num_pkts > q->curr_count)
114 assert (0);
115
116 pkt = TAILQ_NEXT (pkt, pkt_train);
117 }
118
119 assert (num_pkts == q->curr_count);
120 #endif
121 }
122
123 /*
124 * bpacket_queue_add_packet
125 *
126  * Internal function of bpacket_queue that adds a packet
127  * entry to the end of the list.
128 *
129 * Users of bpacket_queue should use bpacket_queue_add instead.
130 */
131 static void bpacket_queue_add_packet(struct bpacket_queue *q,
132 struct bpacket *pkt)
133 {
134 struct bpacket *last_pkt;
135
136 if (TAILQ_EMPTY(&(q->pkts)))
137 TAILQ_INSERT_TAIL(&(q->pkts), pkt, pkt_train);
138 else {
139 last_pkt = bpacket_queue_last(q);
140 TAILQ_INSERT_AFTER(&(q->pkts), last_pkt, pkt, pkt_train);
141 }
142 q->curr_count++;
143 if (q->hwm_count < q->curr_count)
144 q->hwm_count = q->curr_count;
145 }
146
147 /*
148 * Adds a packet to the bpacket_queue.
149 *
150  * The stream passed in is consumed by this function. The caller
151  * should therefore not free or use the stream after invoking this
152  * function.
153 */
154 struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
155 struct bpacket_attr_vec_arr *vecarrp)
156 {
157 struct bpacket *pkt;
158 struct bpacket *last_pkt;
159
160
161 pkt = bpacket_alloc();
162 if (TAILQ_EMPTY(&(q->pkts))) {
163 pkt->ver = 1;
164 pkt->buffer = s;
165 if (vecarrp)
166 memcpy(&pkt->arr, vecarrp,
167 sizeof(struct bpacket_attr_vec_arr));
168 else
169 bpacket_attr_vec_arr_reset(&pkt->arr);
170 bpacket_queue_add_packet(q, pkt);
171 bpacket_queue_sanity_check(q);
172 return pkt;
173 }
174
175 /*
176  * Fill the new information into the current sentinel and create a
177 * new sentinel.
178 */
179 bpacket_queue_sanity_check(q);
180 last_pkt = bpacket_queue_last(q);
181 assert(last_pkt->buffer == NULL);
182 last_pkt->buffer = s;
183 if (vecarrp)
184 memcpy(&last_pkt->arr, vecarrp,
185 sizeof(struct bpacket_attr_vec_arr));
186 else
187 bpacket_attr_vec_arr_reset(&last_pkt->arr);
188
189 pkt->ver = last_pkt->ver;
190 pkt->ver++;
191 bpacket_queue_add_packet(q, pkt);
192
193 bpacket_queue_sanity_check(q);
194 return last_pkt;
195 }
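
/*
 * Illustrative (hypothetical) producer flow for the queue, assuming a
 * subgroup "subgrp" whose work stream "s" already holds a complete UPDATE
 * and whose attribute vectors are in "vecarr":
 *
 *     (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), stream_dup(s), &vecarr);
 *     subgroup_trigger_write(subgrp);
 *
 * Ownership of the duplicated stream passes to the queue; the subgroup
 * keeps reusing its own work stream for the next packet.
 */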
196
197 struct bpacket *bpacket_queue_first(struct bpacket_queue *q)
198 {
199 return (TAILQ_FIRST(&(q->pkts)));
200 }
201
202 struct bpacket *bpacket_queue_last(struct bpacket_queue *q)
203 {
204 return TAILQ_LAST(&(q->pkts), pkt_queue);
205 }
206
207 struct bpacket *bpacket_queue_remove(struct bpacket_queue *q)
208 {
209 struct bpacket *first;
210
211 first = bpacket_queue_first(q);
212 if (first) {
213 TAILQ_REMOVE(&(q->pkts), first, pkt_train);
214 q->curr_count--;
215 }
216 return first;
217 }
218
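/*
 * The counts below are deliberately off by one: the trailing sentinel
 * packet is included in curr_count/hwm_count but carries no data, so it
 * is not reported as part of the queue length.
 */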
219 unsigned int bpacket_queue_length(struct bpacket_queue *q)
220 {
221 return q->curr_count - 1;
222 }
223
224 unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q)
225 {
226 return q->hwm_count - 1;
227 }
228
229 bool bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q)
230 {
231 if (q->curr_count >= bgp->default_subgroup_pkt_queue_max)
232 return true;
233 return false;
234 }
235
236 void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf)
237 {
238 if (!pkt || !paf)
239 return;
240
241 LIST_INSERT_HEAD(&(pkt->peers), paf, pkt_train);
242 paf->next_pkt_to_send = pkt;
243 }
244
245 /*
246 * bpacket_queue_cleanup
247 */
248 void bpacket_queue_cleanup(struct bpacket_queue *q)
249 {
250 struct bpacket *pkt;
251
252 while ((pkt = bpacket_queue_remove(q))) {
253 bpacket_free(pkt);
254 }
255 }
256
257 /*
258 * bpacket_queue_compact
259 *
260 * Delete packets that do not need to be transmitted to any peer from
261 * the queue.
262 *
263 * @return the number of packets deleted.
264 */
265 static int bpacket_queue_compact(struct bpacket_queue *q)
266 {
267 int num_deleted;
268 struct bpacket *pkt, *removed_pkt;
269
270 num_deleted = 0;
271
272 while (1) {
273 pkt = bpacket_queue_first(q);
274 if (!pkt)
275 break;
276
277 /*
278 * Don't delete the sentinel.
279 */
280 if (!pkt->buffer)
281 break;
282
283 if (!LIST_EMPTY(&(pkt->peers)))
284 break;
285
286 removed_pkt = bpacket_queue_remove(q);
287 assert(pkt == removed_pkt);
288 bpacket_free(removed_pkt);
289
290 num_deleted++;
291 }
292
293 bpacket_queue_sanity_check(q);
294 return num_deleted;
295 }
296
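/*
 * bpacket_queue_advance_peer
 *
 * Move a peer to the packet following its current one, typically after
 * that packet has been written out to the peer.  Once the peer has been
 * re-linked, packets that no longer have any interested peers are
 * compacted away, which may make the subgroup eligible for a merge.
 */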
297 void bpacket_queue_advance_peer(struct peer_af *paf)
298 {
299 struct bpacket *pkt;
300 struct bpacket *old_pkt;
301
302 old_pkt = paf->next_pkt_to_send;
303 if (old_pkt->buffer == NULL)
304 /* Already at end of list */
305 return;
306
307 LIST_REMOVE(paf, pkt_train);
308 pkt = TAILQ_NEXT(old_pkt, pkt_train);
309 bpacket_add_peer(pkt, paf);
310
311 if (!bpacket_queue_compact(PAF_PKTQ(paf)))
312 return;
313
314 /*
315 * Deleted one or more packets. Check if we can now merge this
316 * peer's subgroup into another subgroup.
317 */
318 update_subgroup_check_merge(paf->subgroup, "advanced peer in queue");
319 }
320
321 /*
322 * bpacket_queue_remove_peer
323 *
324 * Remove the peer from the packet queue of the subgroup it belongs
325 * to.
326 */
327 void bpacket_queue_remove_peer(struct peer_af *paf)
328 {
329 struct bpacket_queue *q;
330
331 q = PAF_PKTQ(paf);
332 assert(q);
333
334 LIST_REMOVE(paf, pkt_train);
335 paf->next_pkt_to_send = NULL;
336
337 bpacket_queue_compact(q);
338 }
339
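/*
 * bpacket_queue_virtual_length
 *
 * Number of packets still ahead of the given peer, computed from the
 * version numbers of the peer's next packet and the last packet in the
 * queue.  Worked example for the rollover branch: with pkt->ver ==
 * UINT_MAX - 1 and last->ver == 2, the result is
 * (UINT_MAX - (UINT_MAX - 1) + 1) + 2 == 4.
 */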
340 unsigned int bpacket_queue_virtual_length(struct peer_af *paf)
341 {
342 struct bpacket *pkt;
343 struct bpacket *last;
344 struct bpacket_queue *q;
345
346 pkt = paf->next_pkt_to_send;
347 if (!pkt || (pkt->buffer == NULL))
348 /* Already at end of list */
349 return 0;
350
351 q = PAF_PKTQ(paf);
352 if (TAILQ_EMPTY(&(q->pkts)))
353 return 0;
354
355 last = TAILQ_LAST(&(q->pkts), pkt_queue);
356 if (last->ver >= pkt->ver)
357 return last->ver - pkt->ver;
358
359 /* sequence # rolled over */
360 return (UINT_MAX - pkt->ver + 1) + last->ver;
361 }
362
363 /*
364 * Dump the bpacket queue
365 */
366 void bpacket_queue_show_vty(struct bpacket_queue *q, struct vty *vty)
367 {
368 struct bpacket *pkt;
369 struct peer_af *paf;
370
371 pkt = bpacket_queue_first(q);
372 while (pkt) {
373 vty_out(vty, " Packet %p ver %u buffer %p\n", pkt, pkt->ver,
374 pkt->buffer);
375
376 LIST_FOREACH (paf, &(pkt->peers), pkt_train) {
377 vty_out(vty, " - %s\n", paf->peer->host);
378 }
379 pkt = bpacket_next(pkt);
380 }
381 return;
382 }
383
384 struct stream *bpacket_reformat_for_peer(struct bpacket *pkt,
385 struct peer_af *paf)
386 {
387 struct stream *s = NULL;
388 bpacket_attr_vec *vec;
389 struct peer *peer;
390 char buf[BUFSIZ];
391 char buf2[BUFSIZ];
392 struct bgp_filter *filter;
393
394 s = stream_dup(pkt->buffer);
395 peer = PAF_PEER(paf);
396
397 vec = &pkt->arr.entries[BGP_ATTR_VEC_NH];
398
399 if (!CHECK_FLAG(vec->flags, BPKT_ATTRVEC_FLAGS_UPDATED))
400 return s;
401
402 uint8_t nhlen;
403 afi_t nhafi;
404 int route_map_sets_nh;
405
406 nhlen = stream_getc_from(s, vec->offset);
407 filter = &peer->filter[paf->afi][paf->safi];
408
409 if (peer_cap_enhe(peer, paf->afi, paf->safi))
410 nhafi = AFI_IP6;
411 else
412 nhafi = BGP_NEXTHOP_AFI_FROM_NHLEN(nhlen);
413
414 if (nhafi == AFI_IP) {
415 struct in_addr v4nh, *mod_v4nh;
416 int nh_modified = 0;
417 size_t offset_nh = vec->offset + 1;
418
419 route_map_sets_nh =
420 (CHECK_FLAG(vec->flags,
421 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED)
422 || CHECK_FLAG(
423 vec->flags,
424 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
425
426 switch (nhlen) {
427 case BGP_ATTR_NHLEN_IPV4:
428 break;
429 case BGP_ATTR_NHLEN_VPNV4:
430 offset_nh += 8;
431 break;
432 default:
433 /* TODO: handle IPv6 nexthops */
434 flog_warn(
435 EC_BGP_INVALID_NEXTHOP_LENGTH,
436 "%s: %s: invalid MP nexthop length (AFI IP): %u",
437 __func__, peer->host, nhlen);
438 stream_free(s);
439 return NULL;
440 }
441
442 stream_get_from(&v4nh, s, offset_nh, IPV4_MAX_BYTELEN);
443 mod_v4nh = &v4nh;
444
445 /*
446 * If route-map has set the nexthop, that is normally
447 * used; if it is specified as peer-address, the peering
448 * address is picked up. Otherwise, if NH is unavailable
449 * from attribute, the peering addr is picked up; the
450 * "NH unavailable" case also covers next-hop-self and
451 * some other scenarios - see subgroup_announce_check().
452 * In all other cases, use the nexthop carried in the
453 * attribute unless it is EBGP non-multiaccess and there
454 * is no next-hop-unchanged setting or the peer is EBGP
455 * and the route-map that changed the next-hop value
456 * was applied inbound rather than outbound. Updates to
457 * an EBGP peer should only modify the next-hop if it
458 * was set in an outbound route-map to that peer.
459 * Note: It is assumed route-map cannot set the nexthop
460 * to an invalid value.
461 */
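/*
 * In table form, the checks below resolve roughly to:
 *
 *   route-map set the NH (iBGP, or eBGP with an outbound map):
 *     set to "peer-address"        -> rewrite to the peering address
 *     set to a specific address    -> keep the route-map value
 *   NH in the attribute is 0.0.0.0 -> rewrite to the peering address
 *   eBGP peer, NH not on a shared subnet, and no form of
 *   next-hop-unchanged configured  -> rewrite to the peering address
 *   anything else                  -> keep the NH from the attribute
 */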
462 if (route_map_sets_nh
463 && ((peer->sort != BGP_PEER_EBGP)
464 || ROUTE_MAP_OUT(filter))) {
465 if (CHECK_FLAG(
466 vec->flags,
467 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
468 mod_v4nh = &peer->nexthop.v4;
469 nh_modified = 1;
470 }
471 } else if (v4nh.s_addr == INADDR_ANY) {
472 mod_v4nh = &peer->nexthop.v4;
473 nh_modified = 1;
474 } else if (peer->sort == BGP_PEER_EBGP
475 && (bgp_multiaccess_check_v4(v4nh, peer) == 0)
476 && !CHECK_FLAG(vec->flags,
477 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
478 && !peer_af_flag_check(
479 peer, paf->afi, paf->safi,
480 PEER_FLAG_NEXTHOP_UNCHANGED)) {
481 /* NOTE: not handling case where NH has new AFI
482 */
483 mod_v4nh = &peer->nexthop.v4;
484 nh_modified = 1;
485 }
486
487 if (nh_modified) /* allow for VPN RD */
488 stream_put_in_addr_at(s, offset_nh, mod_v4nh);
489
490 if (bgp_debug_update(peer, NULL, NULL, 0))
491 zlog_debug("u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ nexthop %s%s",
492 PAF_SUBGRP(paf)->update_group->id,
493 PAF_SUBGRP(paf)->id, peer->host,
494 inet_ntoa(*mod_v4nh),
495 (nhlen == BGP_ATTR_NHLEN_VPNV4 ? " and RD"
496 : ""));
497 } else if (nhafi == AFI_IP6) {
498 struct in6_addr v6nhglobal, *mod_v6nhg;
499 struct in6_addr v6nhlocal, *mod_v6nhl;
500 int gnh_modified, lnh_modified;
501 size_t offset_nhglobal = vec->offset + 1;
502 size_t offset_nhlocal = vec->offset + 1;
503
504 gnh_modified = lnh_modified = 0;
505 mod_v6nhg = &v6nhglobal;
506 mod_v6nhl = &v6nhlocal;
507
508 route_map_sets_nh =
509 (CHECK_FLAG(vec->flags,
510 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED)
511 || CHECK_FLAG(
512 vec->flags,
513 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
514
515 /*
516 * The logic here is rather similar to that for IPv4, the
517 * additional work being to handle 1 or 2 nexthops.
518 * Also, 3rd party nexthop is not propagated for EBGP
519 * right now.
520 */
521 switch (nhlen) {
522 case BGP_ATTR_NHLEN_IPV6_GLOBAL:
523 break;
524 case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
525 offset_nhlocal += IPV6_MAX_BYTELEN;
526 break;
527 case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
528 offset_nhglobal += 8;
529 break;
530 case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
531 offset_nhglobal += 8;
532 offset_nhlocal += 8 * 2 + IPV6_MAX_BYTELEN;
533 break;
534 default:
535 /* TODO: handle IPv4 nexthops */
536 flog_warn(
537 EC_BGP_INVALID_NEXTHOP_LENGTH,
538 "%s: %s: invalid MP nexthop length (AFI IP6): %u",
539 __func__, peer->host, nhlen);
540 stream_free(s);
541 return NULL;
542 }
543
544 stream_get_from(&v6nhglobal, s, offset_nhglobal,
545 IPV6_MAX_BYTELEN);
546
547 /*
548 * Updates to an EBGP peer should only modify the
549 * next-hop if it was set in an outbound route-map
550 * to that peer.
551 */
552 if (route_map_sets_nh
553 && ((peer->sort != BGP_PEER_EBGP)
554 || ROUTE_MAP_OUT(filter))) {
555 if (CHECK_FLAG(
556 vec->flags,
557 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
558 mod_v6nhg = &peer->nexthop.v6_global;
559 gnh_modified = 1;
560 }
561 } else if (IN6_IS_ADDR_UNSPECIFIED(&v6nhglobal)) {
562 mod_v6nhg = &peer->nexthop.v6_global;
563 gnh_modified = 1;
564 } else if ((peer->sort == BGP_PEER_EBGP)
565 && (!bgp_multiaccess_check_v6(v6nhglobal, peer))
566 && !CHECK_FLAG(vec->flags,
567 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
568 && !peer_af_flag_check(
569 peer, nhafi, paf->safi,
570 PEER_FLAG_NEXTHOP_UNCHANGED)) {
571 /* NOTE: not handling case where NH has new AFI
572 */
573 mod_v6nhg = &peer->nexthop.v6_global;
574 gnh_modified = 1;
575 }
576
577 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
578 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
579 stream_get_from(&v6nhlocal, s, offset_nhlocal,
580 IPV6_MAX_BYTELEN);
581 if (IN6_IS_ADDR_UNSPECIFIED(&v6nhlocal)) {
582 mod_v6nhl = &peer->nexthop.v6_local;
583 lnh_modified = 1;
584 }
585 }
586
587 if (gnh_modified)
588 stream_put_in6_addr_at(s, offset_nhglobal, mod_v6nhg);
589 if (lnh_modified)
590 stream_put_in6_addr_at(s, offset_nhlocal, mod_v6nhl);
591
592 if (bgp_debug_update(peer, NULL, NULL, 0)) {
593 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
594 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
595 zlog_debug(
596 "u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ mp_nexthops %s, %s%s",
597 PAF_SUBGRP(paf)->update_group->id,
598 PAF_SUBGRP(paf)->id, peer->host,
599 inet_ntop(AF_INET6, mod_v6nhg, buf,
600 BUFSIZ),
601 inet_ntop(AF_INET6, mod_v6nhl, buf2,
602 BUFSIZ),
603 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL
604 ? " and RD"
605 : ""));
606 else
607 zlog_debug("u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ mp_nexthop %s%s",
608 PAF_SUBGRP(paf)->update_group->id,
609 PAF_SUBGRP(paf)->id, peer->host,
610 inet_ntop(AF_INET6, mod_v6nhg, buf,
611 BUFSIZ),
612 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL
613 ? " and RD"
614 : ""));
615 }
616 } else if (paf->afi == AFI_L2VPN) {
617 struct in_addr v4nh, *mod_v4nh;
618 int nh_modified = 0;
619
620 stream_get_from(&v4nh, s, vec->offset + 1, 4);
621 mod_v4nh = &v4nh;
622
623 /* No route-map changes allowed for EVPN nexthops. */
624 if (v4nh.s_addr == INADDR_ANY) {
625 mod_v4nh = &peer->nexthop.v4;
626 nh_modified = 1;
627 }
628
629 if (nh_modified)
630 stream_put_in_addr_at(s, vec->offset + 1, mod_v4nh);
631
632 if (bgp_debug_update(peer, NULL, NULL, 0))
633 zlog_debug("u%" PRIu64 ":s%" PRIu64" %s send UPDATE w/ nexthop %s",
634 PAF_SUBGRP(paf)->update_group->id,
635 PAF_SUBGRP(paf)->id, peer->host,
636 inet_ntoa(*mod_v4nh));
637 }
638
639 return s;
640 }
641
642 /*
643 * Update the vecarr offsets to go beyond 'pos' bytes, i.e. add 'pos'
644 * to each offset.
645 */
646 static void bpacket_attr_vec_arr_update(struct bpacket_attr_vec_arr *vecarr,
647 size_t pos)
648 {
649 int i;
650
651 if (!vecarr)
652 return;
653
654 for (i = 0; i < BGP_ATTR_VEC_MAX; i++)
655 vecarr->entries[i].offset += pos;
656 }
657
658 /*
659  * Return whether there are packets to build for this subgroup.
660 */
661 bool subgroup_packets_to_build(struct update_subgroup *subgrp)
662 {
663 struct bgp_advertise *adv;
664
665 if (!subgrp)
666 return false;
667
668 adv = bgp_adv_fifo_first(&subgrp->sync->withdraw);
669 if (adv)
670 return true;
671
672 adv = bgp_adv_fifo_first(&subgrp->sync->update);
673 if (adv)
674 return true;
675
676 return false;
677 }
678
679 /* Make BGP update packet. */
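/*
 * The packet is assembled in two pieces: subgrp->work ("s") receives the
 * header, the withdrawn-routes length, the attributes and any plain IPv4
 * unicast NLRI, while subgrp->scratch ("snlri") accumulates the
 * MP_REACH_NLRI attribute for all other AFI/SAFI combinations.  When the
 * packet is finalized, the two streams are spliced together with
 * stream_dupcat() at mpattr_pos, so MP_REACH_NLRI ends up as the first
 * attribute, per draft-ietf-idr-error-handling.
 */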
680 struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
681 {
682 struct bpacket_attr_vec_arr vecarr;
683 struct bpacket *pkt;
684 struct peer *peer;
685 struct stream *s;
686 struct stream *snlri;
687 struct stream *packet;
688 struct bgp_adj_out *adj;
689 struct bgp_advertise *adv;
690 struct bgp_dest *dest = NULL;
691 struct bgp_path_info *path = NULL;
692 bgp_size_t total_attr_len = 0;
693 unsigned long attrlen_pos = 0;
694 size_t mpattrlen_pos = 0;
695 size_t mpattr_pos = 0;
696 afi_t afi;
697 safi_t safi;
698 int space_remaining = 0;
699 int space_needed = 0;
700 char send_attr_str[BUFSIZ];
701 int send_attr_printed = 0;
702 int num_pfx = 0;
703 int addpath_encode = 0;
704 int addpath_overhead = 0;
705 uint32_t addpath_tx_id = 0;
706 struct prefix_rd *prd = NULL;
707 mpls_label_t label = MPLS_INVALID_LABEL, *label_pnt = NULL;
708 uint32_t num_labels = 0;
709
710 if (!subgrp)
711 return NULL;
712
713 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
714 return NULL;
715
716 peer = SUBGRP_PEER(subgrp);
717 afi = SUBGRP_AFI(subgrp);
718 safi = SUBGRP_SAFI(subgrp);
719 s = subgrp->work;
720 stream_reset(s);
721 snlri = subgrp->scratch;
722 stream_reset(snlri);
723
724 bpacket_attr_vec_arr_reset(&vecarr);
725
726 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
727 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
728
729 adv = bgp_adv_fifo_first(&subgrp->sync->update);
730 while (adv) {
731 const struct prefix *dest_p;
732
733 assert(adv->dest);
734 dest = adv->dest;
735 dest_p = bgp_dest_get_prefix(dest);
736 adj = adv->adj;
737 addpath_tx_id = adj->addpath_tx_id;
738 path = adv->pathi;
739
740 /* Check if we need to add a prefix to the packet if
741 * maximum-prefix-out is set for the peer.
742 */
743 if (CHECK_FLAG(peer->af_flags[afi][safi],
744 PEER_FLAG_MAX_PREFIX_OUT)
745 && subgrp->scount >= peer->pmax_out[afi][safi]) {
746 if (BGP_DEBUG(update, UPDATE_OUT)
747 || BGP_DEBUG(update, UPDATE_PREFIX)) {
748 zlog_debug(
749 "%s reached maximum prefix to be send (%u)",
750 peer->host, peer->pmax_out[afi][safi]);
751 }
752 goto next;
753 }
754
755 space_remaining = STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
756 - BGP_MAX_PACKET_SIZE_OVERFLOW;
757 space_needed =
758 BGP_NLRI_LENGTH + addpath_overhead
759 + bgp_packet_mpattr_prefix_size(afi, safi, dest_p);
760
761                 /* Stop when the remaining space cannot hold the NLRI and its length. */
762 if (space_remaining < space_needed)
763 break;
764
765 /* If packet is empty, set attribute. */
766 if (stream_empty(s)) {
767 struct peer *from = NULL;
768
769 if (path)
770 from = path->peer;
771
772 /* 1: Write the BGP message header - 16 bytes marker, 2
773 * bytes length,
774 * one byte message type.
775 */
776 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
777
778 /* 2: withdrawn routes length */
779 stream_putw(s, 0);
780
781 /* 3: total attributes length - attrlen_pos stores the
782 * position */
783 attrlen_pos = stream_get_endp(s);
784 stream_putw(s, 0);
785
786 /* 4: if there is MP_REACH_NLRI attribute, that should
787 * be the first
788 * attribute, according to
789 * draft-ietf-idr-error-handling. Save the
790 * position.
791 */
792 mpattr_pos = stream_get_endp(s);
793
794 /* 5: Encode all the attributes, except MP_REACH_NLRI
795 * attr. */
796 total_attr_len = bgp_packet_attribute(
797 NULL, peer, s, adv->baa->attr, &vecarr, NULL,
798 afi, safi, from, NULL, NULL, 0, 0, 0);
799
800 space_remaining =
801 STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
802 - BGP_MAX_PACKET_SIZE_OVERFLOW;
803 space_needed = BGP_NLRI_LENGTH + addpath_overhead
804 + bgp_packet_mpattr_prefix_size(
805 afi, safi, dest_p);
806
807 /* If the attributes alone do not leave any room for
808 * NLRI then
809 * return */
810 if (space_remaining < space_needed) {
811 flog_err(
812 EC_BGP_UPDGRP_ATTR_LEN,
813 "u%" PRIu64 ":s%" PRIu64" attributes too long, cannot send UPDATE",
814 subgrp->update_group->id, subgrp->id);
815
816 /* Flush the FIFO update queue */
817 while (adv)
818 adv = bgp_advertise_clean_subgroup(
819 subgrp, adj);
820 return NULL;
821 }
822
823 if (BGP_DEBUG(update, UPDATE_OUT)
824 || BGP_DEBUG(update, UPDATE_PREFIX)) {
825 memset(send_attr_str, 0, BUFSIZ);
826 send_attr_printed = 0;
827 bgp_dump_attr(adv->baa->attr, send_attr_str,
828 BUFSIZ);
829 }
830 }
831
832 if ((afi == AFI_IP && safi == SAFI_UNICAST)
833 && !peer_cap_enhe(peer, afi, safi))
834 stream_put_prefix_addpath(s, dest_p, addpath_encode,
835 addpath_tx_id);
836 else {
837 /* Encode the prefix in MP_REACH_NLRI attribute */
838 if (dest->pdest)
839 prd = (struct prefix_rd *)bgp_dest_get_prefix(
840 dest->pdest);
841
842 if (safi == SAFI_LABELED_UNICAST) {
843 label = bgp_adv_label(dest, path, peer, afi,
844 safi);
845 label_pnt = &label;
846 num_labels = 1;
847 } else if (path && path->extra) {
848 label_pnt = &path->extra->label[0];
849 num_labels = path->extra->num_labels;
850 }
851
852 if (stream_empty(snlri))
853 mpattrlen_pos = bgp_packet_mpattr_start(
854 snlri, peer, afi, safi, &vecarr,
855 adv->baa->attr);
856
857 bgp_packet_mpattr_prefix(snlri, afi, safi, dest_p, prd,
858 label_pnt, num_labels,
859 addpath_encode, addpath_tx_id,
860 adv->baa->attr);
861 }
862
863 num_pfx++;
864
865 if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
866 char pfx_buf[BGP_PRD_PATH_STRLEN];
867
868 if (!send_attr_printed) {
869 zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE w/ attr: %s",
870 subgrp->update_group->id, subgrp->id,
871 send_attr_str);
872 if (!stream_empty(snlri)) {
873 iana_afi_t pkt_afi;
874 iana_safi_t pkt_safi;
875
876 pkt_afi = afi_int2iana(afi);
877 pkt_safi = safi_int2iana(safi);
878 zlog_debug(
879 "u%" PRIu64 ":s%" PRIu64" send MP_REACH for afi/safi %d/%d",
880 subgrp->update_group->id,
881 subgrp->id, pkt_afi, pkt_safi);
882 }
883
884 send_attr_printed = 1;
885 }
886
887 bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p,
888 label_pnt, num_labels,
889 addpath_encode, addpath_tx_id,
890 pfx_buf, sizeof(pfx_buf));
891 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s",
892 subgrp->update_group->id, subgrp->id,
893 pfx_buf);
894 }
895
896                 /* Synchronize attribute. */
897 if (adj->attr)
898 bgp_attr_unintern(&adj->attr);
899 else
900 subgrp->scount++;
901
902 adj->attr = bgp_attr_intern(adv->baa->attr);
903 next:
904 adv = bgp_advertise_clean_subgroup(subgrp, adj);
905 }
906
907 if (!stream_empty(s)) {
908 if (!stream_empty(snlri)) {
909 bgp_packet_mpattr_end(snlri, mpattrlen_pos);
910 total_attr_len += stream_get_endp(snlri);
911 }
912
913 /* set the total attribute length correctly */
914 stream_putw_at(s, attrlen_pos, total_attr_len);
915
916 if (!stream_empty(snlri)) {
917 packet = stream_dupcat(s, snlri, mpattr_pos);
918 bpacket_attr_vec_arr_update(&vecarr, mpattr_pos);
919 } else
920 packet = stream_dup(s);
921 bgp_packet_set_size(packet);
922 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
923 zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE len %zd numpfx %d",
924 subgrp->update_group->id, subgrp->id,
925 (stream_get_endp(packet)
926 - stream_get_getp(packet)),
927 num_pfx);
928 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), packet, &vecarr);
929 stream_reset(s);
930 stream_reset(snlri);
931 return pkt;
932 }
933 return NULL;
934 }
935
936 /* Make BGP withdraw packet. */
937 /* For ipv4 unicast:
938 16-octet marker | 2-octet length | 1-octet type |
939 2-octet withdrawn route length | withdrawn prefixes | 2-octet attrlen (=0)
940 */
941 /* For other afi/safis:
942 16-octet marker | 2-octet length | 1-octet type |
943 2-octet withdrawn route length (=0) | 2-octet attrlen |
944 mp_unreach attr type | attr len | afi | safi | withdrawn prefixes
945 */
946 struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
947 {
948 struct bpacket *pkt;
949 struct stream *s;
950 struct bgp_adj_out *adj;
951 struct bgp_advertise *adv;
952 struct peer *peer;
953 struct bgp_dest *dest;
954 bgp_size_t unfeasible_len;
955 bgp_size_t total_attr_len;
956 size_t mp_start = 0;
957 size_t attrlen_pos = 0;
958 size_t mplen_pos = 0;
959 uint8_t first_time = 1;
960 afi_t afi;
961 safi_t safi;
962 int space_remaining = 0;
963 int space_needed = 0;
964 int num_pfx = 0;
965 int addpath_encode = 0;
966 int addpath_overhead = 0;
967 uint32_t addpath_tx_id = 0;
968 const struct prefix_rd *prd = NULL;
969
970
971 if (!subgrp)
972 return NULL;
973
974 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
975 return NULL;
976
977 peer = SUBGRP_PEER(subgrp);
978 afi = SUBGRP_AFI(subgrp);
979 safi = SUBGRP_SAFI(subgrp);
980 s = subgrp->work;
981 stream_reset(s);
982 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
983 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
984
985 while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) {
986 const struct prefix *dest_p;
987
988 assert(adv->dest);
989 adj = adv->adj;
990 dest = adv->dest;
991 dest_p = bgp_dest_get_prefix(dest);
992 addpath_tx_id = adj->addpath_tx_id;
993
994 space_remaining =
995 STREAM_WRITEABLE(s) - BGP_MAX_PACKET_SIZE_OVERFLOW;
996 space_needed =
997 BGP_NLRI_LENGTH + addpath_overhead + BGP_TOTAL_ATTR_LEN
998 + bgp_packet_mpattr_prefix_size(afi, safi, dest_p);
999
1000 if (space_remaining < space_needed)
1001 break;
1002
1003 if (stream_empty(s)) {
1004 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1005 stream_putw(s, 0); /* unfeasible routes length */
1006 } else
1007 first_time = 0;
1008
1009 if (afi == AFI_IP && safi == SAFI_UNICAST
1010 && !peer_cap_enhe(peer, afi, safi))
1011 stream_put_prefix_addpath(s, dest_p, addpath_encode,
1012 addpath_tx_id);
1013 else {
1014 if (dest->pdest)
1015 prd = (struct prefix_rd *)bgp_dest_get_prefix(
1016 dest->pdest);
1017
1018 /* If first time, format the MP_UNREACH header
1019 */
1020 if (first_time) {
1021 iana_afi_t pkt_afi;
1022 iana_safi_t pkt_safi;
1023
1024 pkt_afi = afi_int2iana(afi);
1025 pkt_safi = safi_int2iana(safi);
1026
1027 attrlen_pos = stream_get_endp(s);
1028 /* total attr length = 0 for now.
1029 * reevaluate later */
1030 stream_putw(s, 0);
1031 mp_start = stream_get_endp(s);
1032 mplen_pos = bgp_packet_mpunreach_start(s, afi,
1033 safi);
1034 if (bgp_debug_update(NULL, NULL,
1035 subgrp->update_group, 0))
1036 zlog_debug(
1037 "u%" PRIu64 ":s%" PRIu64" send MP_UNREACH for afi/safi %d/%d",
1038 subgrp->update_group->id,
1039 subgrp->id, pkt_afi, pkt_safi);
1040 }
1041
1042 bgp_packet_mpunreach_prefix(s, dest_p, afi, safi, prd,
1043 NULL, 0, addpath_encode,
1044 addpath_tx_id, NULL);
1045 }
1046
1047 num_pfx++;
1048
1049 if (bgp_debug_update(NULL, dest_p, subgrp->update_group, 0)) {
1050 char pfx_buf[BGP_PRD_PATH_STRLEN];
1051
1052 bgp_debug_rdpfxpath2str(afi, safi, prd, dest_p, NULL, 0,
1053 addpath_encode, addpath_tx_id,
1054 pfx_buf, sizeof(pfx_buf));
1055 zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE %s -- unreachable",
1056 subgrp->update_group->id, subgrp->id,
1057 pfx_buf);
1058 }
1059
1060 subgrp->scount--;
1061
1062 bgp_adj_out_remove_subgroup(dest, adj, subgrp);
1063 bgp_dest_unlock_node(dest);
1064 }
1065
1066 if (!stream_empty(s)) {
1067 if (afi == AFI_IP && safi == SAFI_UNICAST
1068 && !peer_cap_enhe(peer, afi, safi)) {
1069 unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
1070 - BGP_UNFEASIBLE_LEN;
1071 stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
1072 stream_putw(s, 0);
1073 } else {
1074 /* Set the mp_unreach attr's length */
1075 bgp_packet_mpunreach_end(s, mplen_pos);
1076
1077 /* Set total path attribute length. */
1078 total_attr_len = stream_get_endp(s) - mp_start;
1079 stream_putw_at(s, attrlen_pos, total_attr_len);
1080 }
1081 bgp_packet_set_size(s);
1082 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1083 zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE (withdraw) len %zd numpfx %d",
1084 subgrp->update_group->id, subgrp->id,
1085 (stream_get_endp(s) - stream_get_getp(s)),
1086 num_pfx);
1087 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), stream_dup(s),
1088 NULL);
1089 stream_reset(s);
1090 return pkt;
1091 }
1092
1093 return NULL;
1094 }
1095
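/*
 * Build and queue an UPDATE that advertises the default route (0.0.0.0/0
 * or ::/0, depending on the subgroup's AFI) with the given attributes, as
 * used by default-originate.  "from" identifies the peer the attributes
 * were sourced from, if any, and is passed through to attribute encoding.
 */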
1096 void subgroup_default_update_packet(struct update_subgroup *subgrp,
1097 struct attr *attr, struct peer *from)
1098 {
1099 struct stream *s;
1100 struct peer *peer;
1101 struct prefix p;
1102 unsigned long pos;
1103 bgp_size_t total_attr_len;
1104 afi_t afi;
1105 safi_t safi;
1106 struct bpacket_attr_vec_arr vecarr;
1107 int addpath_encode = 0;
1108
1109 if (DISABLE_BGP_ANNOUNCE)
1110 return;
1111
1112 if (!subgrp)
1113 return;
1114
1115 peer = SUBGRP_PEER(subgrp);
1116 afi = SUBGRP_AFI(subgrp);
1117 safi = SUBGRP_SAFI(subgrp);
1118 bpacket_attr_vec_arr_reset(&vecarr);
1119 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1120
1121 memset(&p, 0, sizeof(p));
1122 p.family = afi2family(afi);
1123 p.prefixlen = 0;
1124
1125 /* Logging the attribute. */
1126 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1127 char attrstr[BUFSIZ];
1128 char buf[PREFIX_STRLEN];
1129 /* ' with addpath ID ' 17
1130 * max strlen of uint32 + 10
1131 * +/- (just in case) + 1
1132 * null terminator + 1
1133 * ============================ 29 */
1134 char tx_id_buf[30];
1135
1136 attrstr[0] = '\0';
1137
1138 bgp_dump_attr(attr, attrstr, BUFSIZ);
1139
1140 if (addpath_encode)
1141 snprintf(tx_id_buf, sizeof(tx_id_buf),
1142 " with addpath ID %u",
1143 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1144 else
1145 tx_id_buf[0] = '\0';
1146
1147 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s%s %s",
1148 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1149 prefix2str(&p, buf, sizeof(buf)), tx_id_buf,
1150 attrstr);
1151 }
1152
1153 s = stream_new(BGP_MAX_PACKET_SIZE);
1154
1155 /* Make BGP update packet. */
1156 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1157
1158 /* Unfeasible Routes Length. */
1159 stream_putw(s, 0);
1160
1161 /* Make place for total attribute length. */
1162 pos = stream_get_endp(s);
1163 stream_putw(s, 0);
1164 total_attr_len = bgp_packet_attribute(
1165 NULL, peer, s, attr, &vecarr, &p, afi, safi, from, NULL, NULL,
1166 0, addpath_encode, BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1167
1168 /* Set Total Path Attribute Length. */
1169 stream_putw_at(s, pos, total_attr_len);
1170
1171 /* NLRI set. */
1172 if (p.family == AF_INET && safi == SAFI_UNICAST
1173 && !peer_cap_enhe(peer, afi, safi))
1174 stream_put_prefix_addpath(
1175 s, &p, addpath_encode,
1176 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1177
1178 /* Set size. */
1179 bgp_packet_set_size(s);
1180
1181 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
1182 subgroup_trigger_write(subgrp);
1183 }
1184
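/*
 * Build and queue an UPDATE that withdraws a previously advertised
 * default route for this subgroup: plain withdrawn-routes for IPv4
 * unicast, MP_UNREACH_NLRI for every other AFI/SAFI.
 */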
1185 void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
1186 {
1187 struct peer *peer;
1188 struct stream *s;
1189 struct prefix p;
1190 unsigned long attrlen_pos = 0;
1191 unsigned long cp;
1192 bgp_size_t unfeasible_len;
1193 bgp_size_t total_attr_len = 0;
1194 size_t mp_start = 0;
1195 size_t mplen_pos = 0;
1196 afi_t afi;
1197 safi_t safi;
1198 int addpath_encode = 0;
1199
1200 if (DISABLE_BGP_ANNOUNCE)
1201 return;
1202
1203 peer = SUBGRP_PEER(subgrp);
1204 afi = SUBGRP_AFI(subgrp);
1205 safi = SUBGRP_SAFI(subgrp);
1206 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1207
1208 memset(&p, 0, sizeof(p));
1209 p.family = afi2family(afi);
1210 p.prefixlen = 0;
1211
1212 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1213 char buf[PREFIX_STRLEN];
1214 /* ' with addpath ID ' 17
1215 * max strlen of uint32 + 10
1216 * +/- (just in case) + 1
1217 * null terminator + 1
1218 * ============================ 29 */
1219                 char tx_id_buf[30] = {0}; /* stays empty unless addpath is in use */
1220
1221 if (addpath_encode)
1222 snprintf(tx_id_buf, sizeof(tx_id_buf),
1223 " with addpath ID %u",
1224 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1225
1226 zlog_debug("u%" PRIu64 ":s%" PRIu64" send UPDATE %s%s -- unreachable",
1227 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1228 prefix2str(&p, buf, sizeof(buf)), tx_id_buf);
1229 }
1230
1231 s = stream_new(BGP_MAX_PACKET_SIZE);
1232
1233 /* Make BGP update packet. */
1234 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1235
1236         /* Unfeasible Routes Length. */
1237 cp = stream_get_endp(s);
1238 stream_putw(s, 0);
1239
1240 /* Withdrawn Routes. */
1241 if (p.family == AF_INET && safi == SAFI_UNICAST
1242 && !peer_cap_enhe(peer, afi, safi)) {
1243 stream_put_prefix_addpath(
1244 s, &p, addpath_encode,
1245 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1246
1247 unfeasible_len = stream_get_endp(s) - cp - 2;
1248
1249 /* Set unfeasible len. */
1250 stream_putw_at(s, cp, unfeasible_len);
1251
1252 /* Set total path attribute length. */
1253 stream_putw(s, 0);
1254 } else {
1255 attrlen_pos = stream_get_endp(s);
1256 stream_putw(s, 0);
1257 mp_start = stream_get_endp(s);
1258 mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
1259 bgp_packet_mpunreach_prefix(
1260 s, &p, afi, safi, NULL, NULL, 0, addpath_encode,
1261 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE, NULL);
1262
1263 /* Set the mp_unreach attr's length */
1264 bgp_packet_mpunreach_end(s, mplen_pos);
1265
1266 /* Set total path attribute length. */
1267 total_attr_len = stream_get_endp(s) - mp_start;
1268 stream_putw_at(s, attrlen_pos, total_attr_len);
1269 }
1270
1271 bgp_packet_set_size(s);
1272
1273 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
1274 subgroup_trigger_write(subgrp);
1275 }
1276
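/*
 * Copy the route-map driven nexthop-change markers from the attribute's
 * rmap_change_flags into the nexthop vector entry, so that
 * bpacket_reformat_for_peer() later knows whether, and how, a route-map
 * already rewrote the nexthop when the packet is customized per peer.
 */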
1277 static void
1278 bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr,
1279 bpacket_attr_vec_type type,
1280 struct attr *attr)
1281 {
1282 if (CHECK_FLAG(attr->rmap_change_flags,
1283 BATTR_RMAP_NEXTHOP_PEER_ADDRESS))
1284 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1285 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS);
1286
1287 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_REFLECTED))
1288 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1289 BPKT_ATTRVEC_FLAGS_REFLECTED);
1290
1291 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED))
1292 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1293 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED);
1294
1295 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_IPV4_NHOP_CHANGED))
1296 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1297 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED);
1298
1299 if (CHECK_FLAG(attr->rmap_change_flags,
1300 BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED))
1301 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1302 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED);
1303
1304 if (CHECK_FLAG(attr->rmap_change_flags,
1305 BATTR_RMAP_IPV6_LL_NHOP_CHANGED))
1306 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1307 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_LNH_CHANGED);
1308 }
1309
1310 /* Reset the Attributes vector array. The vector array is used to override
1311 * certain output parameters in the packet for a particular peer
1312 */
1313 void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr)
1314 {
1315 int i;
1316
1317 if (!vecarr)
1318 return;
1319
1320 i = 0;
1321 while (i < BGP_ATTR_VEC_MAX) {
1322 vecarr->entries[i].flags = 0;
1323 vecarr->entries[i].offset = 0;
1324 i++;
1325 }
1326 }
1327
1328 /* Setup a particular node entry in the vecarr */
1329 void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr,
1330 bpacket_attr_vec_type type, struct stream *s,
1331 struct attr *attr)
1332 {
1333 if (!vecarr)
1334 return;
1335 assert(type < BGP_ATTR_VEC_MAX);
1336
1337 SET_FLAG(vecarr->entries[type].flags, BPKT_ATTRVEC_FLAGS_UPDATED);
1338 vecarr->entries[type].offset = stream_get_endp(s);
1339 if (attr)
1340 bpacket_vec_arr_inherit_attr_flags(vecarr, type, attr);
1341 }