1 /**
2 * bgp_updgrp_packet.c: BGP update group packet handling routines
3 *
4 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
5 *
6 * @author Avneesh Sachdev <avneesh@sproute.net>
7 * @author Rajesh Varadarajan <rajesh@sproute.net>
8 * @author Pradosh Mohapatra <pradosh@sproute.net>
9 *
10 * This file is part of GNU Zebra.
11 *
12 * GNU Zebra is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * GNU Zebra is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; see the file COPYING; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <zebra.h>
28
29 #include "prefix.h"
30 #include "thread.h"
31 #include "buffer.h"
32 #include "stream.h"
33 #include "command.h"
34 #include "sockunion.h"
35 #include "network.h"
36 #include "memory.h"
37 #include "filter.h"
38 #include "routemap.h"
39 #include "log.h"
40 #include "plist.h"
41 #include "linklist.h"
42 #include "workqueue.h"
43 #include "hash.h"
44 #include "queue.h"
45 #include "mpls.h"
46
47 #include "bgpd/bgpd.h"
48 #include "bgpd/bgp_debug.h"
49 #include "bgpd/bgp_errors.h"
50 #include "bgpd/bgp_fsm.h"
51 #include "bgpd/bgp_route.h"
52 #include "bgpd/bgp_packet.h"
53 #include "bgpd/bgp_advertise.h"
54 #include "bgpd/bgp_updgrp.h"
55 #include "bgpd/bgp_nexthop.h"
56 #include "bgpd/bgp_nht.h"
57 #include "bgpd/bgp_mplsvpn.h"
58 #include "bgpd/bgp_label.h"
59 #include "bgpd/bgp_addpath.h"
60
61 /********************
62 * PRIVATE FUNCTIONS
63 ********************/
64
65 /********************
66 * PUBLIC FUNCTIONS
67 ********************/
68 struct bpacket *bpacket_alloc(void)
69 {
70 struct bpacket *pkt;
71
72 pkt = XCALLOC(MTYPE_BGP_PACKET, sizeof(struct bpacket));
73
74 return pkt;
75 }
76
77 void bpacket_free(struct bpacket *pkt)
78 {
79 if (pkt->buffer)
80 stream_free(pkt->buffer);
81 pkt->buffer = NULL;
82 XFREE(MTYPE_BGP_PACKET, pkt);
83 }
84
85 void bpacket_queue_init(struct bpacket_queue *q)
86 {
87 TAILQ_INIT(&(q->pkts));
88 }
89
90 /*
91 * bpacket_queue_sanity_check
92 */
93 void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__))
94 * q)
95 {
96 #if 0
97 struct bpacket *pkt;
98
99 pkt = bpacket_queue_last (q);
100 assert (pkt);
101 assert (!pkt->buffer);
102
103 /*
104 * Make sure the count of packets is correct.
105 */
106 int num_pkts = 0;
107
108 pkt = bpacket_queue_first (q);
109 while (pkt)
110 {
111 num_pkts++;
112
113 if (num_pkts > q->curr_count)
114 assert (0);
115
116 pkt = TAILQ_NEXT (pkt, pkt_train);
117 }
118
119 assert (num_pkts == q->curr_count);
120 #endif
121 }
122
123 /*
124 * bpacket_queue_add_packet
125 *
126  * Internal function of bpacket_queue that adds a packet
127  * entry to the end of the list.
128 *
129 * Users of bpacket_queue should use bpacket_queue_add instead.
130 */
131 static void bpacket_queue_add_packet(struct bpacket_queue *q,
132 struct bpacket *pkt)
133 {
134 struct bpacket *last_pkt;
135
136 if (TAILQ_EMPTY(&(q->pkts)))
137 TAILQ_INSERT_TAIL(&(q->pkts), pkt, pkt_train);
138 else {
139 last_pkt = bpacket_queue_last(q);
140 TAILQ_INSERT_AFTER(&(q->pkts), last_pkt, pkt, pkt_train);
141 }
142 q->curr_count++;
143 if (q->hwm_count < q->curr_count)
144 q->hwm_count = q->curr_count;
145 }
146
147 /*
148 * Adds a packet to the bpacket_queue.
149 *
150 * The stream passed is consumed by this function. So, the caller should
151 * not free or use the stream after
152 * invoking this function.
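 *
 * Usage sketch (this mirrors what subgroup_update_packet() below does):
 *
 *   packet = stream_dup(s);
 *   bgp_packet_set_size(packet);
 *   pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), packet, &vecarr);
 *
 * After the call the queue owns 'packet'; the caller must not free or
 * reuse it.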
153 */
154 struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
155 struct bpacket_attr_vec_arr *vecarrp)
156 {
157 struct bpacket *pkt;
158 struct bpacket *last_pkt;
159
160
161 pkt = bpacket_alloc();
162 if (TAILQ_EMPTY(&(q->pkts))) {
163 pkt->ver = 1;
164 pkt->buffer = s;
165 if (vecarrp)
166 memcpy(&pkt->arr, vecarrp,
167 sizeof(struct bpacket_attr_vec_arr));
168 else
169 bpacket_attr_vec_arr_reset(&pkt->arr);
170 bpacket_queue_add_packet(q, pkt);
171 bpacket_queue_sanity_check(q);
172 return pkt;
173 }
174
175 /*
176 * Fill in the new information into the current sentinel and create a
177 * new sentinel.
178 */
179 bpacket_queue_sanity_check(q);
180 last_pkt = bpacket_queue_last(q);
181 assert(last_pkt->buffer == NULL);
182 last_pkt->buffer = s;
183 if (vecarrp)
184 memcpy(&last_pkt->arr, vecarrp,
185 sizeof(struct bpacket_attr_vec_arr));
186 else
187 bpacket_attr_vec_arr_reset(&last_pkt->arr);
188
189 pkt->ver = last_pkt->ver;
190 pkt->ver++;
191 bpacket_queue_add_packet(q, pkt);
192
193 bpacket_queue_sanity_check(q);
194 return last_pkt;
195 }
196
197 struct bpacket *bpacket_queue_first(struct bpacket_queue *q)
198 {
199 return (TAILQ_FIRST(&(q->pkts)));
200 }
201
202 struct bpacket *bpacket_queue_last(struct bpacket_queue *q)
203 {
204 return TAILQ_LAST(&(q->pkts), pkt_queue);
205 }
206
207 struct bpacket *bpacket_queue_remove(struct bpacket_queue *q)
208 {
209 struct bpacket *first;
210
211 first = bpacket_queue_first(q);
212 if (first) {
213 TAILQ_REMOVE(&(q->pkts), first, pkt_train);
214 q->curr_count--;
215 }
216 return first;
217 }
218
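/*
 * Note: the queue is kept with a trailing sentinel bpacket whose buffer
 * is NULL (see bpacket_queue_add() and bpacket_queue_compact()), so the
 * lengths reported below exclude that sentinel.
 */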
219 unsigned int bpacket_queue_length(struct bpacket_queue *q)
220 {
221 return q->curr_count - 1;
222 }
223
224 unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q)
225 {
226 return q->hwm_count - 1;
227 }
228
229 bool bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q)
230 {
231 if (q->curr_count >= bgp->default_subgroup_pkt_queue_max)
232 return true;
233 return false;
234 }
235
236 void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf)
237 {
238 if (!pkt || !paf)
239 return;
240
241 LIST_INSERT_HEAD(&(pkt->peers), paf, pkt_train);
242 paf->next_pkt_to_send = pkt;
243 }
244
245 /*
246 * bpacket_queue_cleanup
247 */
248 void bpacket_queue_cleanup(struct bpacket_queue *q)
249 {
250 struct bpacket *pkt;
251
252 while ((pkt = bpacket_queue_remove(q))) {
253 bpacket_free(pkt);
254 }
255 }
256
257 /*
258 * bpacket_queue_compact
259 *
260 * Delete packets that do not need to be transmitted to any peer from
261 * the queue.
262 *
263 * @return the number of packets deleted.
264 */
265 static int bpacket_queue_compact(struct bpacket_queue *q)
266 {
267 int num_deleted;
268 struct bpacket *pkt, *removed_pkt;
269
270 num_deleted = 0;
271
272 while (1) {
273 pkt = bpacket_queue_first(q);
274 if (!pkt)
275 break;
276
277 /*
278 * Don't delete the sentinel.
279 */
280 if (!pkt->buffer)
281 break;
282
283 if (!LIST_EMPTY(&(pkt->peers)))
284 break;
285
286 removed_pkt = bpacket_queue_remove(q);
287 assert(pkt == removed_pkt);
288 bpacket_free(removed_pkt);
289
290 num_deleted++;
291 }
292
293 bpacket_queue_sanity_check(q);
294 return num_deleted;
295 }
296
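/*
 * bpacket_queue_advance_peer
 *
 * Move the given peer to the next packet in its subgroup's queue and
 * reclaim any packets at the head of the queue that no longer have
 * peers waiting on them.
 */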
297 void bpacket_queue_advance_peer(struct peer_af *paf)
298 {
299 struct bpacket *pkt;
300 struct bpacket *old_pkt;
301
302 old_pkt = paf->next_pkt_to_send;
303 if (old_pkt->buffer == NULL)
304 /* Already at end of list */
305 return;
306
307 LIST_REMOVE(paf, pkt_train);
308 pkt = TAILQ_NEXT(old_pkt, pkt_train);
309 bpacket_add_peer(pkt, paf);
310
311 if (!bpacket_queue_compact(PAF_PKTQ(paf)))
312 return;
313
314 /*
315 * Deleted one or more packets. Check if we can now merge this
316 * peer's subgroup into another subgroup.
317 */
318 update_subgroup_check_merge(paf->subgroup, "advanced peer in queue");
319 }
320
321 /*
322 * bpacket_queue_remove_peer
323 *
324 * Remove the peer from the packet queue of the subgroup it belongs
325 * to.
326 */
327 void bpacket_queue_remove_peer(struct peer_af *paf)
328 {
329 struct bpacket_queue *q;
330
331 q = PAF_PKTQ(paf);
332 assert(q);
333
334 LIST_REMOVE(paf, pkt_train);
335 paf->next_pkt_to_send = NULL;
336
337 bpacket_queue_compact(q);
338 }
339
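/*
 * bpacket_queue_virtual_length
 *
 * Number of packets still pending transmission to the given peer,
 * computed from packet version numbers rather than by walking the
 * queue.
 */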
340 unsigned int bpacket_queue_virtual_length(struct peer_af *paf)
341 {
342 struct bpacket *pkt;
343 struct bpacket *last;
344 struct bpacket_queue *q;
345
346 pkt = paf->next_pkt_to_send;
347 if (!pkt || (pkt->buffer == NULL))
348 /* Already at end of list */
349 return 0;
350
351 q = PAF_PKTQ(paf);
352 if (TAILQ_EMPTY(&(q->pkts)))
353 return 0;
354
355 last = TAILQ_LAST(&(q->pkts), pkt_queue);
356 if (last->ver >= pkt->ver)
357 return last->ver - pkt->ver;
358
359 /* sequence # rolled over */
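	/* e.g. pkt->ver == UINT_MAX - 1 and last->ver == 2 yields
	 * (UINT_MAX - (UINT_MAX - 1) + 1) + 2 == 4 pending packets */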
360 return (UINT_MAX - pkt->ver + 1) + last->ver;
361 }
362
363 /*
364 * Dump the bpacket queue
365 */
366 void bpacket_queue_show_vty(struct bpacket_queue *q, struct vty *vty)
367 {
368 struct bpacket *pkt;
369 struct peer_af *paf;
370
371 pkt = bpacket_queue_first(q);
372 while (pkt) {
373 vty_out(vty, " Packet %p ver %u buffer %p\n", pkt, pkt->ver,
374 pkt->buffer);
375
376 LIST_FOREACH (paf, &(pkt->peers), pkt_train) {
377 vty_out(vty, " - %s\n", paf->peer->host);
378 }
379 pkt = bpacket_next(pkt);
380 }
381 return;
382 }
383
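/*
 * bpacket_reformat_for_peer
 *
 * Duplicate a pre-built update packet and, if the attribute vector
 * marks the nexthop as needing per-peer handling, rewrite the nexthop
 * bytes at the recorded offset for this particular peer.
 */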
384 struct stream *bpacket_reformat_for_peer(struct bpacket *pkt,
385 struct peer_af *paf)
386 {
387 struct stream *s = NULL;
388 bpacket_attr_vec *vec;
389 struct peer *peer;
390 char buf[BUFSIZ];
391 char buf2[BUFSIZ];
392 struct bgp_filter *filter;
393
394 s = stream_dup(pkt->buffer);
395 peer = PAF_PEER(paf);
396
397 vec = &pkt->arr.entries[BGP_ATTR_VEC_NH];
398
399 if (!CHECK_FLAG(vec->flags, BPKT_ATTRVEC_FLAGS_UPDATED))
400 return s;
401
402 uint8_t nhlen;
403 afi_t nhafi;
404 int route_map_sets_nh;
405
406 nhlen = stream_getc_from(s, vec->offset);
407 filter = &peer->filter[paf->afi][paf->safi];
408
409 if (peer_cap_enhe(peer, paf->afi, paf->safi))
410 nhafi = AFI_IP6;
411 else
412 nhafi = BGP_NEXTHOP_AFI_FROM_NHLEN(nhlen);
413
414 if (nhafi == AFI_IP) {
415 struct in_addr v4nh, *mod_v4nh;
416 int nh_modified = 0;
417 size_t offset_nh = vec->offset + 1;
418
419 route_map_sets_nh =
420 (CHECK_FLAG(vec->flags,
421 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED)
422 || CHECK_FLAG(
423 vec->flags,
424 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
425
426 switch (nhlen) {
427 case BGP_ATTR_NHLEN_IPV4:
428 break;
429 case BGP_ATTR_NHLEN_VPNV4:
430 offset_nh += 8;
431 break;
432 default:
433 /* TODO: handle IPv6 nexthops */
434 flog_warn(
435 EC_BGP_INVALID_NEXTHOP_LENGTH,
436 "%s: %s: invalid MP nexthop length (AFI IP): %u",
437 __func__, peer->host, nhlen);
438 stream_free(s);
439 return NULL;
440 }
441
442 stream_get_from(&v4nh, s, offset_nh, IPV4_MAX_BYTELEN);
443 mod_v4nh = &v4nh;
444
445 /*
446 * If route-map has set the nexthop, that is normally
447 * used; if it is specified as peer-address, the peering
448 * address is picked up. Otherwise, if NH is unavailable
449 * from attribute, the peering addr is picked up; the
450 * "NH unavailable" case also covers next-hop-self and
451 * some other scenarios - see subgroup_announce_check().
452 * In all other cases, use the nexthop carried in the
453 * attribute unless it is EBGP non-multiaccess and there
454 * is no next-hop-unchanged setting or the peer is EBGP
455 * and the route-map that changed the next-hop value
456 * was applied inbound rather than outbound. Updates to
457 * an EBGP peer should only modify the next-hop if it
458 * was set in an outbound route-map to that peer.
459 * Note: It is assumed route-map cannot set the nexthop
460 * to an invalid value.
461 */
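		/*
		 * Illustration (sketch): for an IBGP peer whose outbound
		 * route-map did "set ip next-hop peer-address", the
		 * RMAP_NH_PEER_ADDRESS flag is set and peer->nexthop.v4
		 * (the local address used for the session) is written at
		 * offset_nh further below.
		 */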
462 if (route_map_sets_nh
463 && ((peer->sort != BGP_PEER_EBGP)
464 || ROUTE_MAP_OUT(filter))) {
465 if (CHECK_FLAG(
466 vec->flags,
467 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
468 mod_v4nh = &peer->nexthop.v4;
469 nh_modified = 1;
470 }
471 } else if (v4nh.s_addr == INADDR_ANY) {
472 mod_v4nh = &peer->nexthop.v4;
473 nh_modified = 1;
474 } else if (peer->sort == BGP_PEER_EBGP
475 && (bgp_multiaccess_check_v4(v4nh, peer) == 0)
476 && !CHECK_FLAG(vec->flags,
477 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
478 && !peer_af_flag_check(
479 peer, paf->afi, paf->safi,
480 PEER_FLAG_NEXTHOP_UNCHANGED)) {
481 /* NOTE: not handling case where NH has new AFI
482 */
483 mod_v4nh = &peer->nexthop.v4;
484 nh_modified = 1;
485 }
486
487 if (nh_modified) /* allow for VPN RD */
488 stream_put_in_addr_at(s, offset_nh, mod_v4nh);
489
490 if (bgp_debug_update(peer, NULL, NULL, 0))
491 zlog_debug("u%" PRIu64 ":s%" PRIu64
492 " %s send UPDATE w/ nexthop %s%s",
493 PAF_SUBGRP(paf)->update_group->id,
494 PAF_SUBGRP(paf)->id, peer->host,
495 inet_ntoa(*mod_v4nh),
496 (nhlen == BGP_ATTR_NHLEN_VPNV4 ? " and RD"
497 : ""));
498 } else if (nhafi == AFI_IP6) {
499 struct in6_addr v6nhglobal, *mod_v6nhg;
500 struct in6_addr v6nhlocal, *mod_v6nhl;
501 int gnh_modified, lnh_modified;
502 size_t offset_nhglobal = vec->offset + 1;
503 size_t offset_nhlocal = vec->offset + 1;
504
505 gnh_modified = lnh_modified = 0;
506 mod_v6nhg = &v6nhglobal;
507 mod_v6nhl = &v6nhlocal;
508
509 route_map_sets_nh =
510 (CHECK_FLAG(vec->flags,
511 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED)
512 || CHECK_FLAG(
513 vec->flags,
514 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS));
515
516 /*
517 * The logic here is rather similar to that for IPv4, the
518 * additional work being to handle 1 or 2 nexthops.
519 * Also, 3rd party nexthop is not propagated for EBGP
520 * right now.
521 */
522 switch (nhlen) {
523 case BGP_ATTR_NHLEN_IPV6_GLOBAL:
524 break;
525 case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
526 offset_nhlocal += IPV6_MAX_BYTELEN;
527 break;
528 case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
529 offset_nhglobal += 8;
530 break;
531 case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
532 offset_nhglobal += 8;
533 offset_nhlocal += 8 * 2 + IPV6_MAX_BYTELEN;
534 break;
535 default:
536 /* TODO: handle IPv4 nexthops */
537 flog_warn(
538 EC_BGP_INVALID_NEXTHOP_LENGTH,
539 "%s: %s: invalid MP nexthop length (AFI IP6): %u",
540 __func__, peer->host, nhlen);
541 stream_free(s);
542 return NULL;
543 }
544
545 stream_get_from(&v6nhglobal, s, offset_nhglobal,
546 IPV6_MAX_BYTELEN);
547
548 /*
549 * Updates to an EBGP peer should only modify the
550 * next-hop if it was set in an outbound route-map
551 * to that peer.
552 */
553 if (route_map_sets_nh
554 && ((peer->sort != BGP_PEER_EBGP)
555 || ROUTE_MAP_OUT(filter))) {
556 if (CHECK_FLAG(
557 vec->flags,
558 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS)) {
559 mod_v6nhg = &peer->nexthop.v6_global;
560 gnh_modified = 1;
561 }
562 } else if (IN6_IS_ADDR_UNSPECIFIED(&v6nhglobal)) {
563 mod_v6nhg = &peer->nexthop.v6_global;
564 gnh_modified = 1;
565 } else if ((peer->sort == BGP_PEER_EBGP)
566 && (!bgp_multiaccess_check_v6(v6nhglobal, peer))
567 && !CHECK_FLAG(vec->flags,
568 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED)
569 && !peer_af_flag_check(
570 peer, nhafi, paf->safi,
571 PEER_FLAG_NEXTHOP_UNCHANGED)) {
572 /* NOTE: not handling case where NH has new AFI
573 */
574 mod_v6nhg = &peer->nexthop.v6_global;
575 gnh_modified = 1;
576 }
577
578 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
579 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
580 stream_get_from(&v6nhlocal, s, offset_nhlocal,
581 IPV6_MAX_BYTELEN);
582 if (IN6_IS_ADDR_UNSPECIFIED(&v6nhlocal)) {
583 mod_v6nhl = &peer->nexthop.v6_local;
584 lnh_modified = 1;
585 }
586 }
587
588 if (gnh_modified)
589 stream_put_in6_addr_at(s, offset_nhglobal, mod_v6nhg);
590 if (lnh_modified)
591 stream_put_in6_addr_at(s, offset_nhlocal, mod_v6nhl);
592
593 if (bgp_debug_update(peer, NULL, NULL, 0)) {
594 if (nhlen == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
595 || nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
596 zlog_debug(
597 "u%" PRIu64 ":s%" PRIu64
598 " %s send UPDATE w/ mp_nexthops %s, %s%s",
599 PAF_SUBGRP(paf)->update_group->id,
600 PAF_SUBGRP(paf)->id, peer->host,
601 inet_ntop(AF_INET6, mod_v6nhg, buf,
602 BUFSIZ),
603 inet_ntop(AF_INET6, mod_v6nhl, buf2,
604 BUFSIZ),
605 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL
606 ? " and RD"
607 : ""));
608 else
609 zlog_debug("u%" PRIu64 ":s%" PRIu64
610 " %s send UPDATE w/ mp_nexthop %s%s",
611 PAF_SUBGRP(paf)->update_group->id,
612 PAF_SUBGRP(paf)->id, peer->host,
613 inet_ntop(AF_INET6, mod_v6nhg, buf,
614 BUFSIZ),
615 (nhlen == BGP_ATTR_NHLEN_VPNV6_GLOBAL
616 ? " and RD"
617 : ""));
618 }
619 } else if (paf->afi == AFI_L2VPN) {
620 struct in_addr v4nh, *mod_v4nh;
621 int nh_modified = 0;
622
623 stream_get_from(&v4nh, s, vec->offset + 1, 4);
624 mod_v4nh = &v4nh;
625
626 /* No route-map changes allowed for EVPN nexthops. */
627 if (v4nh.s_addr == INADDR_ANY) {
628 mod_v4nh = &peer->nexthop.v4;
629 nh_modified = 1;
630 }
631
632 if (nh_modified)
633 stream_put_in_addr_at(s, vec->offset + 1, mod_v4nh);
634
635 if (bgp_debug_update(peer, NULL, NULL, 0))
636 zlog_debug("u%" PRIu64 ":s%" PRIu64
637 " %s send UPDATE w/ nexthop %s",
638 PAF_SUBGRP(paf)->update_group->id,
639 PAF_SUBGRP(paf)->id, peer->host,
640 inet_ntoa(*mod_v4nh));
641 }
642
643 return s;
644 }
645
646 /*
647 * Update the vecarr offsets to go beyond 'pos' bytes, i.e. add 'pos'
648 * to each offset.
649 */
650 static void bpacket_attr_vec_arr_update(struct bpacket_attr_vec_arr *vecarr,
651 size_t pos)
652 {
653 int i;
654
655 if (!vecarr)
656 return;
657
658 for (i = 0; i < BGP_ATTR_VEC_MAX; i++)
659 vecarr->entries[i].offset += pos;
660 }
661
662 /*
663  * Return true if there are any packets to build for this subgroup.
664 */
665 bool subgroup_packets_to_build(struct update_subgroup *subgrp)
666 {
667 struct bgp_advertise *adv;
668
669 if (!subgrp)
670 return false;
671
672 adv = bgp_adv_fifo_first(&subgrp->sync->withdraw);
673 if (adv)
674 return true;
675
676 adv = bgp_adv_fifo_first(&subgrp->sync->update);
677 if (adv)
678 return true;
679
680 return false;
681 }
682
683 /* Make BGP update packet. */
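/*
 * The packet is assembled in two streams: subgrp->work holds the fixed
 * header, the path attributes and any plain IPv4-unicast NLRI, while
 * subgrp->scratch accumulates the MP_REACH_NLRI attribute and its
 * prefixes.  Once all prefixes are packed, the two are spliced together
 * with stream_dupcat() and the result is queued on the subgroup's
 * packet queue.
 */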
684 struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
685 {
686 struct bpacket_attr_vec_arr vecarr;
687 struct bpacket *pkt;
688 struct peer *peer;
689 struct stream *s;
690 struct stream *snlri;
691 struct stream *packet;
692 struct bgp_adj_out *adj;
693 struct bgp_advertise *adv;
694 struct bgp_node *rn = NULL;
695 struct bgp_path_info *path = NULL;
696 bgp_size_t total_attr_len = 0;
697 unsigned long attrlen_pos = 0;
698 size_t mpattrlen_pos = 0;
699 size_t mpattr_pos = 0;
700 afi_t afi;
701 safi_t safi;
702 int space_remaining = 0;
703 int space_needed = 0;
704 char send_attr_str[BUFSIZ];
705 int send_attr_printed = 0;
706 int num_pfx = 0;
707 int addpath_encode = 0;
708 int addpath_overhead = 0;
709 uint32_t addpath_tx_id = 0;
710 struct prefix_rd *prd = NULL;
711 mpls_label_t label = MPLS_INVALID_LABEL, *label_pnt = NULL;
712 uint32_t num_labels = 0;
713
714 if (!subgrp)
715 return NULL;
716
717 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
718 return NULL;
719
720 peer = SUBGRP_PEER(subgrp);
721 afi = SUBGRP_AFI(subgrp);
722 safi = SUBGRP_SAFI(subgrp);
723 s = subgrp->work;
724 stream_reset(s);
725 snlri = subgrp->scratch;
726 stream_reset(snlri);
727
728 bpacket_attr_vec_arr_reset(&vecarr);
729
730 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
731 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
732
733 adv = bgp_adv_fifo_first(&subgrp->sync->update);
734 while (adv) {
735 const struct prefix *rn_p;
736
737 assert(adv->rn);
738 rn = adv->rn;
739 rn_p = bgp_node_get_prefix(rn);
740 adj = adv->adj;
741 addpath_tx_id = adj->addpath_tx_id;
742 path = adv->pathi;
743
744 /* Check if we need to add a prefix to the packet if
745 * maximum-prefix-out is set for the peer.
746 */
747 if (CHECK_FLAG(peer->af_flags[afi][safi],
748 PEER_FLAG_MAX_PREFIX_OUT)
749 && subgrp->scount >= peer->pmax_out[afi][safi]) {
750 if (BGP_DEBUG(update, UPDATE_OUT)
751 || BGP_DEBUG(update, UPDATE_PREFIX)) {
752 zlog_debug(
753 					"%s reached maximum prefix to be sent (%" PRIu32
754 ")",
755 peer->host, peer->pmax_out[afi][safi]);
756 }
757 goto next;
758 }
759
760 space_remaining = STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
761 - BGP_MAX_PACKET_SIZE_OVERFLOW;
762 space_needed = BGP_NLRI_LENGTH + addpath_overhead
763 + bgp_packet_mpattr_prefix_size(afi, safi, rn_p);
764
765 		/* When remaining space can't include NLRI and its length. */
766 if (space_remaining < space_needed)
767 break;
768
769 /* If packet is empty, set attribute. */
770 if (stream_empty(s)) {
771 struct peer *from = NULL;
772
773 if (path)
774 from = path->peer;
775
776 /* 1: Write the BGP message header - 16 bytes marker, 2
777 * bytes length,
778 * one byte message type.
779 */
780 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
781
782 /* 2: withdrawn routes length */
783 stream_putw(s, 0);
784
785 /* 3: total attributes length - attrlen_pos stores the
786 * position */
787 attrlen_pos = stream_get_endp(s);
788 stream_putw(s, 0);
789
790 /* 4: if there is MP_REACH_NLRI attribute, that should
791 * be the first
792 * attribute, according to
793 * draft-ietf-idr-error-handling. Save the
794 * position.
795 */
796 mpattr_pos = stream_get_endp(s);
797
798 /* 5: Encode all the attributes, except MP_REACH_NLRI
799 * attr. */
800 total_attr_len = bgp_packet_attribute(
801 NULL, peer, s, adv->baa->attr, &vecarr, NULL,
802 afi, safi, from, NULL, NULL, 0, 0, 0);
803
804 space_remaining =
805 STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s))
806 - BGP_MAX_PACKET_SIZE_OVERFLOW;
807 space_needed = BGP_NLRI_LENGTH + addpath_overhead
808 + bgp_packet_mpattr_prefix_size(
809 afi, safi, rn_p);
810
811 /* If the attributes alone do not leave any room for
812 * NLRI then
813 * return */
814 if (space_remaining < space_needed) {
815 flog_err(
816 EC_BGP_UPDGRP_ATTR_LEN,
817 "u%" PRIu64 ":s%" PRIu64
818 " attributes too long, cannot send UPDATE",
819 subgrp->update_group->id, subgrp->id);
820
821 /* Flush the FIFO update queue */
822 while (adv)
823 adv = bgp_advertise_clean_subgroup(
824 subgrp, adj);
825 return NULL;
826 }
827
828 if (BGP_DEBUG(update, UPDATE_OUT)
829 || BGP_DEBUG(update, UPDATE_PREFIX)) {
830 memset(send_attr_str, 0, BUFSIZ);
831 send_attr_printed = 0;
832 bgp_dump_attr(adv->baa->attr, send_attr_str,
833 BUFSIZ);
834 }
835 }
836
837 if ((afi == AFI_IP && safi == SAFI_UNICAST)
838 && !peer_cap_enhe(peer, afi, safi))
839 stream_put_prefix_addpath(s, rn_p, addpath_encode,
840 addpath_tx_id);
841 else {
842 /* Encode the prefix in MP_REACH_NLRI attribute */
843 if (rn->prn)
844 prd = (struct prefix_rd *)bgp_node_get_prefix(
845 rn->prn);
846
847 if (safi == SAFI_LABELED_UNICAST) {
848 label = bgp_adv_label(rn, path, peer, afi,
849 safi);
850 label_pnt = &label;
851 num_labels = 1;
852 } else if (path && path->extra) {
853 label_pnt = &path->extra->label[0];
854 num_labels = path->extra->num_labels;
855 }
856
857 if (stream_empty(snlri))
858 mpattrlen_pos = bgp_packet_mpattr_start(
859 snlri, peer, afi, safi, &vecarr,
860 adv->baa->attr);
861
862 bgp_packet_mpattr_prefix(snlri, afi, safi, rn_p, prd,
863 label_pnt, num_labels,
864 addpath_encode, addpath_tx_id,
865 adv->baa->attr);
866 }
867
868 num_pfx++;
869
870 if (bgp_debug_update(NULL, rn_p, subgrp->update_group, 0)) {
871 char pfx_buf[BGP_PRD_PATH_STRLEN];
872
873 if (!send_attr_printed) {
874 zlog_debug("u%" PRIu64 ":s%" PRIu64
875 " send UPDATE w/ attr: %s",
876 subgrp->update_group->id, subgrp->id,
877 send_attr_str);
878 if (!stream_empty(snlri)) {
879 iana_afi_t pkt_afi;
880 iana_safi_t pkt_safi;
881
882 pkt_afi = afi_int2iana(afi);
883 pkt_safi = safi_int2iana(safi);
884 zlog_debug(
885 "u%" PRIu64 ":s%" PRIu64
886 " send MP_REACH for afi/safi %d/%d",
887 subgrp->update_group->id,
888 subgrp->id, pkt_afi, pkt_safi);
889 }
890
891 send_attr_printed = 1;
892 }
893
894 bgp_debug_rdpfxpath2str(afi, safi, prd, rn_p, label_pnt,
895 num_labels, addpath_encode,
896 addpath_tx_id, pfx_buf,
897 sizeof(pfx_buf));
898 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s",
899 subgrp->update_group->id, subgrp->id,
900 pfx_buf);
901 }
902
903 		/* Synchronize attribute. */
904 if (adj->attr)
905 bgp_attr_unintern(&adj->attr);
906 else
907 subgrp->scount++;
908
909 adj->attr = bgp_attr_intern(adv->baa->attr);
910 next:
911 adv = bgp_advertise_clean_subgroup(subgrp, adj);
912 }
913
914 if (!stream_empty(s)) {
915 if (!stream_empty(snlri)) {
916 bgp_packet_mpattr_end(snlri, mpattrlen_pos);
917 total_attr_len += stream_get_endp(snlri);
918 }
919
920 /* set the total attribute length correctly */
921 stream_putw_at(s, attrlen_pos, total_attr_len);
922
923 if (!stream_empty(snlri)) {
924 packet = stream_dupcat(s, snlri, mpattr_pos);
925 bpacket_attr_vec_arr_update(&vecarr, mpattr_pos);
926 } else
927 packet = stream_dup(s);
928 bgp_packet_set_size(packet);
929 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
930 zlog_debug("u%" PRIu64 ":s%" PRIu64
931 " send UPDATE len %zd numpfx %d",
932 subgrp->update_group->id, subgrp->id,
933 (stream_get_endp(packet)
934 - stream_get_getp(packet)),
935 num_pfx);
936 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), packet, &vecarr);
937 stream_reset(s);
938 stream_reset(snlri);
939 return pkt;
940 }
941 return NULL;
942 }
943
944 /* Make BGP withdraw packet. */
945 /* For ipv4 unicast:
946 16-octet marker | 2-octet length | 1-octet type |
947 2-octet withdrawn route length | withdrawn prefixes | 2-octet attrlen (=0)
948 */
949 /* For other afi/safis:
950 16-octet marker | 2-octet length | 1-octet type |
951 2-octet withdrawn route length (=0) | 2-octet attrlen |
952 mp_unreach attr type | attr len | afi | safi | withdrawn prefixes
953 */
954 struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
955 {
956 struct bpacket *pkt;
957 struct stream *s;
958 struct bgp_adj_out *adj;
959 struct bgp_advertise *adv;
960 struct peer *peer;
961 struct bgp_node *rn;
962 bgp_size_t unfeasible_len;
963 bgp_size_t total_attr_len;
964 size_t mp_start = 0;
965 size_t attrlen_pos = 0;
966 size_t mplen_pos = 0;
967 uint8_t first_time = 1;
968 afi_t afi;
969 safi_t safi;
970 int space_remaining = 0;
971 int space_needed = 0;
972 int num_pfx = 0;
973 int addpath_encode = 0;
974 int addpath_overhead = 0;
975 uint32_t addpath_tx_id = 0;
976 const struct prefix_rd *prd = NULL;
977
978
979 if (!subgrp)
980 return NULL;
981
982 if (bpacket_queue_is_full(SUBGRP_INST(subgrp), SUBGRP_PKTQ(subgrp)))
983 return NULL;
984
985 peer = SUBGRP_PEER(subgrp);
986 afi = SUBGRP_AFI(subgrp);
987 safi = SUBGRP_SAFI(subgrp);
988 s = subgrp->work;
989 stream_reset(s);
990 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
991 addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
992
993 while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) {
994 const struct prefix *rn_p;
995
996 assert(adv->rn);
997 adj = adv->adj;
998 rn = adv->rn;
999 rn_p = bgp_node_get_prefix(rn);
1000 addpath_tx_id = adj->addpath_tx_id;
1001
1002 space_remaining =
1003 STREAM_WRITEABLE(s) - BGP_MAX_PACKET_SIZE_OVERFLOW;
1004 space_needed = BGP_NLRI_LENGTH + addpath_overhead
1005 + BGP_TOTAL_ATTR_LEN
1006 + bgp_packet_mpattr_prefix_size(afi, safi, rn_p);
1007
1008 if (space_remaining < space_needed)
1009 break;
1010
1011 if (stream_empty(s)) {
1012 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1013 stream_putw(s, 0); /* unfeasible routes length */
1014 } else
1015 first_time = 0;
1016
1017 if (afi == AFI_IP && safi == SAFI_UNICAST
1018 && !peer_cap_enhe(peer, afi, safi))
1019 stream_put_prefix_addpath(s, rn_p, addpath_encode,
1020 addpath_tx_id);
1021 else {
1022 if (rn->prn)
1023 prd = (struct prefix_rd *)bgp_node_get_prefix(
1024 rn->prn);
1025
1026 /* If first time, format the MP_UNREACH header
1027 */
1028 if (first_time) {
1029 iana_afi_t pkt_afi;
1030 iana_safi_t pkt_safi;
1031
1032 pkt_afi = afi_int2iana(afi);
1033 pkt_safi = safi_int2iana(safi);
1034
1035 attrlen_pos = stream_get_endp(s);
1036 /* total attr length = 0 for now.
1037 * reevaluate later */
1038 stream_putw(s, 0);
1039 mp_start = stream_get_endp(s);
1040 mplen_pos = bgp_packet_mpunreach_start(s, afi,
1041 safi);
1042 if (bgp_debug_update(NULL, NULL,
1043 subgrp->update_group, 0))
1044 zlog_debug(
1045 "u%" PRIu64 ":s%" PRIu64
1046 " send MP_UNREACH for afi/safi %d/%d",
1047 subgrp->update_group->id,
1048 subgrp->id, pkt_afi, pkt_safi);
1049 }
1050
1051 bgp_packet_mpunreach_prefix(s, rn_p, afi, safi, prd,
1052 NULL, 0, addpath_encode,
1053 addpath_tx_id, NULL);
1054 }
1055
1056 num_pfx++;
1057
1058 if (bgp_debug_update(NULL, rn_p, subgrp->update_group, 0)) {
1059 char pfx_buf[BGP_PRD_PATH_STRLEN];
1060
1061 bgp_debug_rdpfxpath2str(afi, safi, prd, rn_p, NULL, 0,
1062 addpath_encode, addpath_tx_id,
1063 pfx_buf, sizeof(pfx_buf));
1064 zlog_debug("u%" PRIu64 ":s%" PRIu64
1065 " send UPDATE %s -- unreachable",
1066 subgrp->update_group->id, subgrp->id,
1067 pfx_buf);
1068 }
1069
1070 subgrp->scount--;
1071
1072 bgp_adj_out_remove_subgroup(rn, adj, subgrp);
1073 bgp_unlock_node(rn);
1074 }
1075
1076 if (!stream_empty(s)) {
1077 if (afi == AFI_IP && safi == SAFI_UNICAST
1078 && !peer_cap_enhe(peer, afi, safi)) {
1079 unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
1080 - BGP_UNFEASIBLE_LEN;
1081 stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
1082 stream_putw(s, 0);
1083 } else {
1084 /* Set the mp_unreach attr's length */
1085 bgp_packet_mpunreach_end(s, mplen_pos);
1086
1087 /* Set total path attribute length. */
1088 total_attr_len = stream_get_endp(s) - mp_start;
1089 stream_putw_at(s, attrlen_pos, total_attr_len);
1090 }
1091 bgp_packet_set_size(s);
1092 if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
1093 zlog_debug("u%" PRIu64 ":s%" PRIu64
1094 " send UPDATE (withdraw) len %zd numpfx %d",
1095 subgrp->update_group->id, subgrp->id,
1096 (stream_get_endp(s) - stream_get_getp(s)),
1097 num_pfx);
1098 pkt = bpacket_queue_add(SUBGRP_PKTQ(subgrp), stream_dup(s),
1099 NULL);
1100 stream_reset(s);
1101 return pkt;
1102 }
1103
1104 return NULL;
1105 }
1106
1107 void subgroup_default_update_packet(struct update_subgroup *subgrp,
1108 struct attr *attr, struct peer *from)
1109 {
1110 struct stream *s;
1111 struct peer *peer;
1112 struct prefix p;
1113 unsigned long pos;
1114 bgp_size_t total_attr_len;
1115 afi_t afi;
1116 safi_t safi;
1117 struct bpacket_attr_vec_arr vecarr;
1118 int addpath_encode = 0;
1119
1120 if (DISABLE_BGP_ANNOUNCE)
1121 return;
1122
1123 if (!subgrp)
1124 return;
1125
1126 peer = SUBGRP_PEER(subgrp);
1127 afi = SUBGRP_AFI(subgrp);
1128 safi = SUBGRP_SAFI(subgrp);
1129 bpacket_attr_vec_arr_reset(&vecarr);
1130 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1131
1132 memset(&p, 0, sizeof(p));
1133 p.family = afi2family(afi);
1134 p.prefixlen = 0;
1135
1136 /* Logging the attribute. */
1137 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1138 char attrstr[BUFSIZ];
1139 char buf[PREFIX_STRLEN];
1140 /* ' with addpath ID ' 17
1141 * max strlen of uint32 + 10
1142 * +/- (just in case) + 1
1143 * null terminator + 1
1144 * ============================ 29 */
1145 char tx_id_buf[30];
1146
1147 attrstr[0] = '\0';
1148
1149 bgp_dump_attr(attr, attrstr, BUFSIZ);
1150
1151 if (addpath_encode)
1152 snprintf(tx_id_buf, sizeof(tx_id_buf),
1153 " with addpath ID %u",
1154 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1155 else
1156 tx_id_buf[0] = '\0';
1157
1158 zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s%s %s",
1159 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1160 prefix2str(&p, buf, sizeof(buf)), tx_id_buf,
1161 attrstr);
1162 }
1163
1164 s = stream_new(BGP_MAX_PACKET_SIZE);
1165
1166 /* Make BGP update packet. */
1167 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1168
1169 /* Unfeasible Routes Length. */
1170 stream_putw(s, 0);
1171
1172 /* Make place for total attribute length. */
1173 pos = stream_get_endp(s);
1174 stream_putw(s, 0);
1175 total_attr_len = bgp_packet_attribute(
1176 NULL, peer, s, attr, &vecarr, &p, afi, safi, from, NULL, NULL,
1177 0, addpath_encode, BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1178
1179 /* Set Total Path Attribute Length. */
1180 stream_putw_at(s, pos, total_attr_len);
1181
1182 /* NLRI set. */
1183 if (p.family == AF_INET && safi == SAFI_UNICAST
1184 && !peer_cap_enhe(peer, afi, safi))
1185 stream_put_prefix_addpath(
1186 s, &p, addpath_encode,
1187 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1188
1189 /* Set size. */
1190 bgp_packet_set_size(s);
1191
1192 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
1193 subgroup_trigger_write(subgrp);
1194 }
1195
1196 void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
1197 {
1198 struct peer *peer;
1199 struct stream *s;
1200 struct prefix p;
1201 unsigned long attrlen_pos = 0;
1202 unsigned long cp;
1203 bgp_size_t unfeasible_len;
1204 bgp_size_t total_attr_len = 0;
1205 size_t mp_start = 0;
1206 size_t mplen_pos = 0;
1207 afi_t afi;
1208 safi_t safi;
1209 int addpath_encode = 0;
1210
1211 if (DISABLE_BGP_ANNOUNCE)
1212 return;
1213
1214 peer = SUBGRP_PEER(subgrp);
1215 afi = SUBGRP_AFI(subgrp);
1216 safi = SUBGRP_SAFI(subgrp);
1217 addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
1218
1219 memset(&p, 0, sizeof(p));
1220 p.family = afi2family(afi);
1221 p.prefixlen = 0;
1222
1223 if (bgp_debug_update(NULL, &p, subgrp->update_group, 0)) {
1224 char buf[PREFIX_STRLEN];
1225 /* ' with addpath ID ' 17
1226 * max strlen of uint32 + 10
1227 * +/- (just in case) + 1
1228 * null terminator + 1
1229 * ============================ 29 */
1230 		char tx_id_buf[30] = {0}; /* logged below even w/o addpath */
1231
1232 if (addpath_encode)
1233 snprintf(tx_id_buf, sizeof(tx_id_buf),
1234 " with addpath ID %u",
1235 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1236
1237 zlog_debug("u%" PRIu64 ":s%" PRIu64
1238 " send UPDATE %s%s -- unreachable",
1239 (SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
1240 prefix2str(&p, buf, sizeof(buf)), tx_id_buf);
1241 }
1242
1243 s = stream_new(BGP_MAX_PACKET_SIZE);
1244
1245 /* Make BGP update packet. */
1246 bgp_packet_set_marker(s, BGP_MSG_UPDATE);
1247
1248 	/* Unfeasible Routes Length. */
1249 cp = stream_get_endp(s);
1250 stream_putw(s, 0);
1251
1252 /* Withdrawn Routes. */
1253 if (p.family == AF_INET && safi == SAFI_UNICAST
1254 && !peer_cap_enhe(peer, afi, safi)) {
1255 stream_put_prefix_addpath(
1256 s, &p, addpath_encode,
1257 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
1258
1259 unfeasible_len = stream_get_endp(s) - cp - 2;
1260
1261 /* Set unfeasible len. */
1262 stream_putw_at(s, cp, unfeasible_len);
1263
1264 /* Set total path attribute length. */
1265 stream_putw(s, 0);
1266 } else {
1267 attrlen_pos = stream_get_endp(s);
1268 stream_putw(s, 0);
1269 mp_start = stream_get_endp(s);
1270 mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
1271 bgp_packet_mpunreach_prefix(
1272 s, &p, afi, safi, NULL, NULL, 0, addpath_encode,
1273 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE, NULL);
1274
1275 /* Set the mp_unreach attr's length */
1276 bgp_packet_mpunreach_end(s, mplen_pos);
1277
1278 /* Set total path attribute length. */
1279 total_attr_len = stream_get_endp(s) - mp_start;
1280 stream_putw_at(s, attrlen_pos, total_attr_len);
1281 }
1282
1283 bgp_packet_set_size(s);
1284
1285 (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
1286 subgroup_trigger_write(subgrp);
1287 }
1288
1289 static void
1290 bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr,
1291 bpacket_attr_vec_type type,
1292 struct attr *attr)
1293 {
1294 if (CHECK_FLAG(attr->rmap_change_flags,
1295 BATTR_RMAP_NEXTHOP_PEER_ADDRESS))
1296 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1297 BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS);
1298
1299 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_REFLECTED))
1300 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1301 BPKT_ATTRVEC_FLAGS_REFLECTED);
1302
1303 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED))
1304 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1305 BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED);
1306
1307 if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_IPV4_NHOP_CHANGED))
1308 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1309 BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED);
1310
1311 if (CHECK_FLAG(attr->rmap_change_flags,
1312 BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED))
1313 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1314 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED);
1315
1316 if (CHECK_FLAG(attr->rmap_change_flags,
1317 BATTR_RMAP_IPV6_LL_NHOP_CHANGED))
1318 SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags,
1319 BPKT_ATTRVEC_FLAGS_RMAP_IPV6_LNH_CHANGED);
1320 }
1321
1322 /* Reset the Attributes vector array. The vector array is used to override
1323 * certain output parameters in the packet for a particular peer
1324 */
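/*
 * Within this file only the nexthop entry (BGP_ATTR_VEC_NH) is used:
 * its recorded offset tells bpacket_reformat_for_peer() where in the
 * packet the nexthop may have to be rewritten for a particular peer.
 */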
1325 void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr)
1326 {
1327 int i;
1328
1329 if (!vecarr)
1330 return;
1331
1332 i = 0;
1333 while (i < BGP_ATTR_VEC_MAX) {
1334 vecarr->entries[i].flags = 0;
1335 vecarr->entries[i].offset = 0;
1336 i++;
1337 }
1338 }
1339
1340 /* Setup a particular node entry in the vecarr */
1341 void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr,
1342 bpacket_attr_vec_type type, struct stream *s,
1343 struct attr *attr)
1344 {
1345 if (!vecarr)
1346 return;
1347 assert(type < BGP_ATTR_VEC_MAX);
1348
1349 SET_FLAG(vecarr->entries[type].flags, BPKT_ATTRVEC_FLAGS_UPDATED);
1350 vecarr->entries[type].offset = stream_get_endp(s);
1351 if (attr)
1352 bpacket_vec_arr_inherit_attr_flags(vecarr, type, attr);
1353 }