#include "hash.h"
#include "jhash.h"
#include "queue.h"
+#include "table.h"
+#include "filter.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_packet.h"
#include "bgpd/bgp_ecommunity.h"
#include "bgpd/bgp_updgrp.h"
+#include "bgpd/bgp_encap_types.h"
+#if ENABLE_BGP_VNC
+# include "bgpd/rfapi/bgp_rfapi_cfg.h"
+# include "bgp_encap_types.h"
+# include "bgp_vnc_types.h"
+#endif
/* Attribute strings for logging. */
static const struct message attr_str [] =
{ BGP_ATTR_AS4_PATH, "AS4_PATH" },
{ BGP_ATTR_AS4_AGGREGATOR, "AS4_AGGREGATOR" },
{ BGP_ATTR_AS_PATHLIMIT, "AS_PATHLIMIT" },
+ { BGP_ATTR_ENCAP, "ENCAP" },
+#if ENABLE_BGP_VNC
+ { BGP_ATTR_VNC, "VNC" },
+#endif
};
static const int attr_str_max = array_size(attr_str);
/* bgp_attr_flags_diagnose() relies on this bit being last in this list */
{ BGP_ATTR_FLAG_EXTLEN, "Extended Length" },
};
-static const size_t attr_flag_str_max = array_size(attr_flag_str);
static struct hash *cluster_hash;
static void *
cluster_hash_alloc (void *p)
{
- struct cluster_list * val = (struct cluster_list *) p;
+ const struct cluster_list *val = (const struct cluster_list *) p;
struct cluster_list *cluster;
cluster = XMALLOC (MTYPE_CLUSTER, sizeof (struct cluster_list));
/* Tear down the cluster-list intern table: free every entry still
 * interned, release the table, and mark it absent. */
static void
cluster_finish (void)
{
  void (*dtor) (void *) = (void (*)(void *)) cluster_free;

  hash_clean (cluster_hash, dtor);
  hash_free (cluster_hash);
  cluster_hash = NULL;
}
+static struct hash *encap_hash = NULL;
+#if ENABLE_BGP_VNC
+static struct hash *vnc_hash = NULL;
+#endif
+
+struct bgp_attr_encap_subtlv *
+encap_tlv_dup(struct bgp_attr_encap_subtlv *orig)
+{
+ struct bgp_attr_encap_subtlv *new;
+ struct bgp_attr_encap_subtlv *tail;
+ struct bgp_attr_encap_subtlv *p;
+
+ for (p = orig, tail = new = NULL; p; p = p->next) {
+ int size = sizeof(struct bgp_attr_encap_subtlv) - 1 + p->length;
+ if (tail) {
+ tail->next = XCALLOC(MTYPE_ENCAP_TLV, size);
+ tail = tail->next;
+ } else {
+ tail = new = XCALLOC(MTYPE_ENCAP_TLV, size);
+ }
+ assert(tail);
+ memcpy(tail, p, size);
+ tail->next = NULL;
+ }
+
+ return new;
+}
+
+static void
+encap_free(struct bgp_attr_encap_subtlv *p)
+{
+ struct bgp_attr_encap_subtlv *next;
+ while (p) {
+ next = p->next;
+ p->next = NULL;
+ XFREE(MTYPE_ENCAP_TLV, p);
+ p = next;
+ }
+}
+
+void
+bgp_attr_flush_encap(struct attr *attr)
+{
+ if (!attr || !attr->extra)
+ return;
+
+ if (attr->extra->encap_subtlvs) {
+ encap_free(attr->extra->encap_subtlvs);
+ attr->extra->encap_subtlvs = NULL;
+ }
+#if ENABLE_BGP_VNC
+ if (attr->extra->vnc_subtlvs) {
+ encap_free(attr->extra->vnc_subtlvs);
+ attr->extra->vnc_subtlvs = NULL;
+ }
+#endif
+}
+
+/*
+ * Compare encap sub-tlv chains
+ *
+ * 1 = equivalent
+ * 0 = not equivalent
+ *
+ * This algorithm could be made faster if needed
+ */
+static int
+encap_same(struct bgp_attr_encap_subtlv *h1, struct bgp_attr_encap_subtlv *h2)
+{
+ struct bgp_attr_encap_subtlv *p;
+ struct bgp_attr_encap_subtlv *q;
+
+ if (h1 == h2)
+ return 1;
+ if (h1 == NULL || h2 == NULL)
+ return 0;
+
+ for (p = h1; p; p = p->next) {
+ for (q = h2; q; q = q->next) {
+ if ((p->type == q->type) &&
+ (p->length == q->length) &&
+ !memcmp(p->value, q->value, p->length)) {
+
+ break;
+ }
+ }
+ if (!q)
+ return 0;
+ }
+
+ for (p = h2; p; p = p->next) {
+ for (q = h1; q; q = q->next) {
+ if ((p->type == q->type) &&
+ (p->length == q->length) &&
+ !memcmp(p->value, q->value, p->length)) {
+
+ break;
+ }
+ }
+ if (!q)
+ return 0;
+ }
+
+ return 1;
+}
+
/* hash_get() allocator callback: the caller already built the sub-TLV
 * chain, so "allocating" is simply handing the same pointer back. */
static void *
encap_hash_alloc (void *p)
{
  return p;
}
+
+/* Selects which intern table a sub-TLV chain belongs to (encap_hash vs
+ * vnc_hash); see encap_intern() / encap_unintern(). */
+typedef enum
+{
+  ENCAP_SUBTLV_TYPE,
+#if ENABLE_BGP_VNC
+  VNC_SUBTLV_TYPE
+#endif
+} encap_subtlv_type;
+
+static struct bgp_attr_encap_subtlv *
+encap_intern (struct bgp_attr_encap_subtlv *encap, encap_subtlv_type type)
+{
+ struct bgp_attr_encap_subtlv *find;
+ struct hash *hash = encap_hash;
+#if ENABLE_BGP_VNC
+ if (type == VNC_SUBTLV_TYPE)
+ hash = vnc_hash;
+#endif
+
+ find = hash_get (hash, encap, encap_hash_alloc);
+ if (find != encap)
+ encap_free (encap);
+ find->refcnt++;
+
+ return find;
+}
+
+static void
+encap_unintern (struct bgp_attr_encap_subtlv **encapp, encap_subtlv_type type)
+{
+ struct bgp_attr_encap_subtlv *encap = *encapp;
+ if (encap->refcnt)
+ encap->refcnt--;
+
+ if (encap->refcnt == 0)
+ {
+ struct hash *hash = encap_hash;
+#if ENABLE_BGP_VNC
+ if (type == VNC_SUBTLV_TYPE)
+ hash = vnc_hash;
+#endif
+ hash_release (hash, encap);
+ encap_free (encap);
+ *encapp = NULL;
+ }
+}
+
+static unsigned int
+encap_hash_key_make (void *p)
+{
+ const struct bgp_attr_encap_subtlv * encap = p;
+
+ return jhash(encap->value, encap->length, 0);
+}
+
/* Hash equality callback: two entries match when their sub-TLV chains
 * are equivalent per encap_same(). */
static int
encap_hash_cmp (const void *p1, const void *p2)
{
  struct bgp_attr_encap_subtlv *a = (struct bgp_attr_encap_subtlv *) p1;
  struct bgp_attr_encap_subtlv *b = (struct bgp_attr_encap_subtlv *) p2;

  return encap_same (a, b);
}
+
+static void
+encap_init (void)
+{
+ encap_hash = hash_create (encap_hash_key_make, encap_hash_cmp);
+#if ENABLE_BGP_VNC
+ vnc_hash = hash_create (encap_hash_key_make, encap_hash_cmp);
+#endif
+}
+
+static void
+encap_finish (void)
+{
+ hash_clean (encap_hash, (void (*)(void *))encap_free);
+ hash_free (encap_hash);
+ encap_hash = NULL;
+#if ENABLE_BGP_VNC
+ hash_clean (vnc_hash, (void (*)(void *))encap_free);
+ hash_free (vnc_hash);
+ vnc_hash = NULL;
+#endif
+}
+
/* Unknown transit attribute. */
static struct hash *transit_hash;
/* Tear down the unknown-transitive-attribute table, freeing any
 * entries still interned before releasing the table itself. */
static void
transit_finish (void)
{
  void (*dtor) (void *) = (void (*)(void *)) transit_free;

  hash_clean (transit_hash, dtor);
  hash_free (transit_hash);
  transit_hash = NULL;
}
{
new->extra = extra;
memset(new->extra, 0, sizeof(struct attr_extra));
- if (orig->extra)
+ if (orig->extra) {
*new->extra = *orig->extra;
+ }
}
else if (orig->extra)
{
new->extra->cluster = cluster_dup(orig->extra->cluster);
if (orig->extra->transit)
new->extra->transit = transit_dup(orig->extra->transit);
+ if (orig->extra->encap_subtlvs)
+ new->extra->encap_subtlvs = encap_tlv_dup(orig->extra->encap_subtlvs);
+#if ENABLE_BGP_VNC
+ if (orig->extra->vnc_subtlvs)
+ new->extra->vnc_subtlvs = encap_tlv_dup(orig->extra->vnc_subtlvs);
+#endif
}
}
cluster_free(attr->extra->cluster);
if (attr->extra->transit)
transit_free(attr->extra->transit);
+ if (attr->extra->encap_subtlvs)
+ encap_free(attr->extra->encap_subtlvs);
+#if ENABLE_BGP_VNC
+ if (attr->extra->vnc_subtlvs)
+ encap_free(attr->extra->vnc_subtlvs);
+#endif
}
}
MIX(cluster_hash_key_make (extra->cluster));
if (extra->transit)
MIX(transit_hash_key_make (extra->transit));
-
+ if (extra->encap_subtlvs)
+ MIX(encap_hash_key_make (extra->encap_subtlvs));
+#if ENABLE_BGP_VNC
+ if (extra->vnc_subtlvs)
+ MIX(encap_hash_key_make (extra->vnc_subtlvs));
+#endif
#ifdef HAVE_IPV6
MIX(extra->mp_nexthop_len);
key = jhash(extra->mp_nexthop_global.s6_addr, IPV6_MAX_BYTELEN, key);
&& ae1->ecommunity == ae2->ecommunity
&& ae1->cluster == ae2->cluster
&& ae1->transit == ae2->transit
+ && (ae1->encap_tunneltype == ae2->encap_tunneltype)
+ && encap_same(ae1->encap_subtlvs, ae2->encap_subtlvs)
+#if ENABLE_BGP_VNC
+ && encap_same(ae1->vnc_subtlvs, ae2->vnc_subtlvs)
+#endif
&& IPV4_ADDR_SAME (&ae1->originator_id, &ae2->originator_id))
return 1;
else if (ae1 || ae2)
attrhash = hash_create (attrhash_key_make, attrhash_cmp);
}
+/*
+ * special for hash_clean below
+ */
+static void
+attr_vfree (void *attr)
+{
+ bgp_attr_extra_free ((struct attr *)attr);
+ XFREE (MTYPE_ATTR, attr);
+}
+
static void
attrhash_finish (void)
{
  /* free every attr still interned before dropping the table */
  hash_clean (attrhash, attr_vfree);
  hash_free (attrhash);
  attrhash = NULL;
}
static void *
bgp_attr_hash_alloc (void *p)
{
- struct attr * val = (struct attr *) p;
+ const struct attr * val = (const struct attr *) p;
struct attr *attr;
attr = XMALLOC (MTYPE_ATTR, sizeof (struct attr));
{
attr->extra = bgp_attr_extra_new ();
*attr->extra = *val->extra;
+ if (val->extra->encap_subtlvs) {
+ val->extra->encap_subtlvs = NULL;
+ }
+#if ENABLE_BGP_VNC
+ if (val->extra->vnc_subtlvs) {
+ val->extra->vnc_subtlvs = NULL;
+ }
+#endif
}
attr->refcnt = 0;
return attr;
else
attre->transit->refcnt++;
}
+ if (attre->encap_subtlvs)
+ {
+ if (! attre->encap_subtlvs->refcnt)
+ attre->encap_subtlvs = encap_intern (attre->encap_subtlvs, ENCAP_SUBTLV_TYPE);
+ else
+ attre->encap_subtlvs->refcnt++;
+ }
+#if ENABLE_BGP_VNC
+ if (attre->vnc_subtlvs)
+ {
+ if (! attre->vnc_subtlvs->refcnt)
+ attre->vnc_subtlvs = encap_intern (attre->vnc_subtlvs, VNC_SUBTLV_TYPE);
+ else
+ attre->vnc_subtlvs->refcnt++;
+ }
+#endif
}
find = (struct attr *) hash_get (attrhash, attr, bgp_attr_hash_alloc);
find->refcnt++;
-
+
return find;
}
if (attre->transit)
attre->transit->refcnt++;
+
+ if (attre->encap_subtlvs)
+ attre->encap_subtlvs->refcnt++;
+
+#if ENABLE_BGP_VNC
+ if (attre->vnc_subtlvs)
+ attre->vnc_subtlvs->refcnt++;
+#endif
}
attr->refcnt++;
return attr;
struct attr attr;
struct attr *new;
+ memset (&attr, 0, sizeof (struct attr));
+ bgp_attr_extra_get (&attr);
+
bgp_attr_default_set(&attr, origin);
new = bgp_attr_intern (&attr);
if (attr->extra->transit)
transit_unintern (attr->extra->transit);
+
+ if (attr->extra->encap_subtlvs)
+ encap_unintern (&attr->extra->encap_subtlvs, ENCAP_SUBTLV_TYPE);
+
+#if ENABLE_BGP_VNC
+ if (attr->extra->vnc_subtlvs)
+ encap_unintern (&attr->extra->vnc_subtlvs, VNC_SUBTLV_TYPE);
+#endif
}
}
bgp_attr_flush (struct attr *attr)
{
if (attr->aspath && ! attr->aspath->refcnt)
- aspath_free (attr->aspath);
+ {
+ aspath_free (attr->aspath);
+ attr->aspath = NULL;
+ }
if (attr->community && ! attr->community->refcnt)
- community_free (attr->community);
+ {
+ community_free (attr->community);
+ attr->community = NULL;
+ }
if (attr->extra)
{
struct attr_extra *attre = attr->extra;
if (attre->ecommunity && ! attre->ecommunity->refcnt)
ecommunity_free (&attre->ecommunity);
if (attre->cluster && ! attre->cluster->refcnt)
- cluster_free (attre->cluster);
+ {
+ cluster_free (attre->cluster);
+ attre->cluster = NULL;
+ }
if (attre->transit && ! attre->transit->refcnt)
- transit_free (attre->transit);
+ {
+ transit_free (attre->transit);
+ attre->transit = NULL;
+ }
+ if (attre->encap_subtlvs && ! attre->encap_subtlvs->refcnt)
+ {
+ encap_free(attre->encap_subtlvs);
+ attre->encap_subtlvs = NULL;
+ }
+#if ENABLE_BGP_VNC
+ if (attre->vnc_subtlvs && ! attre->vnc_subtlvs->refcnt)
+ {
+ encap_free(attre->vnc_subtlvs);
+ attre->vnc_subtlvs = NULL;
+ }
+#endif
}
}
[BGP_ATTR_AS4_PATH] = BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_AS4_AGGREGATOR] = BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
};
-static const size_t attr_flags_values_max =
- sizeof (attr_flags_values) / sizeof (attr_flags_values[0]);
+static const size_t attr_flags_values_max = array_size(attr_flags_values) - 1;
static int
bgp_attr_flag_invalid (struct bgp_attr_parser_args *args)
u_int8_t mask = BGP_ATTR_FLAG_EXTLEN;
const u_int8_t flags = args->flags;
const u_int8_t attr_code = args->type;
- struct peer *const peer = args->peer;
/* there may be attributes we don't know about */
if (attr_code > attr_flags_values_max)
gets ignored in any of these cases. */
nexthop_n = stream_get_ipv4 (peer->ibuf);
nexthop_h = ntohl (nexthop_n);
- if (IPV4_NET0 (nexthop_h) || IPV4_NET127 (nexthop_h) || IPV4_CLASS_DE (nexthop_h))
+ if ((IPV4_NET0 (nexthop_h) || IPV4_NET127 (nexthop_h) || IPV4_CLASS_DE (nexthop_h))
+ && !BGP_DEBUG (allow_martians, ALLOW_MARTIANS)) /* loopbacks may be used in testing */
{
char buf[INET_ADDRSTRLEN];
inet_ntop (AF_INET, &nexthop_n, buf, INET_ADDRSTRLEN);
static int
bgp_attr_atomic (struct bgp_attr_parser_args *args)
{
- struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
int ignore_as4_path = 0;
struct aspath *newpath;
struct attr_extra *attre = attr->extra;
-
+
+ if (!attr->aspath)
+ {
+ /* NULL aspath shouldn't be possible as bgp_attr_parse should have
+ * checked that all well-known, mandatory attributes were present.
+ *
+ * Can only be a problem with peer itself - hard error
+ */
+ return BGP_ATTR_PARSE_ERROR;
+ }
+
if (CHECK_FLAG (peer->cap, PEER_CAP_AS4_RCV))
{
/* peer can do AS4, so we ignore AS4_PATH and AS4_AGGREGATOR
/* need to reconcile NEW_AS_PATH and AS_PATH */
if (!ignore_as4_path && (attr->flag & (ATTR_FLAG_BIT( BGP_ATTR_AS4_PATH))))
{
- if (!attr->aspath)
- return BGP_ATTR_PARSE_PROCEED;
-
- newpath = aspath_reconcile_as4 (attr->aspath, as4_path);
- aspath_unintern (&attr->aspath);
- attr->aspath = aspath_intern (newpath);
+ newpath = aspath_reconcile_as4 (attr->aspath, as4_path);
+ aspath_unintern (&attr->aspath);
+ attr->aspath = aspath_intern (newpath);
}
return BGP_ATTR_PARSE_PROCEED;
}
safi_t safi;
bgp_size_t nlri_len;
size_t start;
- int ret;
- int num_mp_pfx = 0;
struct stream *s;
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
break;
#ifdef HAVE_IPV6
case BGP_ATTR_NHLEN_IPV6_GLOBAL:
+ case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
+ if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL)
+ {
+ stream_getl (s); /* RD high */
+ stream_getl (s); /* RD low */
+ }
stream_get (&attre->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
+ case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
+ if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
+ {
+ stream_getl (s); /* RD high */
+ stream_getl (s); /* RD low */
+ }
stream_get (&attre->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
+ if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
+ {
+ stream_getl (s); /* RD high */
+ stream_getl (s); /* RD low */
+ }
stream_get (&attre->mp_nexthop_local, s, IPV6_MAX_BYTELEN);
if (! IN6_IS_ADDR_LINKLOCAL (&attre->mp_nexthop_local))
{
char buf2[INET6_ADDRSTRLEN];
if (bgp_debug_update(peer, NULL, NULL, 1))
- zlog_debug ("%s sent two nexthops %s %s but second one is not a link-local nexthop", peer->host,
+ zlog_debug ("%s rcvd nexthops %s, %s -- ignoring non-LL value",
+ peer->host,
inet_ntop (AF_INET6, &attre->mp_nexthop_global,
buf1, INET6_ADDRSTRLEN),
inet_ntop (AF_INET6, &attre->mp_nexthop_local,
__func__, peer->host);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
-
- if (safi != SAFI_MPLS_LABELED_VPN)
- {
- ret = bgp_nlri_sanity_check (peer, afi, safi, stream_pnt (s),
- nlri_len, &num_mp_pfx);
- if (ret < 0)
- {
- zlog_info ("%s: (%s) NLRI doesn't pass sanity check",
- __func__, peer->host);
- return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
- }
- }
mp_update->afi = afi;
mp_update->safi = safi;
stream_forward_getp (s, nlri_len);
+ attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_MP_REACH_NLRI);
+
return BGP_ATTR_PARSE_PROCEED;
#undef LEN_LEFT
}
afi_t afi;
safi_t safi;
u_int16_t withdraw_len;
- int ret;
- int num_mp_pfx = 0;
struct peer *const peer = args->peer;
+ struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
s = peer->ibuf;
withdraw_len = length - BGP_MP_UNREACH_MIN_SIZE;
- if (safi != SAFI_MPLS_LABELED_VPN)
- {
- ret = bgp_nlri_sanity_check (peer, afi, safi, stream_pnt (s),
- withdraw_len, &num_mp_pfx);
- if (ret < 0)
- return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
- }
-
mp_withdraw->afi = afi;
mp_withdraw->safi = safi;
mp_withdraw->nlri = stream_pnt (s);
stream_forward_getp (s, withdraw_len);
+ attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_MP_UNREACH_NLRI);
+
return BGP_ATTR_PARSE_PROCEED;
}
return BGP_ATTR_PARSE_PROCEED;
}
+/* Parse Tunnel Encap attribute in an UPDATE */
+static int
+bgp_attr_encap(
+ uint8_t type,
+ struct peer *peer, /* IN */
+ bgp_size_t length, /* IN: attr's length field */
+ struct attr *attr, /* IN: caller already allocated */
+ u_char flag, /* IN: attr's flags field */
+ u_char *startp)
+{
+ bgp_size_t total;
+ struct attr_extra *attre = NULL;
+ struct bgp_attr_encap_subtlv *stlv_last = NULL;
+ uint16_t tunneltype = 0;
+
+ total = length + (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN) ? 4 : 3);
+
+ if (!CHECK_FLAG(flag, BGP_ATTR_FLAG_TRANS)
+ || !CHECK_FLAG(flag, BGP_ATTR_FLAG_OPTIONAL))
+ {
+ zlog_info ("Tunnel Encap attribute flag isn't optional and transitive %d", flag);
+ bgp_notify_send_with_data (peer,
+ BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR,
+ startp, total);
+ return -1;
+ }
+
+ if (BGP_ATTR_ENCAP == type) {
+ /* read outer TLV type and length */
+ uint16_t tlv_length;
+
+ if (length < 4) {
+ zlog_info ("Tunnel Encap attribute not long enough to contain outer T,L");
+ bgp_notify_send_with_data(peer,
+ BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ startp, total);
+ return -1;
+ }
+ tunneltype = stream_getw (BGP_INPUT (peer));
+ tlv_length = stream_getw (BGP_INPUT (peer));
+ length -= 4;
+
+ if (tlv_length != length) {
+ zlog_info ("%s: tlv_length(%d) != length(%d)",
+ __func__, tlv_length, length);
+ }
+ }
+
+ while (length >= 4) {
+ uint16_t subtype = 0;
+ uint16_t sublength = 0;
+ struct bgp_attr_encap_subtlv *tlv;
+
+ if (BGP_ATTR_ENCAP == type) {
+ subtype = stream_getc (BGP_INPUT (peer));
+ sublength = stream_getc (BGP_INPUT (peer));
+ length -= 2;
+#if ENABLE_BGP_VNC
+ } else {
+ subtype = stream_getw (BGP_INPUT (peer));
+ sublength = stream_getw (BGP_INPUT (peer));
+ length -= 4;
+#endif
+ }
+
+ if (sublength > length) {
+ zlog_info ("Tunnel Encap attribute sub-tlv length %d exceeds remaining length %d",
+ sublength, length);
+ bgp_notify_send_with_data (peer,
+ BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ startp, total);
+ return -1;
+ }
+
+ /* alloc and copy sub-tlv */
+ /* TBD make sure these are freed when attributes are released */
+ tlv = XCALLOC (MTYPE_ENCAP_TLV, sizeof(struct bgp_attr_encap_subtlv)-1+sublength);
+ tlv->type = subtype;
+ tlv->length = sublength;
+ stream_get(tlv->value, peer->ibuf, sublength);
+ length -= sublength;
+
+ /* attach tlv to encap chain */
+ if (!attre) {
+ attre = bgp_attr_extra_get(attr);
+ if (BGP_ATTR_ENCAP == type) {
+ for (stlv_last = attre->encap_subtlvs; stlv_last && stlv_last->next;
+ stlv_last = stlv_last->next);
+ if (stlv_last) {
+ stlv_last->next = tlv;
+ } else {
+ attre->encap_subtlvs = tlv;
+ }
+#if ENABLE_BGP_VNC
+ } else {
+ for (stlv_last = attre->vnc_subtlvs; stlv_last && stlv_last->next;
+ stlv_last = stlv_last->next);
+ if (stlv_last) {
+ stlv_last->next = tlv;
+ } else {
+ attre->vnc_subtlvs = tlv;
+ }
+#endif
+ }
+ } else {
+ stlv_last->next = tlv;
+ }
+ stlv_last = tlv;
+ }
+
+ if (BGP_ATTR_ENCAP == type) {
+ if (!attre)
+ attre = bgp_attr_extra_get(attr);
+ attre->encap_tunneltype = tunneltype;
+ }
+
+ if (length) {
+ /* spurious leftover data */
+ zlog_info ("Tunnel Encap attribute length is bad: %d leftover octets", length);
+ bgp_notify_send_with_data (peer,
+ BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ startp, total);
+ return -1;
+ }
+
+ return 0;
+}
+
/* BGP unknown attribute treatment. */
static bgp_attr_parse_ret_t
bgp_attr_unknown (struct bgp_attr_parser_args *args)
return BGP_ATTR_PARSE_PROCEED;
}
+/* Well-known attribute check. */
+static int
+bgp_attr_check (struct peer *peer, struct attr *attr)
+{
+ u_char type = 0;
+
+ /* BGP Graceful-Restart End-of-RIB for IPv4 unicast is signaled as an
+ * empty UPDATE. */
+ if (CHECK_FLAG (peer->cap, PEER_CAP_RESTART_RCV) && !attr->flag)
+ return BGP_ATTR_PARSE_PROCEED;
+
+ /* "An UPDATE message that contains the MP_UNREACH_NLRI is not required
+ to carry any other path attributes.", though if MP_REACH_NLRI or NLRI
+ are present, it should. Check for any other attribute being present
+ instead.
+ */
+ if (attr->flag == ATTR_FLAG_BIT (BGP_ATTR_MP_UNREACH_NLRI))
+ return BGP_ATTR_PARSE_PROCEED;
+
+ if (! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_ORIGIN)))
+ type = BGP_ATTR_ORIGIN;
+
+ if (! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_AS_PATH)))
+ type = BGP_ATTR_AS_PATH;
+
+ /* RFC 2858 makes Next-Hop optional/ignored, if MP_REACH_NLRI is present and
+ * NLRI is empty. We can't easily check NLRI empty here though.
+ */
+ if (!CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP))
+ && !CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_MP_REACH_NLRI)))
+ type = BGP_ATTR_NEXT_HOP;
+
+ if (peer->sort == BGP_PEER_IBGP
+ && ! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)))
+ type = BGP_ATTR_LOCAL_PREF;
+
+ if (type)
+ {
+ zlog_warn ("%s Missing well-known attribute %s.", peer->host,
+ LOOKUP (attr_str, type));
+ bgp_notify_send_with_data (peer,
+ BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_MISS_ATTR,
+ &type, 1);
+ return BGP_ATTR_PARSE_ERROR;
+ }
+ return BGP_ATTR_PARSE_PROCEED;
+}
+
/* Read attribute of update packet. This function is called from
bgp_update_receive() in bgp_packet.c. */
bgp_attr_parse_ret_t
/* same goes for as4_aggregator */
struct aspath *as4_path = NULL;
as_t as4_aggregator = 0;
- struct in_addr as4_aggregator_addr = { 0 };
+ struct in_addr as4_aggregator_addr = { .s_addr = 0 };
/* Initialize bitmap. */
memset (seen, 0, BGP_ATTR_BITMAP_SIZE);
attr_endp = BGP_INPUT_PNT (peer) + length;
if (attr_endp > endp)
- {
- zlog_warn ("%s: BGP type %d length %d is too large, attribute total length is %d. attr_endp is %p. endp is %p", peer->host, type, length, size, attr_endp, endp);
- bgp_notify_send_with_data (peer,
- BGP_NOTIFY_UPDATE_ERR,
- BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
- startp, attr_endp - startp);
- return BGP_ATTR_PARSE_ERROR;
- }
+ {
+ zlog_warn("%s: BGP type %d length %d is too large, attribute total length is %d. attr_endp is %p. endp is %p",
+ peer->host, type, length, size, attr_endp, endp);
+ /*
+ * RFC 4271 6.3
+ * If any recognized attribute has an Attribute Length that conflicts
+ * with the expected length (based on the attribute type code), then
+ * the Error Subcode MUST be set to Attribute Length Error. The Data
+ * field MUST contain the erroneous attribute (type, length, and
+ * value).
+ * ----------
+ * We do not currently have a good way to determine the length of the
+ * attribute independent of the length received in the message.
+ * Instead we send the minimum between the amount of data we have and
+ * the amount specified by the attribute length field.
+ *
+ * Instead of directly passing in the packet buffer and offset we use
+ * the stream_get* functions to read into a stack buffer, since they
+ * perform bounds checking and we are working with untrusted data.
+ */
+ unsigned char ndata[BGP_MAX_PACKET_SIZE];
+ memset(ndata, 0x00, sizeof(ndata));
+ size_t lfl = CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN) ? 2 : 1;
+ /* Rewind to end of flag field */
+ stream_forward_getp(BGP_INPUT(peer), -(1 + lfl));
+ /* Type */
+ stream_get(&ndata[0], BGP_INPUT(peer), 1);
+ /* Length */
+ stream_get(&ndata[1], BGP_INPUT(peer), lfl);
+ /* Value */
+ size_t atl = attr_endp - startp;
+ size_t ndl = MIN(atl, STREAM_READABLE(BGP_INPUT(peer)));
+ stream_get(&ndata[lfl + 1], BGP_INPUT(peer), ndl);
+
+ bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, ndata, ndl + lfl + 1);
+
+ return BGP_ATTR_PARSE_ERROR;
+ }
struct bgp_attr_parser_args attr_args = {
.peer = peer,
case BGP_ATTR_EXT_COMMUNITIES:
ret = bgp_attr_ext_communities (&attr_args);
break;
+#if ENABLE_BGP_VNC
+ case BGP_ATTR_VNC:
+#endif
+ case BGP_ATTR_ENCAP:
+ ret = bgp_attr_encap (type, peer, length, attr, flag, startp);
+ break;
default:
ret = bgp_attr_unknown (&attr_args);
break;
return BGP_ATTR_PARSE_ERROR;
}
+ /* Check all mandatory well-known attributes are present */
+ {
+ bgp_attr_parse_ret_t ret;
+ if ((ret = bgp_attr_check (peer, attr)) < 0)
+ {
+ if (as4_path)
+ aspath_unintern (&as4_path);
+ return ret;
+ }
+ }
+
/*
* At this place we can see whether we got AS4_PATH and/or
* AS4_AGGREGATOR from a 16Bit peer and act accordingly.
* So, to be defensive, we are not relying on any order and read
* all attributes first, including these 32bit ones, and now,
* afterwards, we look what and if something is to be done for as4.
+ *
+ * It is possible to not have AS_PATH, e.g. GR EoR and sole
+ * MP_UNREACH_NLRI.
*/
/* actually... this doesn't ever return failure currently, but
* better safe than sorry */
- if (bgp_attr_munge_as4_attrs (peer, attr, as4_path,
+ if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT (BGP_ATTR_AS_PATH))
+ && bgp_attr_munge_as4_attrs (peer, attr, as4_path,
as4_aggregator, &as4_aggregator_addr))
{
bgp_notify_send (peer,
if (ret != BGP_ATTR_PARSE_PROCEED)
return ret;
}
-
- /* Finally intern unknown attribute. */
- if (attr->extra && attr->extra->transit)
- attr->extra->transit = transit_intern (attr->extra->transit);
-
- return BGP_ATTR_PARSE_PROCEED;
-}
-
-/* Well-known attribute check. */
-int
-bgp_attr_check (struct peer *peer, struct attr *attr)
-{
- u_char type = 0;
-
- if (! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_ORIGIN)))
- type = BGP_ATTR_ORIGIN;
-
- if (! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_AS_PATH)))
- type = BGP_ATTR_AS_PATH;
-
- if (! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP)))
- type = BGP_ATTR_NEXT_HOP;
-
- if (peer->sort == BGP_PEER_IBGP
- && ! CHECK_FLAG (attr->flag, ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)))
- type = BGP_ATTR_LOCAL_PREF;
-
- if (type)
+ if (attr->extra)
{
- zlog_warn ("%s Missing well-known attribute %d.", peer->host, type);
- bgp_notify_send_with_data (peer,
- BGP_NOTIFY_UPDATE_ERR,
- BGP_NOTIFY_UPDATE_MISS_ATTR,
- &type, 1);
- return BGP_ATTR_PARSE_ERROR;
+ /* Finally intern unknown attribute. */
+ if (attr->extra->transit)
+ attr->extra->transit = transit_intern (attr->extra->transit);
+ if (attr->extra->encap_subtlvs)
+ attr->extra->encap_subtlvs = encap_intern (attr->extra->encap_subtlvs, ENCAP_SUBTLV_TYPE);
+#if ENABLE_BGP_VNC
+ if (attr->extra->vnc_subtlvs)
+ attr->extra->vnc_subtlvs = encap_intern (attr->extra->vnc_subtlvs, VNC_SUBTLV_TYPE);
+#endif
}
+
return BGP_ATTR_PARSE_PROCEED;
}
-int stream_put_prefix (struct stream *, struct prefix *);
-
size_t
-bgp_packet_mpattr_start (struct stream *s, afi_t afi, safi_t safi,
+bgp_packet_mpattr_start (struct stream *s, afi_t afi, safi_t safi, afi_t nh_afi,
struct bpacket_attr_vec_arr *vecarr,
struct attr *attr)
{
stream_putc (s, BGP_ATTR_MP_REACH_NLRI);
sizep = stream_get_endp (s);
stream_putw (s, 0); /* Marker: Attribute length. */
- stream_putw (s, afi); /* AFI */
- stream_putc (s, safi); /* SAFI */
+ stream_putw (s, afi);
+ stream_putc (s, (safi == SAFI_MPLS_VPN) ? SAFI_MPLS_LABELED_VPN : safi);
+
+ if (nh_afi == AFI_MAX)
+ nh_afi = BGP_NEXTHOP_AFI_FROM_NHLEN(attr->extra->mp_nexthop_len);
/* Nexthop */
- switch (afi)
+ switch (nh_afi)
{
case AFI_IP:
switch (safi)
case SAFI_MPLS_VPN:
bpacket_attr_vec_arr_set_vec (vecarr, BGP_ATTR_VEC_NH, s, attr);
stream_putc (s, 12);
- stream_putl (s, 0);
+ stream_putl (s, 0); /* RD = 0, per RFC */
stream_putl (s, 0);
stream_put (s, &attr->extra->mp_nexthop_global_in, 4);
break;
+ case SAFI_ENCAP:
+ stream_putc (s, 4);
+ stream_put (s, &attr->extra->mp_nexthop_global_in, 4);
+ break;
default:
break;
}
case SAFI_UNICAST:
case SAFI_MULTICAST:
{
- unsigned long sizep;
struct attr_extra *attre = attr->extra;
assert (attr->extra);
if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
stream_put (s, &attre->mp_nexthop_local, IPV6_MAX_BYTELEN);
}
+ break;
+ case SAFI_MPLS_VPN:
+ {
+ struct attr_extra *attre = attr->extra;
+
+ assert (attr->extra);
+ if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL) {
+ stream_putc (s, 24);
+ stream_putl (s, 0); /* RD = 0, per RFC */
+ stream_putl (s, 0);
+ stream_put (s, &attre->mp_nexthop_global, IPV6_MAX_BYTELEN);
+ } else if (attre->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
+ stream_putc (s, 48);
+ stream_putl (s, 0); /* RD = 0, per RFC */
+ stream_putl (s, 0);
+ stream_put (s, &attre->mp_nexthop_global, IPV6_MAX_BYTELEN);
+ stream_putl (s, 0); /* RD = 0, per RFC */
+ stream_putl (s, 0);
+ stream_put (s, &attre->mp_nexthop_local, IPV6_MAX_BYTELEN);
+ }
+ }
+ break;
+ case SAFI_ENCAP:
+ assert (attr->extra);
+ stream_putc (s, IPV6_MAX_BYTELEN);
+ stream_put (s, &attr->extra->mp_nexthop_global, IPV6_MAX_BYTELEN);
+ break;
default:
break;
}
void
bgp_packet_mpattr_prefix (struct stream *s, afi_t afi, safi_t safi,
struct prefix *p, struct prefix_rd *prd,
- u_char *tag)
+ u_char *tag, int addpath_encode,
+ u_int32_t addpath_tx_id)
{
- switch (safi)
+ if (safi == SAFI_MPLS_VPN)
{
- case SAFI_MPLS_VPN:
+ if (addpath_encode)
+ stream_putl(s, addpath_tx_id);
/* Tag, RD, Prefix write. */
stream_putc (s, p->prefixlen + 88);
stream_put (s, tag, 3);
stream_put (s, prd->val, 8);
stream_put (s, &p->u.prefix, PSIZE (p->prefixlen));
- break;
- default:
- /* Prefix write. */
- stream_put_prefix (s, p);
- break;
+ }
+ else
+ stream_put_prefix_addpath (s, p, addpath_encode, addpath_tx_id);
+}
+
+size_t
+bgp_packet_mpattr_prefix_size (afi_t afi, safi_t safi, struct prefix *p)
+{
+ int size = PSIZE (p->prefixlen);
+ if (safi == SAFI_MPLS_VPN)
+ size += 88;
+ return size;
+}
+
+/*
+ * Encodes the tunnel encapsulation attribute,
+ * and with ENABLE_BGP_VNC the VNC attribute which uses
+ * almost the same TLV format
+ */
+static void
+bgp_packet_mpattr_tea(
+ struct bgp *bgp,
+ struct peer *peer,
+ struct stream *s,
+ struct attr *attr,
+ uint8_t attrtype)
+{
+ unsigned int attrlenfield = 0;
+ unsigned int attrhdrlen = 0;
+ struct bgp_attr_encap_subtlv *subtlvs;
+ struct bgp_attr_encap_subtlv *st;
+ const char *attrname;
+
+ if (!attr || !attr->extra ||
+ (attrtype == BGP_ATTR_ENCAP &&
+ (!attr->extra->encap_tunneltype ||
+ attr->extra->encap_tunneltype == BGP_ENCAP_TYPE_MPLS)))
+ return;
+
+ switch (attrtype) {
+ case BGP_ATTR_ENCAP:
+ attrname = "Tunnel Encap";
+ subtlvs = attr->extra->encap_subtlvs;
+
+ /*
+ * The tunnel encap attr has an "outer" tlv.
+ * T = tunneltype,
+ * L = total length of subtlvs,
+ * V = concatenated subtlvs.
+ */
+ attrlenfield = 2 + 2; /* T + L */
+ attrhdrlen = 1 + 1; /* subTLV T + L */
+ break;
+
+#if ENABLE_BGP_VNC
+ case BGP_ATTR_VNC:
+ attrname = "VNC";
+ subtlvs = attr->extra->vnc_subtlvs;
+ attrlenfield = 0; /* no outer T + L */
+ attrhdrlen = 2 + 2; /* subTLV T + L */
+ break;
+#endif
+
+ default:
+ assert(0);
+ }
+
+ /* compute attr length */
+ for (st = subtlvs; st; st = st->next) {
+ attrlenfield += (attrhdrlen + st->length);
+ }
+
+ if (attrlenfield > 0xffff) {
+ zlog_info ("%s attribute is too long (length=%d), can't send it",
+ attrname,
+ attrlenfield);
+ return;
+ }
+
+ if (attrlenfield > 0xff) {
+ /* 2-octet length field */
+ stream_putc (s,
+ BGP_ATTR_FLAG_TRANS|BGP_ATTR_FLAG_OPTIONAL|BGP_ATTR_FLAG_EXTLEN);
+ stream_putc (s, attrtype);
+ stream_putw (s, attrlenfield & 0xffff);
+ } else {
+ /* 1-octet length field */
+ stream_putc (s, BGP_ATTR_FLAG_TRANS|BGP_ATTR_FLAG_OPTIONAL);
+ stream_putc (s, attrtype);
+ stream_putc (s, attrlenfield & 0xff);
+ }
+
+ if (attrtype == BGP_ATTR_ENCAP) {
+ /* write outer T+L */
+ stream_putw(s, attr->extra->encap_tunneltype);
+ stream_putw(s, attrlenfield - 4);
+ }
+
+ /* write each sub-tlv */
+ for (st = subtlvs; st; st = st->next) {
+ if (attrtype == BGP_ATTR_ENCAP) {
+ stream_putc (s, st->type);
+ stream_putc (s, st->length);
+#if ENABLE_BGP_VNC
+ } else {
+ stream_putw (s, st->type);
+ stream_putw (s, st->length);
+#endif
+ }
+ stream_put (s, st->value, st->length);
}
}
struct stream *s, struct attr *attr,
struct bpacket_attr_vec_arr *vecarr,
struct prefix *p, afi_t afi, safi_t safi,
- struct peer *from, struct prefix_rd *prd, u_char *tag)
+ struct peer *from, struct prefix_rd *prd, u_char *tag,
+ int addpath_encode,
+ u_int32_t addpath_tx_id)
{
size_t cp;
size_t aspath_sizep;
struct aspath *aspath;
int send_as4_path = 0;
int send_as4_aggregator = 0;
- int i = 0;
int use32bit = (CHECK_FLAG (peer->cap, PEER_CAP_AS4_RCV)) ? 1 : 0;
- size_t mpattrlen_pos = 0;
if (! bgp)
bgp = peer->bgp;
/* Remember current pointer. */
cp = stream_get_endp (s);
- if (p && !(afi == AFI_IP && safi == SAFI_UNICAST))
+ if (p && !((afi == AFI_IP && safi == SAFI_UNICAST) &&
+ !peer_cap_enhe(peer)))
{
- mpattrlen_pos = bgp_packet_mpattr_start(s, afi, safi, vecarr, attr);
- bgp_packet_mpattr_prefix(s, afi, safi, p, prd, tag);
+ size_t mpattrlen_pos = 0;
+
+ mpattrlen_pos = bgp_packet_mpattr_start(s, afi, safi,
+ (peer_cap_enhe(peer) ? AFI_IP6 :
+ AFI_MAX), /* get from NH */
+ vecarr, attr);
+ bgp_packet_mpattr_prefix(s, afi, safi, p, prd, tag,
+ addpath_encode, addpath_tx_id);
bgp_packet_mpattr_end(s, mpattrlen_pos);
}
{
aspath = aspath_dup (attr->aspath);
+ /* Even though we may not be configured for confederations we may have
+ * RXed an AS_PATH with AS_CONFED_SEQUENCE or AS_CONFED_SET */
+ aspath = aspath_delete_confed_seq (aspath);
+
if (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION))
{
- /* Strip the confed info, and then stuff our path CONFED_ID
- on the front */
- aspath = aspath_delete_confed_seq (aspath);
+ /* Stuff our path CONFED_ID on the front */
aspath = aspath_add_seq (aspath, bgp->confed_id);
}
else
send_as4_path = 1; /* we'll do this later, at the correct place */
/* Nexthop attribute. */
- if (attr->flag & ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP) && afi == AFI_IP)
+ if (afi == AFI_IP && safi == SAFI_UNICAST && !peer_cap_enhe(peer))
{
- stream_putc (s, BGP_ATTR_FLAG_TRANS);
- stream_putc (s, BGP_ATTR_NEXT_HOP);
- bpacket_attr_vec_arr_set_vec (vecarr, BGP_ATTR_VEC_NH, s, attr);
- stream_putc (s, 4);
- stream_put_ipv4 (s, attr->nexthop.s_addr);
+ if (attr->flag & ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP))
+ {
+ stream_putc (s, BGP_ATTR_FLAG_TRANS);
+ stream_putc (s, BGP_ATTR_NEXT_HOP);
+ bpacket_attr_vec_arr_set_vec (vecarr, BGP_ATTR_VEC_NH, s, attr);
+ stream_putc (s, 4);
+ stream_put_ipv4 (s, attr->nexthop.s_addr);
+ }
+ else if (safi == SAFI_UNICAST && peer_cap_enhe(from))
+ {
+ /*
+ * Likely this is the case when an IPv4 prefix was received with
+ * Extended Next-hop capability and now being advertised to
+ * non-ENHE peers.
+ * Setting the mandatory (ipv4) next-hop attribute here to enable
+ * implicit next-hop self with correct (ipv4 address family).
+ */
+ stream_putc (s, BGP_ATTR_FLAG_TRANS);
+ stream_putc (s, BGP_ATTR_NEXT_HOP);
+ bpacket_attr_vec_arr_set_vec (vecarr, BGP_ATTR_VEC_NH, s, NULL);
+ stream_putc (s, 4);
+ stream_put_ipv4 (s, 0);
+ }
}
/* MED attribute. */
stream_putl (s, attr->extra->aggregator_as);
stream_put_ipv4 (s, attr->extra->aggregator_addr.s_addr);
}
-
+
+ if ((afi == AFI_IP || afi == AFI_IP6) &&
+ (safi == SAFI_ENCAP || safi == SAFI_MPLS_VPN))
+ {
+ /* Tunnel Encap attribute */
+ bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_ENCAP);
+
+#if ENABLE_BGP_VNC
+ /* VNC attribute */
+ bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_VNC);
+#endif
+ }
+
/* Unknown transit attribute. */
if (attr->extra && attr->extra->transit)
stream_put (s, attr->extra->transit->val, attr->extra->transit->length);
stream_putw (s, 0); /* Length of this attribute. */
stream_putw (s, afi);
- safi = (safi == SAFI_MPLS_VPN) ? SAFI_MPLS_LABELED_VPN : safi;
- stream_putc (s, safi);
+ stream_putc (s, (safi == SAFI_MPLS_VPN) ? SAFI_MPLS_LABELED_VPN : safi);
return attrlen_pnt;
}
+/*
+ * Write one withdrawn-route entry for an MP_UNREACH_NLRI attribute
+ * into stream 's'.
+ *
+ * For SAFI_MPLS_VPN the NLRI is label (3 octets) + Route Distinguisher
+ * (8 octets) + prefix; otherwise a plain prefix is written.  When
+ * addpath_encode is non-zero, the 4-octet addpath_tx_id path
+ * identifier is emitted ahead of the NLRI (RFC 7911 encoding).
+ *
+ * NOTE(review): in the VPN branch the path-id is written before the
+ * length octet, matching the non-VPN helper's layout -- confirm against
+ * stream_put_prefix_addpath.
+ */
void
bgp_packet_mpunreach_prefix (struct stream *s, struct prefix *p,
afi_t afi, safi_t safi, struct prefix_rd *prd,
- u_char *tag)
+ u_char *tag, int addpath_encode,
+ u_int32_t addpath_tx_id)
{
if (safi == SAFI_MPLS_VPN)
{
+ /* addpath TX ID */
+ if (addpath_encode)
+ stream_putl(s, addpath_tx_id);
+
+ /* Prefix length in bits: 88 = 24 (label) + 64 (RD) prepended. */
stream_putc (s, p->prefixlen + 88);
stream_put (s, tag, 3);
stream_put (s, prd->val, 8);
stream_put (s, &p->u.prefix, PSIZE (p->prefixlen));
}
else
- stream_put_prefix (s, p);
+ stream_put_prefix_addpath (s, p, addpath_encode, addpath_tx_id);
}
+/*
+ * Finish an MP_UNREACH_NLRI attribute by back-filling its length field
+ * at attrlen_pnt.  The removed inline code wrote (endp - attrlen_pnt - 2)
+ * as a 2-octet length at attrlen_pnt; this now delegates to
+ * bgp_packet_mpattr_end, which presumably performs the same fix-up for
+ * MP_REACH_NLRI -- TODO confirm the two length encodings are identical.
+ */
void
bgp_packet_mpunreach_end (struct stream *s, size_t attrlen_pnt)
{
- bgp_size_t size;
-
- /* Set MP attribute length. Don't count the (2) bytes used to encode
- the attr length */
- size = stream_get_endp (s) - attrlen_pnt - 2;
- stream_putw_at (s, attrlen_pnt, size);
+ bgp_packet_mpattr_end (s, attrlen_pnt);
}
/* Initialization of attribute. */
ecommunity_init ();
cluster_init ();
transit_init ();
+ encap_init ();
}
void
ecommunity_finish ();
cluster_finish ();
transit_finish ();
+ encap_finish ();
}
/* Make attribute packet. */
unsigned long len;
size_t aspath_lenp;
struct aspath *aspath;
+ int addpath_encode = 0;
+ u_int32_t addpath_tx_id = 0;
/* Remember current pointer. */
cp = stream_get_endp (s);
stream_putc(s, 0);
/* Prefix */
- stream_put_prefix(s, prefix);
+ stream_put_prefix_addpath (s, prefix, addpath_encode, addpath_tx_id);
/* Set MP attribute length. */
stream_putc_at (s, sizep, (stream_get_endp (s) - sizep) - 1);