adj = XCALLOC(MTYPE_BGP_ADJ_IN, sizeof(struct bgp_adj_in));
adj->peer = peer_lock(peer); /* adj_in peer reference */
adj->attr = bgp_attr_intern(attr);
+ adj->uptime = bgp_clock();
adj->addpath_rx_id = addpath_id;
BGP_ADJ_IN_ADD(rn, adj);
bgp_lock_node(rn);
/* Received attribute. */
struct attr *attr;
+ /* timestamp (monotime) */
+ time_t uptime;
+
/* Addpath identifier */
uint32_t addpath_rx_id;
};
--- /dev/null
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "stream.h"
+#include "sockunion.h"
+#include "command.h"
+#include "prefix.h"
+#include "thread.h"
+#include "linklist.h"
+#include "queue.h"
+#include "pullwr.h"
+#include "memory.h"
+#include "network.h"
+#include "filter.h"
+#include "lib_errors.h"
+#include "stream.h"
+#include "libfrr.h"
+#include "version.h"
+#include "jhash.h"
+#include "termtable.h"
+
+#include "bgpd/bgp_table.h"
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_packet.h"
+#include "bgpd/bgp_bmp.h"
+#include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_updgrp.h"
+#include "bgpd/bgp_vty.h"
+
+static void bmp_close(struct bmp *bmp);
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp);
+static void bmp_targets_put(struct bmp_targets *bt);
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid);
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer);
+static void bmp_active_disconnected(struct bmp_active *ba);
+static void bmp_active_put(struct bmp_active *ba);
+
+DEFINE_MGROUP(BMP, "BMP (BGP Monitoring Protocol)")
+
+DEFINE_MTYPE_STATIC(BMP, BMP_CONN, "BMP connection state")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETS, "BMP targets")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETSNAME, "BMP targets name")
+DEFINE_MTYPE_STATIC(BMP, BMP_LISTENER, "BMP listener")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACTIVE, "BMP active connection config")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACLNAME, "BMP access-list name")
+DEFINE_MTYPE_STATIC(BMP, BMP_QUEUE, "BMP update queue item")
+DEFINE_MTYPE_STATIC(BMP, BMP, "BMP instance state")
+DEFINE_MTYPE_STATIC(BMP, BMP_MIRRORQ, "BMP route mirroring buffer")
+DEFINE_MTYPE_STATIC(BMP, BMP_PEER, "BMP per BGP peer data")
+DEFINE_MTYPE_STATIC(BMP, BMP_OPEN, "BMP stored BGP OPEN message")
+
+DEFINE_QOBJ_TYPE(bmp_targets)
+
+static int bmp_bgp_cmp(const struct bmp_bgp *a, const struct bmp_bgp *b)
+{
+ if (a->bgp < b->bgp)
+ return -1;
+ if (a->bgp > b->bgp)
+ return 1;
+ return 0;
+}
+
+static uint32_t bmp_bgp_hash(const struct bmp_bgp *e)
+{
+ return jhash(&e->bgp, sizeof(e->bgp), 0x55aa5a5a);
+}
+
+DECLARE_HASH(bmp_bgph, struct bmp_bgp, bbi, bmp_bgp_cmp, bmp_bgp_hash)
+
+struct bmp_bgph_head bmp_bgph;
+
+static int bmp_bgp_peer_cmp(const struct bmp_bgp_peer *a,
+ const struct bmp_bgp_peer *b)
+{
+ if (a->peerid < b->peerid)
+ return -1;
+ if (a->peerid > b->peerid)
+ return 1;
+ return 0;
+}
+
/* Hash for per-peer entries; the peer id is used directly as the hash
 * value (no mixing). */
static uint32_t bmp_bgp_peer_hash(const struct bmp_bgp_peer *e)
{
	return e->peerid;
}
+
+DECLARE_HASH(bmp_peerh, struct bmp_bgp_peer, bpi,
+ bmp_bgp_peer_cmp, bmp_bgp_peer_hash)
+
+struct bmp_peerh_head bmp_peerh;
+
+DECLARE_LIST(bmp_mirrorq, struct bmp_mirrorq, bmi)
+
+/* listener management */
+
+static int bmp_listener_cmp(const struct bmp_listener *a,
+ const struct bmp_listener *b)
+{
+ int c;
+
+ c = sockunion_cmp(&a->addr, &b->addr);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_listeners, struct bmp_listener, bli, bmp_listener_cmp)
+
/* Targets groups are sorted/keyed by their configured name. */
static int bmp_targets_cmp(const struct bmp_targets *a,
			   const struct bmp_targets *b)
{
	return strcmp(a->name, b->name);
}
+
+DECLARE_SORTLIST_UNIQ(bmp_targets, struct bmp_targets, bti, bmp_targets_cmp)
+
+DECLARE_LIST(bmp_session, struct bmp, bsi)
+
+DECLARE_DLIST(bmp_qlist, struct bmp_queue_entry, bli)
+
/* Compare two update-queue entries: first by prefix, then by the raw
 * bytes of the members laid out between 'peerid' and 'refcount' in
 * struct bmp_queue_entry (peerid/afi/safi).  Any new key field must be
 * placed inside that span for this memcmp to cover it. */
static int bmp_qhash_cmp(const struct bmp_queue_entry *a,
		const struct bmp_queue_entry *b)
{
	int ret;
	ret = prefix_cmp(&a->p, &b->p);
	if (ret)
		return ret;
	ret = memcmp(&a->peerid, &b->peerid,
			offsetof(struct bmp_queue_entry, refcount) -
			offsetof(struct bmp_queue_entry, peerid));
	return ret;
}
+
/* Hash key for update-queue entries; mixes the prefix hash with the
 * same (peerid..refcount) byte span used by bmp_qhash_cmp(), so hash
 * and compare stay consistent. */
static uint32_t bmp_qhash_hkey(const struct bmp_queue_entry *e)
{
	uint32_t key;

	key = prefix_hash_key((void *)&e->p);
	key = jhash(&e->peerid,
			offsetof(struct bmp_queue_entry, refcount) -
			offsetof(struct bmp_queue_entry, peerid),
			key);
	return key;
}
+
+DECLARE_HASH(bmp_qhash, struct bmp_queue_entry, bhi,
+ bmp_qhash_cmp, bmp_qhash_hkey)
+
+static int bmp_active_cmp(const struct bmp_active *a,
+ const struct bmp_active *b)
+{
+ int c;
+
+ c = strcmp(a->hostname, b->hostname);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_actives, struct bmp_active, bai, bmp_active_cmp)
+
/* Allocate per-connection state for a newly accepted BMP session and
 * link it onto the owning targets group's session list.
 *
 * Each AFI/SAFI that the targets group monitors starts in NEEDSYNC so
 * the initial full table dump is sent before live updates.
 */
static struct bmp *bmp_new(struct bmp_targets *bt, int bmp_sock)
{
	struct bmp *new = XCALLOC(MTYPE_BMP_CONN, sizeof(struct bmp));
	afi_t afi;
	safi_t safi;

	/* connection establishment timestamp (monotonic clock) */
	monotime(&new->t_up);
	new->targets = bt;
	new->socket = bmp_sock;
	new->syncafi = AFI_MAX;	/* AFI_MAX = no table sync in progress */

	FOREACH_AFI_SAFI (afi, safi) {
		new->afistate[afi][safi] = bt->afimon[afi][safi]
			? BMP_AFI_NEEDSYNC : BMP_AFI_INACTIVE;
	}

	bmp_session_add_tail(&bt->sessions, new);
	return new;
}
+
+static void bmp_free(struct bmp *bmp)
+{
+ bmp_session_del(&bmp->targets->sessions, bmp);
+ XFREE(MTYPE_BMP_CONN, bmp);
+}
+
/* Write the BMP common header: version, 4-byte message length, type.
 * The length is written as 0 here and patched in at BMP_LENGTH_POS
 * once the full message has been assembled. */
static void bmp_common_hdr(struct stream *s, uint8_t ver, uint8_t type)
{
	stream_putc(s, ver);	/* version */
	stream_putl(s, 0);	/* length placeholder, fixed up later */
	stream_putc(s, type);	/* message type */
}
+
/* Write the BMP per-peer header for 'peer' into 's'.
 *
 * flags: caller-supplied BMP_PEER_FLAG_* bits; the V (IPv6) bit is
 *        forced to match the peer's address family here.
 * tv:    message timestamp, or NULL to send a zero timestamp (used for
 *        synthetic messages, e.g. End-of-RIB).
 */
static void bmp_per_peer_hdr(struct stream *s, struct peer *peer,
		uint8_t flags, const struct timeval *tv)
{
	char peer_distinguisher[8];

#define BMP_PEER_TYPE_GLOBAL_INSTANCE 0
#define BMP_PEER_TYPE_RD_INSTANCE 1
#define BMP_PEER_TYPE_LOCAL_INSTANCE 2

#define BMP_PEER_FLAG_V (1 << 7)
#define BMP_PEER_FLAG_L (1 << 6)
#define BMP_PEER_FLAG_A (1 << 5)

	/* Peer Type */
	stream_putc(s, BMP_PEER_TYPE_GLOBAL_INSTANCE);

	/* Peer Flags */
	if (peer->su.sa.sa_family == AF_INET6)
		SET_FLAG(flags, BMP_PEER_FLAG_V);
	else
		UNSET_FLAG(flags, BMP_PEER_FLAG_V);
	stream_putc(s, flags);

	/* Peer Distinguisher - always zero (only global instance peers
	 * are emitted, see Peer Type above) */
	memset (&peer_distinguisher[0], 0, 8);
	stream_put(s, &peer_distinguisher[0], 8);

	/* Peer Address: 16 bytes; IPv4 is right-aligned with 12 leading
	 * zero bytes, unknown families are all-zero */
	if (peer->su.sa.sa_family == AF_INET6)
		stream_put(s, &peer->su.sin6.sin6_addr, 16);
	else if (peer->su.sa.sa_family == AF_INET) {
		stream_putl(s, 0);
		stream_putl(s, 0);
		stream_putl(s, 0);
		stream_put_in_addr(s, &peer->su.sin.sin_addr);
	} else {
		stream_putl(s, 0);
		stream_putl(s, 0);
		stream_putl(s, 0);
		stream_putl(s, 0);
	}

	/* Peer AS */
	stream_putl(s, peer->as);

	/* Peer BGP ID */
	stream_put_in_addr(s, &peer->remote_id);

	/* Timestamp: seconds + microseconds.  NOTE(review): tv_sec is
	 * written as 32 bits; wraps in 2038 on 64-bit time_t — confirm
	 * acceptable per protocol field width. */
	if (tv) {
		stream_putl(s, tv->tv_sec);
		stream_putl(s, tv->tv_usec);
	} else {
		stream_putl(s, 0);
		stream_putl(s, 0);
	}
}
+
/* Append an Information TLV (2-byte type, 2-byte length, string data
 * without NUL terminator) to the stream.
 *
 * Fix: use size_t for the strlen() result instead of int (signed/
 * unsigned mismatch and potential truncation).  The on-wire length
 * field is still 16 bits; callers only pass short strings (sysname,
 * sysdescr, peer description).
 */
static void bmp_put_info_tlv(struct stream *s, uint16_t type,
		const char *string)
{
	size_t len = strlen(string);

	stream_putw(s, type);
	stream_putw(s, len);
	stream_put(s, string, len);
}
+
/* Send the BMP Initiation message (sysDescr and sysName info TLVs)
 * right after the session is established.  Always returns 0. */
static int bmp_send_initiation(struct bmp *bmp)
{
	int len;
	struct stream *s;
	s = stream_new(BGP_MAX_PACKET_SIZE);
	bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_INITIATION);

#define BMP_INFO_TYPE_SYSDESCR	1
#define BMP_INFO_TYPE_SYSNAME	2
	bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSDESCR,
			FRR_FULL_NAME " " FRR_VER_SHORT);
	bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSNAME, cmd_hostname_get());

	len = stream_get_endp(s);
	stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.

	pullwr_write_stream(bmp->pullwr, s);
	stream_free(s);
	return 0;
}
+
/* Serialize a BGP NOTIFICATION as a complete BGP message (16-byte
 * marker, length, type, code/subcode/data) into 's', for inclusion in
 * a BMP Peer Down message.  The length field is back-patched to cover
 * the marker plus everything written after it. */
static void bmp_notify_put(struct stream *s, struct bgp_notify *nfy)
{
	size_t len_pos;
	uint8_t marker[16] = {
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
	};

	stream_put(s, marker, sizeof(marker));
	len_pos = stream_get_endp(s);
	/* placeholder for the 2-byte BGP message length */
	stream_putw(s, 0);
	stream_putc(s, BGP_MSG_NOTIFY);
	stream_putc(s, nfy->code);
	stream_putc(s, nfy->subcode);
	stream_put(s, nfy->data, nfy->length);

	/* total message length includes the marker written above */
	stream_putw_at(s, len_pos, stream_get_endp(s) - len_pos
			+ sizeof(marker));
}
+
/* Build a BMP Peer Up or Peer Down message reflecting 'peer's state.
 * Returns a new stream; the caller owns and must free it.
 *
 * Peer Up (peer Established and !down): includes local address/ports
 * and the captured TX/RX OPEN messages; a minimal dummy OPEN is
 * substituted (with a warning) when none was captured.
 * Peer Down (otherwise): reason derived from peer->last_reset,
 * including the received NOTIFICATION where applicable.
 */
static struct stream *bmp_peerstate(struct peer *peer, bool down)
{
	struct stream *s;
	size_t len;
	struct timeval uptime, uptime_real;

	/* peer->uptime is on the monotonic clock; BMP timestamps are
	 * wall clock, so convert */
	uptime.tv_sec = peer->uptime;
	uptime.tv_usec = 0;
	monotime_to_realtime(&uptime, &uptime_real);

#define BGP_BMP_MAX_PACKET_SIZE	1024
	/* NOTE(review): BGP_BMP_MAX_PACKET_SIZE is defined but unused;
	 * the stream below is sized with BGP_MAX_PACKET_SIZE. */
	s = stream_new(BGP_MAX_PACKET_SIZE);

	if (peer->status == Established && !down) {
		struct bmp_bgp_peer *bbpeer;

		bmp_common_hdr(s, BMP_VERSION_3,
				BMP_TYPE_PEER_UP_NOTIFICATION);
		bmp_per_peer_hdr(s, peer, 0, &uptime_real);

		/* Local Address (16 bytes)
		 * NOTE(review): assumes peer->su_local / su_remote are
		 * non-NULL for an Established peer — confirm. */
		if (peer->su_local->sa.sa_family == AF_INET6)
			stream_put(s, &peer->su_local->sin6.sin6_addr, 16);
		else if (peer->su_local->sa.sa_family == AF_INET) {
			stream_putl(s, 0);
			stream_putl(s, 0);
			stream_putl(s, 0);
			stream_put_in_addr(s, &peer->su_local->sin.sin_addr);
		}

		/* Local Port, Remote Port */
		if (peer->su_local->sa.sa_family == AF_INET6)
			stream_putw(s, peer->su_local->sin6.sin6_port);
		else if (peer->su_local->sa.sa_family == AF_INET)
			stream_putw(s, peer->su_local->sin.sin_port);
		if (peer->su_remote->sa.sa_family == AF_INET6)
			stream_putw(s, peer->su_remote->sin6.sin6_port);
		else if (peer->su_remote->sa.sa_family == AF_INET)
			stream_putw(s, peer->su_remote->sin.sin_port);

		/* shortest valid BGP OPEN: marker + len 0x13 + type 1,
		 * used when the real OPEN was not captured */
		static const uint8_t dummy_open[] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0x00, 0x13, 0x01,
		};

		bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);

		/* Sent OPEN Message */
		if (bbpeer && bbpeer->open_tx)
			stream_put(s, bbpeer->open_tx, bbpeer->open_tx_len);
		else {
			stream_put(s, dummy_open, sizeof(dummy_open));
			zlog_warn("bmp: missing TX OPEN message for peer %s\n",
					peer->host);
		}
		/* Received OPEN Message */
		if (bbpeer && bbpeer->open_rx)
			stream_put(s, bbpeer->open_rx, bbpeer->open_rx_len);
		else {
			stream_put(s, dummy_open, sizeof(dummy_open));
			zlog_warn("bmp: missing RX OPEN message for peer %s\n",
					peer->host);
		}

		/* optional Information TLV with the peer description */
		if (peer->desc)
			bmp_put_info_tlv(s, 0, peer->desc);
	} else {
		uint8_t type;
		size_t type_pos;

		bmp_common_hdr(s, BMP_VERSION_3,
				BMP_TYPE_PEER_DOWN_NOTIFICATION);
		bmp_per_peer_hdr(s, peer, 0, &uptime_real);

		type_pos = stream_get_endp(s);
		stream_putc(s, 0); /* placeholder for down reason */

		/* map the reset reason to a BMP peer-down reason code;
		 * the reason-specific data follows the code byte */
		switch (peer->last_reset) {
		case PEER_DOWN_NOTIFY_RECEIVED:
			type = BMP_PEERDOWN_REMOTE_NOTIFY;
			bmp_notify_put(s, &peer->notify);
			break;
		case PEER_DOWN_CLOSE_SESSION:
			type = BMP_PEERDOWN_REMOTE_CLOSE;
			break;
		default:
			type = BMP_PEERDOWN_LOCAL_NOTIFY;
			stream_put(s, peer->last_reset_cause,
					peer->last_reset_cause_size);
			break;
		}
		stream_putc_at(s, type_pos, type);
	}

	len = stream_get_endp(s);
	stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.
	return s;
}
+
+
+static int bmp_send_peerup(struct bmp *bmp)
+{
+ struct peer *peer;
+ struct listnode *node;
+ struct stream *s;
+
+ /* Walk down all peers */
+ for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
+ s = bmp_peerstate(peer, false);
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
+
+ return 0;
+}
+
/* Queue one finished BMP message on every session of every targets
 * group of this BGP instance, then free the stream.  Takes ownership
 * of 's'.
 * XXX: kludge - filling the pullwr's buffer */
static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
{
	struct bmp_targets *bt;
	struct bmp *bmp;

	frr_each(bmp_targets, &bmpbgp->targets, bt)
		frr_each(bmp_session, &bt->sessions, bmp)
			pullwr_write_stream(bmp->pullwr, s);
	stream_free(s);
}
+
+/*
+ * Route Mirroring
+ */
+
+#define BMP_MIRROR_TLV_TYPE_BGP_MESSAGE 0
+#define BMP_MIRROR_TLV_TYPE_INFO 1
+
+#define BMP_MIRROR_INFO_CODE_ERRORPDU 0
+#define BMP_MIRROR_INFO_CODE_LOSTMSGS 1
+
/* Take the next mirror-queue item for this session and advance the
 * session's read position.  Drops the item's refcount; once the last
 * session has consumed it, the item is removed from the shared queue
 * and the queue size accounting is updated.  The caller must free the
 * returned item if its refcount reached zero. */
static struct bmp_mirrorq *bmp_pull_mirror(struct bmp *bmp)
{
	struct bmp_mirrorq *bmq;

	bmq = bmp->mirrorpos;
	if (!bmq)
		return NULL;

	bmp->mirrorpos = bmp_mirrorq_next(&bmp->targets->bmpbgp->mirrorq, bmq);

	bmq->refcount--;
	if (!bmq->refcount) {
		bmp->targets->bmpbgp->mirror_qsize -= sizeof(*bmq) + bmq->len;
		bmp_mirrorq_del(&bmp->targets->bmpbgp->mirrorq, bmq);
	}
	return bmq;
}
+
/* Enforce the mirror-queue memory limit.  While the queue is over the
 * configured limit, find the sessions whose read position is the
 * oldest queued item, drain their entire backlog, and mark them so a
 * "messages lost" notice is sent (see bmp_wrmirror_lost). */
static void bmp_mirror_cull(struct bmp_bgp *bmpbgp)
{
	while (bmpbgp->mirror_qsize > bmpbgp->mirror_qsizelimit) {
		struct bmp_mirrorq *bmq, *inner;
		struct bmp_targets *bt;
		struct bmp *bmp;

		/* oldest item — some session still references it */
		bmq = bmp_mirrorq_first(&bmpbgp->mirrorq);

		frr_each(bmp_targets, &bmpbgp->targets, bt) {
			if (!bt->mirror)
				continue;
			frr_each(bmp_session, &bt->sessions, bmp) {
				if (bmp->mirrorpos != bmq)
					continue;

				/* this session is the laggard; skip it
				 * to the end of the queue, freeing
				 * items nobody references any more */
				while ((inner = bmp_pull_mirror(bmp))) {
					if (!inner->refcount)
						XFREE(MTYPE_BMP_MIRRORQ,
								inner);
				}

				zlog_warn("bmp[%s] lost mirror messages due to buffer size limit",
						bmp->remote);
				bmp->mirror_lost = true;
				pullwr_bump(bmp->pullwr);
			}
		}
	}
}
+
/* Hook: BGP packet received from a peer.
 *
 * Stores a copy of RX OPEN messages for later Peer Up reporting (this
 * happens even when no BMP state exists for the bgp instance — the
 * bmpbgp check comes after), then appends the packet to the mirror
 * queue of every targets group that has mirroring enabled, waking the
 * affected sessions.  Always returns 0.
 */
static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size,
		struct stream *packet)
{
	struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
	struct timeval tv;
	struct bmp_mirrorq *qitem;
	struct bmp_targets *bt;
	struct bmp *bmp;

	gettimeofday(&tv, NULL);

	if (type == BGP_MSG_OPEN) {
		struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);

		/* replace any previously captured RX OPEN */
		XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);

		bbpeer->open_rx_len = size;
		bbpeer->open_rx = XMALLOC(MTYPE_BMP_OPEN, size);
		memcpy(bbpeer->open_rx, packet->data, size);
	}

	if (!bmpbgp)
		return 0;

	/* queue item carries the raw packet inline (flexible array) */
	qitem = XCALLOC(MTYPE_BMP_MIRRORQ, sizeof(*qitem) + size);
	qitem->peerid = peer->qobj_node.nid;
	qitem->tv = tv;
	qitem->len = size;
	memcpy(qitem->data, packet->data, size);

	frr_each(bmp_targets, &bmpbgp->targets, bt) {
		if (!bt->mirror)
			continue;
		frr_each(bmp_session, &bt->sessions, bmp) {
			qitem->refcount++;
			if (!bmp->mirrorpos)
				bmp->mirrorpos = qitem;
			pullwr_bump(bmp->pullwr);
		}
	}
	if (qitem->refcount == 0)
		/* no session with mirroring active — drop immediately */
		XFREE(MTYPE_BMP_MIRRORQ, qitem);
	else {
		bmpbgp->mirror_qsize += sizeof(*qitem) + size;
		bmp_mirrorq_add_tail(&bmpbgp->mirrorq, qitem);

		bmp_mirror_cull(bmpbgp);

		bmpbgp->mirror_qsizemax = MAX(bmpbgp->mirror_qsizemax,
				bmpbgp->mirror_qsize);
	}
	return 0;
}
+
/* Send a Route Mirroring information message telling the collector
 * that mirror messages were dropped (buffer limit hit).  Attributed to
 * peer_self since no single peer is responsible. */
static void bmp_wrmirror_lost(struct bmp *bmp, struct pullwr *pullwr)
{
	struct stream *s;
	struct timeval tv;

	gettimeofday(&tv, NULL);

	s = stream_new(BGP_MAX_PACKET_SIZE);

	bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
	bmp_per_peer_hdr(s, bmp->targets->bgp->peer_self, 0, &tv);

	/* Information TLV: type, length 2, "messages lost" code */
	stream_putw(s, BMP_MIRROR_TLV_TYPE_INFO);
	stream_putw(s, 2);
	stream_putw(s, BMP_MIRROR_INFO_CODE_LOSTMSGS);
	stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s));

	bmp->cnt_mirror_overruns++;
	pullwr_write_stream(bmp->pullwr, s);
	stream_free(s);
}
+
/* Write one pending mirror item for this session, if any.  Returns
 * true if something was written (including a "messages lost" notice),
 * false when the mirror queue is empty for this session. */
static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
{
	struct bmp_mirrorq *bmq;
	struct peer *peer;
	bool written = false;

	/* overflow notice takes precedence over queued data */
	if (bmp->mirror_lost) {
		bmp_wrmirror_lost(bmp, pullwr);
		bmp->mirror_lost = false;
		return true;
	}

	bmq = bmp_pull_mirror(bmp);
	if (!bmq)
		return false;

	peer = QOBJ_GET_TYPESAFE(bmq->peerid, peer);
	if (!peer) {
		/* peer may have been deleted since the item was queued */
		zlog_info("bmp: skipping mirror message for deleted peer");
		goto out;
	}

	struct stream *s;
	s = stream_new(BGP_MAX_PACKET_SIZE);

	bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
	bmp_per_peer_hdr(s, peer, 0, &bmq->tv);

	/* BMP Mirror TLV. */
	stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE);
	stream_putw(s, bmq->len);
	/* total length covers the header stream plus the raw packet
	 * appended below */
	stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s) + bmq->len);

	bmp->cnt_mirror++;
	pullwr_write_stream(bmp->pullwr, s);
	pullwr_write(bmp->pullwr, bmq->data, bmq->len);

	stream_free(s);
	written = true;

out:
	/* bmp_pull_mirror dropped our reference; free if we were last */
	if (!bmq->refcount)
		XFREE(MTYPE_BMP_MIRRORQ, bmq);
	return written;
}
+
+static int bmp_outgoing_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
+{
+ if (type == BGP_MSG_OPEN) {
+ struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);
+
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+
+ bbpeer->open_tx_len = size;
+ bbpeer->open_tx = XMALLOC(MTYPE_BMP_OPEN, size);
+ memcpy(bbpeer->open_tx, packet->data, size);
+ }
+ return 0;
+}
+
/* Hook: peer transitioned to Established.
 *
 * If the session was set up through a doppelganger peer (connection
 * collision handling), the OPEN messages were captured on that other
 * peer structure — move them over before the doppelganger's BMP state
 * is destroyed.  Then send Peer Up to all sessions.  Returns 0.
 */
static int bmp_peer_established(struct peer *peer)
{
	struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);

	if (!bmpbgp)
		return 0;

	if (peer->doppelganger && (peer->doppelganger->status != Deleted)) {
		struct bmp_bgp_peer *bbpeer, *bbdopp;

		bbpeer = bmp_bgp_peer_get(peer);
		bbdopp = bmp_bgp_peer_find(peer->doppelganger->qobj_node.nid);
		if (bbdopp) {
			/* adopt the doppelganger's captured OPENs */
			XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
			XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);

			bbpeer->open_tx = bbdopp->open_tx;
			bbpeer->open_tx_len = bbdopp->open_tx_len;
			bbpeer->open_rx = bbdopp->open_rx;
			bbpeer->open_rx_len = bbdopp->open_rx_len;

			bmp_peerh_del(&bmp_peerh, bbdopp);
			XFREE(MTYPE_BMP_PEER, bbdopp);
		}
	}

	bmp_send_all(bmpbgp, bmp_peerstate(peer, false));
	return 0;
}
+
/* Hook: peer fell back from Established (backward FSM transition).
 * Discard the stored OPEN messages (they are stale now) and send a
 * Peer Down to all sessions.  Returns 0. */
static int bmp_peer_backward(struct peer *peer)
{
	struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
	struct bmp_bgp_peer *bbpeer;

	if (!bmpbgp)
		return 0;

	bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
	if (bbpeer) {
		XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
		bbpeer->open_tx_len = 0;
		XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
		bbpeer->open_rx_len = 0;
	}

	bmp_send_all(bmpbgp, bmp_peerstate(peer, true));
	return 0;
}
+
/* Send an End-of-RIB marker for afi/safi to this session, once per
 * peer that negotiated the address family.
 *
 * The EoR is a synthetic empty BGP UPDATE (for IPv4 unicast: zero
 * withdrawn + zero attributes; otherwise: an empty MP_UNREACH_NLRI
 * attribute), shared across peers and prefixed with a fresh BMP
 * header per peer.  'flags' selects pre/post-policy (L bit).
 */
static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags)
{
	struct peer *peer;
	struct listnode *node;
	struct stream *s, *s2;
	iana_afi_t pkt_afi;
	iana_safi_t pkt_safi;

	s = stream_new(BGP_MAX_PACKET_SIZE);

	/* Make BGP update packet. */
	bgp_packet_set_marker(s, BGP_MSG_UPDATE);

	/* Unfeasible Routes Length */
	stream_putw(s, 0);

	if (afi == AFI_IP && safi == SAFI_UNICAST) {
		/* Total Path Attribute Length */
		stream_putw(s, 0);
	} else {
		/* Convert AFI, SAFI to values for packet. */
		bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi);

		/* Total Path Attribute Length */
		stream_putw(s, 6);
		stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
		stream_putc(s, BGP_ATTR_MP_UNREACH_NLRI);
		stream_putc(s, 3);
		stream_putw(s, pkt_afi);
		stream_putc(s, pkt_safi);
	}

	bgp_packet_set_size(s);

	for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
		if (!peer->afc_nego[afi][safi])
			continue;

		/* per-peer BMP header wrapping the shared BGP update */
		s2 = stream_new(BGP_MAX_PACKET_SIZE);

		bmp_common_hdr(s2, BMP_VERSION_3,
				BMP_TYPE_ROUTE_MONITORING);
		bmp_per_peer_hdr(s2, peer, flags, NULL);

		/* BMP length spans header stream plus the BGP update */
		stream_putl_at(s2, BMP_LENGTH_POS,
				stream_get_endp(s) + stream_get_endp(s2));

		bmp->cnt_update++;
		pullwr_write_stream(bmp->pullwr, s2);
		pullwr_write_stream(bmp->pullwr, s);
		stream_free(s2);
	}
	stream_free(s);
}
+
/* Re-encode a BGP UPDATE announcing prefix 'p' with attributes 'attr'
 * for Route Monitoring.  Returns a new stream owned by the caller.
 * IPv4 unicast uses classic NLRI encoding; everything else goes into
 * an MP_REACH_NLRI attribute.  (add-path and MPLS labels are not
 * encoded here.) */
static struct stream *bmp_update(struct prefix *p, struct peer *peer,
		struct attr *attr, afi_t afi, safi_t safi)
{
	struct bpacket_attr_vec_arr vecarr;
	struct stream *s;
	size_t attrlen_pos = 0, mpattrlen_pos = 0;
	bgp_size_t total_attr_len = 0;

	bpacket_attr_vec_arr_reset(&vecarr);

	s = stream_new(BGP_MAX_PACKET_SIZE);
	bgp_packet_set_marker(s, BGP_MSG_UPDATE);

	/* 2: withdrawn routes length */
	stream_putw(s, 0);

	/* 3: total attributes length - attrlen_pos stores the position */
	attrlen_pos = stream_get_endp(s);
	stream_putw(s, 0);

	/* 5: Encode all the attributes, except MP_REACH_NLRI attr. */
	total_attr_len = bgp_packet_attribute(NULL, peer, s, attr,
			&vecarr, NULL, afi, safi, peer, NULL, NULL, 0, 0, 0);

	/* space check? */

	/* peer_cap_enhe & add-path removed */
	if (afi == AFI_IP && safi == SAFI_UNICAST)
		stream_put_prefix(s, p);
	else {
		size_t p1 = stream_get_endp(s);

		/* MPLS removed for now */

		mpattrlen_pos = bgp_packet_mpattr_start(s, peer, afi, safi,
				&vecarr, attr);
		bgp_packet_mpattr_prefix(s, afi, safi, p, NULL, NULL, 0,
				0, 0, attr);
		bgp_packet_mpattr_end(s, mpattrlen_pos);
		total_attr_len += stream_get_endp(s) - p1;
	}

	/* set the total attribute length correctly */
	stream_putw_at(s, attrlen_pos, total_attr_len);
	bgp_packet_set_size(s);
	return s;
}
+
/* Encode a BGP UPDATE withdrawing prefix 'p' for Route Monitoring.
 * Returns a new stream owned by the caller.  IPv4 unicast uses the
 * classic withdrawn-routes field; everything else goes into an
 * MP_UNREACH_NLRI attribute. */
static struct stream *bmp_withdraw(struct prefix *p, afi_t afi, safi_t safi)
{
	struct stream *s;
	size_t attrlen_pos = 0, mp_start, mplen_pos;
	bgp_size_t total_attr_len = 0;
	bgp_size_t unfeasible_len;

	s = stream_new(BGP_MAX_PACKET_SIZE);

	bgp_packet_set_marker(s, BGP_MSG_UPDATE);
	/* withdrawn routes length placeholder */
	stream_putw(s, 0);

	if (afi == AFI_IP && safi == SAFI_UNICAST) {
		stream_put_prefix(s, p);
		/* back-patch the withdrawn routes length ... */
		unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
				- BGP_UNFEASIBLE_LEN;
		stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
		/* ... and zero total path attribute length */
		stream_putw(s, 0);
	} else {
		attrlen_pos = stream_get_endp(s);
		/* total attr length = 0 for now. reevaluate later */
		stream_putw(s, 0);
		mp_start = stream_get_endp(s);
		mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);

		bgp_packet_mpunreach_prefix(s, p, afi, safi, NULL, NULL, 0,
				0, 0, NULL);
		/* Set the mp_unreach attr's length */
		bgp_packet_mpunreach_end(s, mplen_pos);

		/* Set total path attribute length. */
		total_attr_len = stream_get_endp(s) - mp_start;
		stream_putw_at(s, attrlen_pos, total_attr_len);
	}

	bgp_packet_set_size(s);
	return s;
}
+
/* Send one Route Monitoring message for (peer, prefix) to this
 * session: an update when 'attr' is non-NULL, a withdraw otherwise.
 * 'flags' carries the pre/post-policy L bit; 'uptime' (monotonic) is
 * used as the message timestamp. */
static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags,
		struct prefix *p, struct attr *attr, afi_t afi,
		safi_t safi, time_t uptime)
{
	struct stream *hdr, *msg;
	struct timeval tv = { .tv_sec = uptime, .tv_usec = 0 };

	if (attr)
		msg = bmp_update(p, peer, attr, afi, safi);
	else
		msg = bmp_withdraw(p, afi, safi);

	hdr = stream_new(BGP_MAX_PACKET_SIZE);
	bmp_common_hdr(hdr, BMP_VERSION_3, BMP_TYPE_ROUTE_MONITORING);
	bmp_per_peer_hdr(hdr, peer, flags, &tv);

	/* BMP length covers header stream plus the BGP message */
	stream_putl_at(hdr, BMP_LENGTH_POS,
			stream_get_endp(hdr) + stream_get_endp(msg));

	bmp->cnt_update++;
	pullwr_write_stream(bmp->pullwr, hdr);
	pullwr_write_stream(bmp->pullwr, msg);
	stream_free(hdr);
	stream_free(msg);
}
+
/* Incremental initial table sync, one (prefix, peer) step per call.
 *
 * State lives in bmp->syncafi/syncsafi/syncpos/syncpeerid so the walk
 * resumes where it left off.  Within one prefix, paths are emitted in
 * ascending peer-id order, interleaving post-policy (rib) and
 * pre-policy (adj-in) entries as configured.  When a table finishes,
 * EoR markers are sent and the afi/safi goes LIVE.
 *
 * Returns true if it wrote something (or made progress), false when
 * no afi/safi needs syncing.
 */
static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr)
{
	afi_t afi;
	safi_t safi;

	/* no sync in progress? pick the next afi/safi needing one */
	if (bmp->syncafi == AFI_MAX) {
		FOREACH_AFI_SAFI (afi, safi) {
			if (bmp->afistate[afi][safi] != BMP_AFI_NEEDSYNC)
				continue;

			bmp->afistate[afi][safi] = BMP_AFI_SYNC;

			bmp->syncafi = afi;
			bmp->syncsafi = safi;
			bmp->syncpeerid = 0;
			memset(&bmp->syncpos, 0, sizeof(bmp->syncpos));
			bmp->syncpos.family = afi2family(afi);
			zlog_info("bmp[%s] %s %s sending table",
					bmp->remote,
					afi2str(bmp->syncafi),
					safi2str(bmp->syncsafi));
			/* break does not work here, 2 loops... */
			goto afibreak;
		}
		if (bmp->syncafi == AFI_MAX)
			return false;
	}

afibreak:
	afi = bmp->syncafi;
	safi = bmp->syncsafi;

	if (!bmp->targets->afimon[afi][safi]) {
		/* shouldn't happen */
		bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
		bmp->syncafi = AFI_MAX;
		bmp->syncsafi = SAFI_MAX;
		return true;
	}

	struct bgp_table *table = bmp->targets->bgp->rib[afi][safi];
	struct bgp_node *bn;
	struct bgp_path_info *bpi = NULL, *bpiter;
	struct bgp_adj_in *adjin = NULL, *adjiter;

	/* NOTE(review): bgp_node_lookup()/bgp_table_get_next() return
	 * locked nodes; no matching bgp_unlock_node() is visible here —
	 * verify node lock accounting. */
	bn = bgp_node_lookup(table, &bmp->syncpos);
	do {
		if (!bn) {
			/* current prefix exhausted, move to the next;
			 * end of table => send EoR and finish */
			bn = bgp_table_get_next(table, &bmp->syncpos);
			if (!bn) {
				zlog_info("bmp[%s] %s %s table completed (EoR)",
						bmp->remote, afi2str(afi),
						safi2str(safi));
				bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L);
				bmp_eor(bmp, afi, safi, 0);

				bmp->afistate[afi][safi] = BMP_AFI_LIVE;
				bmp->syncafi = AFI_MAX;
				bmp->syncsafi = SAFI_MAX;
				return true;
			}
			bmp->syncpeerid = 0;
			prefix_copy(&bmp->syncpos, &bn->p);
		}

		/* find the lowest peer-id entry above syncpeerid, in
		 * the rib (post-policy) ... */
		if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
			for (bpiter = bn->info; bpiter; bpiter = bpiter->next) {
				if (!CHECK_FLAG(bpiter->flags, BGP_PATH_VALID))
					continue;
				if (bpiter->peer->qobj_node.nid
						<= bmp->syncpeerid)
					continue;
				if (bpi && bpiter->peer->qobj_node.nid
						> bpi->peer->qobj_node.nid)
					continue;
				bpi = bpiter;
			}
		}
		/* ... and in adj-in (pre-policy) */
		if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
			for (adjiter = bn->adj_in; adjiter;
					adjiter = adjiter->next) {
				if (adjiter->peer->qobj_node.nid
						<= bmp->syncpeerid)
					continue;
				if (adjin && adjiter->peer->qobj_node.nid
						> adjin->peer->qobj_node.nid)
					continue;
				adjin = adjiter;
			}
		}
		if (bpi || adjin)
			break;

		bn = NULL;
	} while (1);

	/* advance syncpeerid to the lower of the two candidates; if
	 * both have the same peer, send both this step */
	if (adjin && bpi
			&& adjin->peer->qobj_node.nid < bpi->peer->qobj_node.nid) {
		bpi = NULL;
		bmp->syncpeerid = adjin->peer->qobj_node.nid;
	} else if (adjin && bpi
			&& adjin->peer->qobj_node.nid > bpi->peer->qobj_node.nid) {
		adjin = NULL;
		bmp->syncpeerid = bpi->peer->qobj_node.nid;
	} else if (bpi) {
		bmp->syncpeerid = bpi->peer->qobj_node.nid;
	} else if (adjin) {
		bmp->syncpeerid = adjin->peer->qobj_node.nid;
	}

	if (bpi)
		bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, &bn->p, bpi->attr,
				afi, safi, bpi->uptime);
	if (adjin)
		bmp_monitor(bmp, adjin->peer, 0, &bn->p, adjin->attr,
				afi, safi, adjin->uptime);

	return true;
}
+
+static struct bmp_queue_entry *bmp_pull(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+
+ bqe = bmp->queuepos;
+ if (!bqe)
+ return NULL;
+
+ bmp->queuepos = bmp_qlist_next(&bmp->targets->updlist, bqe);
+
+ bqe->refcount--;
+ if (!bqe->refcount) {
+ bmp_qhash_del(&bmp->targets->updhash, bqe);
+ bmp_qlist_del(&bmp->targets->updlist, bqe);
+ }
+ return bqe;
+}
+
/* Write one queued live route update for this session, if any.
 * Looks up the current state of (prefix, peer) in the rib/adj-in and
 * sends that (update or withdraw) — the queue entry only records
 * *what* changed, not the data itself.  Returns true if a monitoring
 * message was written. */
static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
{
	struct bmp_queue_entry *bqe;
	struct peer *peer;
	struct bgp_node *bn;
	bool written = false;

	bqe = bmp_pull(bmp);
	if (!bqe)
		return false;

	afi_t afi = bqe->afi;
	safi_t safi = bqe->safi;

	switch (bmp->afistate[afi][safi]) {
	case BMP_AFI_INACTIVE:
	case BMP_AFI_NEEDSYNC:
		goto out;
	case BMP_AFI_SYNC:
		if (prefix_cmp(&bqe->p, &bmp->syncpos) <= 0)
			/* currently syncing but have already passed this
			 * prefix => send it. */
			break;

		/* currently syncing & haven't reached this prefix yet
		 * => it'll be sent as part of the table sync, no need here */
		goto out;
	case BMP_AFI_LIVE:
		break;
	}

	peer = QOBJ_GET_TYPESAFE(bqe->peerid, peer);
	if (!peer) {
		zlog_info("bmp: skipping queued item for deleted peer");
		goto out;
	}
	if (peer->status != Established)
		goto out;

	/* NOTE(review): bgp_node_lookup() returns a locked node (or
	 * NULL if the prefix was removed); no bgp_unlock_node() is
	 * visible here — verify node lock accounting. */
	bn = bgp_node_lookup(bmp->targets->bgp->rib[afi][safi], &bqe->p);

	if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
		struct bgp_path_info *bpi;

		for (bpi = bn ? bn->info : NULL; bpi; bpi = bpi->next) {
			if (!CHECK_FLAG(bpi->flags, BGP_PATH_VALID))
				continue;
			if (bpi->peer == peer)
				break;
		}

		/* no path found => withdraw; timestamp falls back to
		 * "now" since a withdrawn path has no uptime */
		bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
				bpi ? bpi->attr : NULL, afi, safi,
				bpi ? bpi->uptime : monotime(NULL));
		written = true;
	}

	if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
		struct bgp_adj_in *adjin;

		for (adjin = bn ? bn->adj_in : NULL; adjin;
				adjin = adjin->next) {
			if (adjin->peer == peer)
				break;
		}
		bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
				adjin ? adjin->attr : NULL, afi, safi,
				adjin ? adjin->uptime : monotime(NULL));
		written = true;
	}

out:
	/* bmp_pull dropped our reference; free if we were the last */
	if (!bqe->refcount)
		XFREE(MTYPE_BMP_QUEUE, bqe);
	return written;
}
+
+static void bmp_wrfill(struct bmp *bmp, struct pullwr *pullwr)
+{
+ switch(bmp->state) {
+ case BMP_PeerUp:
+ bmp_send_peerup(bmp);
+ bmp->state = BMP_Run;
+ break;
+
+ case BMP_Run:
+ if (bmp_wrmirror(bmp, pullwr))
+ break;
+ if (bmp_wrqueue(bmp, pullwr))
+ break;
+ if (bmp_wrsync(bmp, pullwr))
+ break;
+ break;
+ }
+}
+
+static void bmp_wrerr(struct bmp *bmp, struct pullwr *pullwr, bool eof)
+{
+ if (eof)
+ zlog_info("bmp[%s] disconnected", bmp->remote);
+ else
+ flog_warn(EC_LIB_SYSTEM_CALL, "bmp[%s] connection error: %s",
+ bmp->remote, strerror(errno));
+
+ bmp_close(bmp);
+ bmp_free(bmp);
+}
+
+static void bmp_process_one(struct bmp_targets *bt, struct bgp *bgp,
+ afi_t afi, safi_t safi, struct bgp_node *bn, struct peer *peer)
+{
+ struct bmp *bmp;
+ struct bmp_queue_entry *bqe, bqeref;
+ size_t refcount;
+ char buf[256];
+
+ prefix2str(&bn->p, buf, sizeof(buf));
+
+ refcount = bmp_session_count(&bt->sessions);
+ if (refcount == 0)
+ return;
+
+ memset(&bqeref, 0, sizeof(bqeref));
+ prefix_copy(&bqeref.p, &bn->p);
+ bqeref.peerid = peer->qobj_node.nid;
+ bqeref.afi = afi;
+ bqeref.safi = safi;
+
+ bqe = bmp_qhash_find(&bt->updhash, &bqeref);
+ if (bqe) {
+ if (bqe->refcount >= refcount)
+ /* nothing to do here */
+ return;
+
+ bmp_qlist_del(&bt->updlist, bqe);
+ } else {
+ bqe = XMALLOC(MTYPE_BMP_QUEUE, sizeof(*bqe));
+ memcpy(bqe, &bqeref, sizeof(*bqe));
+
+ bmp_qhash_add(&bt->updhash, bqe);
+ }
+
+ bqe->refcount = refcount;
+ bmp_qlist_add_tail(&bt->updlist, bqe);
+
+ frr_each (bmp_session, &bt->sessions, bmp)
+ if (!bmp->queuepos)
+ bmp->queuepos = bqe;
+}
+
+static int bmp_process(struct bgp *bgp, afi_t afi, safi_t safi,
+ struct bgp_node *bn, struct peer *peer, bool withdraw)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ if (!bmpbgp)
+ return 0;
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->afimon[afi][safi])
+ continue;
+
+ bmp_process_one(bt, bgp, afi, safi, bn, peer);
+
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ return 0;
+}
+
/* Append one 32-bit counter TLV (type, length=4, value) to a stats
 * report and bump the caller's TLV count. */
static void bmp_stat_put_u32(struct stream *s, size_t *cnt, uint16_t type,
		uint32_t value)
{
	stream_putw(s, type);
	stream_putw(s, sizeof(value));
	stream_putl(s, value);
	(*cnt)++;
}
+
/* Timer callback: send a Statistics Report for every Established peer
 * of this targets group to all its sessions, then re-arm the timer
 * (unless the interval was unset meanwhile).  Returns 0. */
static int bmp_stats(struct thread *thread)
{
	struct bmp_targets *bt = THREAD_ARG(thread);
	struct stream *s;
	struct peer *peer;
	struct listnode *node;
	struct timeval tv;

	/* re-arm first so a long walk doesn't delay the next round */
	if (bt->stat_msec)
		thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
				&bt->t_stats);

	gettimeofday(&tv, NULL);

	/* Walk down all peers */
	for (ALL_LIST_ELEMENTS_RO(bt->bgp->peer, node, peer)) {
		size_t count = 0, count_pos, len;

		if (peer->status != Established)
			continue;

		s = stream_new(BGP_MAX_PACKET_SIZE);
		bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_STATISTICS_REPORT);
		bmp_per_peer_hdr(s, peer, 0, &tv);

		/* placeholder for the Stats Count field */
		count_pos = stream_get_endp(s);
		stream_putl(s, 0);

		bmp_stat_put_u32(s, &count, BMP_STATS_PFX_REJECTED,
				peer->stat_pfx_filter);
		bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ASPATH,
				peer->stat_pfx_aspath_loop);
		bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ORIGINATOR,
				peer->stat_pfx_originator_loop);
		bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_CLUSTER,
				peer->stat_pfx_cluster_loop);
		bmp_stat_put_u32(s, &count, BMP_STATS_PFX_DUP_WITHDRAW,
				peer->stat_pfx_dup_withdraw);
		bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW,
				peer->stat_upd_7606);
		bmp_stat_put_u32(s, &count, BMP_STATS_FRR_NH_INVALID,
				peer->stat_pfx_nh_invalid);

		stream_putl_at(s, count_pos, count);

		len = stream_get_endp(s);
		stream_putl_at(s, BMP_LENGTH_POS, len);

		/* bmp_send_all frees 's' */
		bmp_send_all(bt->bmpbgp, s);
	}
	return 0;
}
+
/* Finish setting up an accepted BMP connection: apply the configured
 * v4/v6 access-lists, tune the socket, allocate session state, hook
 * up the pull-writer and send the Initiation message.
 *
 * Returns the new session, or NULL (socket closed) if the peer
 * address can't be determined or is denied by ACL.
 */
static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
{
	union sockunion su, *sumem;
	struct prefix p;
	int on = 1;
	struct access_list *acl = NULL;
	enum filter_type ret;
	char buf[SU_ADDRSTRLEN];
	struct bmp *bmp;

	sumem = sockunion_getpeername(bmp_sock);
	if (!sumem) {
		close(bmp_sock);
		return NULL;
	}
	memcpy(&su, sumem, sizeof(su));
	sockunion_free(sumem);

	set_nonblocking(bmp_sock);
	set_cloexec(bmp_sock);
	/* BMP is send-only; we never read from the collector */
	shutdown(bmp_sock, SHUT_RD);

	/* NOTE(review): return value of sockunion2hostprefix() is not
	 * checked — confirm it cannot fail for an accepted socket. */
	sockunion2hostprefix(&su, &p);

	acl = NULL;
	switch (p.family) {
	case AF_INET:
		acl = access_list_lookup(AFI_IP, bt->acl_name);
		break;
	case AF_INET6:
		acl = access_list_lookup(AFI_IP6, bt->acl6_name);
		break;
	default:
		break;
	}

	/* no ACL configured/found => permit */
	ret = FILTER_PERMIT;
	if (acl) {
		ret = access_list_apply(acl, &p);
	}

	/* "address:port" identifier used in all log messages */
	sockunion2str(&su, buf, SU_ADDRSTRLEN);
	snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ":%u",
			su.sa.sa_family == AF_INET
				? ntohs(su.sin.sin_port)
				: ntohs(su.sin6.sin6_port));

	if (ret == FILTER_DENY) {
		bt->cnt_aclrefused++;
		zlog_info("bmp[%s] connection refused by access-list", buf);
		close(bmp_sock);
		return NULL;
	}
	bt->cnt_accept++;

	setsockopt(bmp_sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(bmp_sock, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));

	zlog_info("bmp[%s] connection established", buf);

	/* Allocate new BMP structure and set up default values. */
	bmp = bmp_new(bt, bmp_sock);
	strlcpy(bmp->remote, buf, sizeof(bmp->remote));

	bmp->state = BMP_PeerUp;
	bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
			bmp_wrerr);
	bmp_send_initiation(bmp);

	return bmp;
}
+
+/* Accept an inbound BMP connection on a listener socket and hand it to
+ * bmp_open() for session setup.
+ */
+static int bmp_accept(struct thread *thread)
+{
+ union sockunion su;
+ struct bmp_listener *bl = THREAD_ARG(thread);
+ int bmp_sock;
+
+ /* We continue hearing BMP socket. */
+ thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+
+ memset(&su, 0, sizeof(union sockunion));
+
+ /* We can handle IPv4 or IPv6 socket. */
+ bmp_sock = sockunion_accept(bl->sock, &su);
+ if (bmp_sock < 0) {
+ /* zlog messages carry no trailing newline by convention */
+ zlog_info("bmp: accept_sock failed: %s",
+ safe_strerror(errno));
+ return -1;
+ }
+ bmp_open(bl->targets, bmp_sock);
+ return 0;
+}
+
+/* Tear down a BMP session: drop our queue references, stop I/O and close
+ * the socket. Does not free the struct bmp itself (caller does that).
+ */
+static void bmp_close(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+ struct bmp_mirrorq *bmq;
+
+ if (bmp->active)
+ bmp_active_disconnected(bmp->active);
+
+ /* queue items are freed only once no other session still wants them */
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ while ((bqe = bmp_pull(bmp)))
+ if (!bqe->refcount)
+ XFREE(MTYPE_BMP_QUEUE, bqe);
+
+ THREAD_OFF(bmp->t_read);
+ pullwr_del(bmp->pullwr);
+ close(bmp->socket);
+}
+
+/* lookup per-instance BMP state by "struct bgp *" */
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp)
+{
+ struct bmp_bgp dummy = { .bgp = bgp };
+ return bmp_bgph_find(&bmp_bgph, &dummy);
+}
+
+/* find-or-create per-instance BMP state */
+static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_find(bgp);
+ if (bmpbgp)
+ return bmpbgp;
+
+ bmpbgp = XCALLOC(MTYPE_BMP, sizeof(*bmpbgp));
+ bmpbgp->bgp = bgp;
+ /* ~0UL == no mirror buffer limit configured */
+ bmpbgp->mirror_qsizelimit = ~0UL;
+ bmp_mirrorq_init(&bmpbgp->mirrorq);
+ bmp_bgph_add(&bmp_bgph, bmpbgp);
+
+ return bmpbgp;
+}
+
+/* destroy per-instance state, including all of its targets groups */
+static void bmp_bgp_put(struct bmp_bgp *bmpbgp)
+{
+ struct bmp_targets *bt;
+
+ bmp_bgph_del(&bmp_bgph, bmpbgp);
+
+ frr_each_safe(bmp_targets, &bmpbgp->targets, bt)
+ bmp_targets_put(bt);
+
+ bmp_mirrorq_fini(&bmpbgp->mirrorq);
+ XFREE(MTYPE_BMP, bmpbgp);
+}
+
+/* hook: BGP instance deleted */
+static int bmp_bgp_del(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+
+ if (bmpbgp)
+ bmp_bgp_put(bmpbgp);
+ return 0;
+}
+
+/* lookup per-peer BMP state (stored OPEN messages) by qobj node id */
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid)
+{
+ struct bmp_bgp_peer dummy = { .peerid = peerid };
+ return bmp_peerh_find(&bmp_peerh, &dummy);
+}
+
+/* find-or-create per-peer BMP state, keyed by the peer's qobj node id */
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer)
+{
+ struct bmp_bgp_peer *bbpeer;
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+ if (bbpeer)
+ return bbpeer;
+
+ bbpeer = XCALLOC(MTYPE_BMP_PEER, sizeof(*bbpeer));
+ bbpeer->peerid = peer->qobj_node.nid;
+ bmp_peerh_add(&bmp_peerh, bbpeer);
+
+ return bbpeer;
+}
+
+/* lookup a targets group by name within a BGP instance */
+static struct bmp_targets *bmp_targets_find1(struct bgp *bgp, const char *name)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets dummy;
+
+ if (!bmpbgp)
+ return NULL;
+ dummy.name = (char *)name;
+ return bmp_targets_find(&bmpbgp->targets, &dummy);
+}
+
+/* find-or-create a targets group; initializes all of its containers */
+static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name)
+{
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, name);
+ if (bt)
+ return bt;
+
+ bt = XCALLOC(MTYPE_BMP_TARGETS, sizeof(*bt));
+ bt->name = XSTRDUP(MTYPE_BMP_TARGETSNAME, name);
+ bt->bgp = bgp;
+ bt->bmpbgp = bmp_bgp_get(bgp);
+ bmp_session_init(&bt->sessions);
+ bmp_qhash_init(&bt->updhash);
+ bmp_qlist_init(&bt->updlist);
+ bmp_actives_init(&bt->actives);
+ bmp_listeners_init(&bt->listeners);
+
+ QOBJ_REG(bt, bmp_targets);
+ bmp_targets_add(&bt->bmpbgp->targets, bt);
+ return bt;
+}
+
+/* destroy a targets group: closes all sessions and outbound connections,
+ * then frees containers, ACL names and the group itself
+ */
+static void bmp_targets_put(struct bmp_targets *bt)
+{
+ struct bmp *bmp;
+ struct bmp_active *ba;
+
+ frr_each_safe (bmp_actives, &bt->actives, ba)
+ bmp_active_put(ba);
+
+ frr_each_safe(bmp_session, &bt->sessions, bmp) {
+ bmp_close(bmp);
+ bmp_free(bmp);
+ }
+
+ bmp_targets_del(&bt->bmpbgp->targets, bt);
+ QOBJ_UNREG(bt);
+
+ bmp_listeners_fini(&bt->listeners);
+ bmp_actives_fini(&bt->actives);
+ bmp_qhash_fini(&bt->updhash);
+ bmp_qlist_fini(&bt->updlist);
+
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl_name);
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl6_name);
+ bmp_session_fini(&bt->sessions);
+
+ XFREE(MTYPE_BMP_TARGETSNAME, bt->name);
+ XFREE(MTYPE_BMP_TARGETS, bt);
+}
+
+/* lookup a passive listener by (address, port).
+ * NOTE(review): dummy only initializes addr and port; presumably the
+ * sortlist compare function keys on exactly those fields -- confirm.
+ */
+static struct bmp_listener *bmp_listener_find(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener dummy;
+ dummy.addr = *su;
+ dummy.port = port;
+ return bmp_listeners_find(&bt->listeners, &dummy);
+}
+
+/* find-or-create a listener entry; socket is not opened here (sock = -1
+ * until bmp_listener_start())
+ */
+static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener *bl = bmp_listener_find(bt, su, port);
+
+ if (bl)
+ return bl;
+
+ bl = XCALLOC(MTYPE_BMP_LISTENER, sizeof(*bl));
+ bl->targets = bt;
+ bl->addr = *su;
+ bl->port = port;
+ bl->sock = -1;
+
+ bmp_listeners_add(&bt->listeners, bl);
+ return bl;
+}
+
+/* unlink and free a listener entry (caller stops the socket first) */
+static void bmp_listener_put(struct bmp_listener *bl)
+{
+ bmp_listeners_del(&bl->targets->listeners, bl);
+ XFREE(MTYPE_BMP_LISTENER, bl);
+}
+
+/* open + bind + listen the configured socket and arm the accept thread.
+ * On any failure the socket is closed and bl->sock stays -1 (silently).
+ */
+static void bmp_listener_start(struct bmp_listener *bl)
+{
+ int sock, ret;
+
+ sock = socket(bl->addr.sa.sa_family, SOCK_STREAM, 0);
+ if (sock < 0)
+ return;
+
+ sockopt_reuseaddr(sock);
+ sockopt_reuseport(sock);
+ sockopt_v6only(bl->addr.sa.sa_family, sock);
+ set_cloexec(sock);
+
+ ret = sockunion_bind(sock, &bl->addr, bl->port, &bl->addr);
+ if (ret < 0)
+ goto out_sock;
+
+ ret = listen(sock, 3);
+ if (ret < 0)
+ goto out_sock;
+
+ bl->sock = sock;
+ thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+ return;
+out_sock:
+ close(sock);
+}
+
+/* stop accepting and close the listening socket */
+static void bmp_listener_stop(struct bmp_listener *bl)
+{
+ THREAD_OFF(bl->t_accept);
+
+ if (bl->sock != -1)
+ close(bl->sock);
+ bl->sock = -1;
+}
+
+/* lookup an outbound-connection config by (hostname, port) */
+static struct bmp_active *bmp_active_find(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active dummy;
+ dummy.hostname = (char *)hostname;
+ dummy.port = port;
+ return bmp_actives_find(&bt->actives, &dummy);
+}
+
+/* find-or-create outbound-connection config with default retry bounds */
+static struct bmp_active *bmp_active_get(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active *ba;
+
+ ba = bmp_active_find(bt, hostname, port);
+ if (ba)
+ return ba;
+
+ ba = XCALLOC(MTYPE_BMP_ACTIVE, sizeof(*ba));
+ ba->targets = bt;
+ ba->hostname = XSTRDUP(MTYPE_TMP, hostname);
+ ba->port = port;
+ ba->minretry = BMP_DFLT_MINRETRY;
+ ba->maxretry = BMP_DFLT_MAXRETRY;
+ ba->socket = -1;
+
+ bmp_actives_add(&bt->actives, ba);
+ return ba;
+}
+
+/* destroy an outbound-connection config; also tears down the established
+ * session (if any) and any in-progress connect attempt
+ */
+static void bmp_active_put(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ bmp_actives_del(&ba->targets->actives, ba);
+
+ if (ba->bmp) {
+ /* clear back-pointer so bmp_close() doesn't re-enter us */
+ ba->bmp->active = NULL;
+ bmp_close(ba->bmp);
+ bmp_free(ba->bmp);
+ }
+ if (ba->socket != -1)
+ close(ba->socket);
+
+ XFREE(MTYPE_TMP, ba->hostname);
+ XFREE(MTYPE_BMP_ACTIVE, ba);
+}
+
+static void bmp_active_setup(struct bmp_active *ba);
+
+/* try the resolved addresses in order with non-blocking connect().
+ * connect_in_progress arms read+write threads to wait for completion;
+ * exhaustion of all addresses grows the retry backoff and re-arms the
+ * timer via bmp_active_setup().
+ */
+static void bmp_active_connect(struct bmp_active *ba)
+{
+ enum connect_result res;
+ char buf[SU_ADDRSTRLEN];
+
+ for (; ba->addrpos < ba->addrtotal; ba->addrpos++) {
+ ba->socket = sockunion_socket(&ba->addrs[ba->addrpos]);
+ if (ba->socket < 0) {
+ zlog_warn("bmp[%s]: failed to create socket",
+ ba->hostname);
+ continue;
+ }
+
+ set_nonblocking(ba->socket);
+ res = sockunion_connect(ba->socket, &ba->addrs[ba->addrpos],
+ htons(ba->port), 0);
+ switch (res) {
+ case connect_error:
+ sockunion2str(&ba->addrs[ba->addrpos], buf,
+ sizeof(buf));
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ close(ba->socket);
+ ba->socket = -1;
+ continue;
+ case connect_success:
+ /* NOTE(review): this break only leaves the switch; the
+ * loop then advances to the next address, abandoning
+ * the successfully connected socket. Looks like the
+ * success path should set up the session -- confirm.
+ */
+ break;
+ case connect_in_progress:
+ bmp_active_setup(ba);
+ return;
+ }
+ }
+
+ /* exhausted all addresses */
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+}
+
+/* DNS resolver callback: stash up to array_size(ba->addrs) results and
+ * start connecting; resolution failure grows the backoff and re-arms.
+ */
+static void bmp_active_resolved(struct resolver_query *resq, int numaddrs,
+ union sockunion *addr)
+{
+ struct bmp_active *ba = container_of(resq, struct bmp_active, resq);
+ unsigned i;
+
+ if (numaddrs <= 0) {
+ zlog_warn("bmp[%s]: hostname resolution failed", ba->hostname);
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+ return;
+ }
+ /* clamp to the fixed-size address array */
+ if (numaddrs > (int)array_size(ba->addrs))
+ numaddrs = array_size(ba->addrs);
+
+ ba->addrpos = 0;
+ ba->addrtotal = numaddrs;
+ for (i = 0; i < ba->addrtotal; i++)
+ memcpy(&ba->addrs[i], &addr[i], sizeof(ba->addrs[0]));
+
+ bmp_active_connect(ba);
+}
+
+/* shared callback for the retry timer (socket == -1: kick off DNS
+ * resolution) and for read/write readiness of a pending non-blocking
+ * connect (check SO_ERROR, then open the BMP session or move on to the
+ * next resolved address).
+ */
+static int bmp_active_thread(struct thread *t)
+{
+ struct bmp_active *ba = THREAD_ARG(t);
+ socklen_t slen;
+ int status, ret;
+ char buf[SU_ADDRSTRLEN];
+
+ /* all 3 end up here, though only timer or read+write are active
+ * at a time */
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->socket == -1) {
+ resolver_resolve(&ba->resq, AF_UNSPEC, ba->hostname,
+ bmp_active_resolved);
+ return 0;
+ }
+
+ /* non-blocking connect completed; fetch its result */
+ slen = sizeof(status);
+ ret = getsockopt(ba->socket, SOL_SOCKET, SO_ERROR, (void *)&status,
+ &slen);
+
+ sockunion2str(&ba->addrs[ba->addrpos], buf, sizeof(buf));
+ if (ret < 0 || status != 0) {
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ goto out_next;
+ }
+
+ zlog_warn("bmp[%s]: outbound connection to %s:%d",
+ ba->hostname, buf, ba->port);
+
+ ba->bmp = bmp_open(ba->targets, ba->socket);
+ if (!ba->bmp)
+ goto out_next;
+
+ /* session owns the fd now; reset backoff for the next reconnect */
+ ba->bmp->active = ba;
+ ba->socket = -1;
+ ba->curretry = ba->minretry;
+ return 0;
+
+out_next:
+ close(ba->socket);
+ ba->socket = -1;
+ ba->addrpos++;
+ bmp_active_connect(ba);
+ return 0;
+}
+
+/* established outbound session went away: schedule a reconnect */
+static void bmp_active_disconnected(struct bmp_active *ba)
+{
+ ba->bmp = NULL;
+ bmp_active_setup(ba);
+}
+
+/* (re)arm the state machine: no-op while connected or while a DNS query
+ * is outstanding; otherwise either start the retry timer (no socket yet)
+ * or wait for the pending connect via read+write threads.
+ */
+static void bmp_active_setup(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->bmp)
+ return;
+ if (ba->resq.callback)
+ return;
+
+ /* clamp the exponential backoff */
+ if (ba->curretry > ba->maxretry)
+ ba->curretry = ba->maxretry;
+
+ if (ba->socket == -1)
+ thread_add_timer_msec(bm->master, bmp_active_thread, ba,
+ ba->curretry, &ba->t_timer);
+ else {
+ thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_read);
+ thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_write);
+ }
+}
+
+/* CLI node for "bmp targets" sub-configuration */
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
+#define BMP_STR "BGP Monitoring Protocol\n"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bgp_bmp_clippy.c"
+#endif
+
+/* enter (creating if necessary) a named BMP targets group */
+DEFPY_NOSH(bmp_targets_main,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ BMP_STR
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_get(bgp, bmptargets);
+
+ VTY_PUSH_CONTEXT_SUB(BMP_NODE, bt);
+ return CMD_SUCCESS;
+}
+
+/* delete a named BMP targets group and everything under it */
+DEFPY(no_bmp_targets_main,
+ no_bmp_targets_cmd,
+ "no bmp targets BMPTARGETS",
+ NO_STR
+ BMP_STR
+ "Delete BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, bmptargets);
+ if (!bt) {
+ vty_out(vty, "%% BMP target group not found\n");
+ return CMD_WARNING;
+ }
+ bmp_targets_put(bt);
+ return CMD_SUCCESS;
+}
+
+/* configure a passive listener; opens the socket immediately */
+DEFPY(bmp_listener_main,
+ bmp_listener_cmd,
+ "bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ BMP_STR
+ "Listen for inbound BMP connections\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_get(bt, listener, port);
+ /* sock == -1 means not started yet (or a previous start failed) */
+ if (bl->sock == -1)
+ bmp_listener_start(bl);
+
+ return CMD_SUCCESS;
+}
+
+/* remove a passive listener and close its socket */
+DEFPY(no_bmp_listener_main,
+ no_bmp_listener_cmd,
+ "no bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ NO_STR
+ BMP_STR
+ "Create BMP listener\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_find(bt, listener, port);
+ if (!bl) {
+ vty_out(vty, "%% BMP listener not found\n");
+ return CMD_WARNING;
+ }
+ bmp_listener_stop(bl);
+ bmp_listener_put(bl);
+ return CMD_SUCCESS;
+}
+
+/* configure (or remove, with "no") an active outbound connection; the
+ * optional min-/max-retry bounds control the reconnect backoff
+ */
+DEFPY(bmp_connect,
+ bmp_connect_cmd,
+ "[no] bmp connect HOSTNAME port (1-65535) "
+ "{min-retry (100-86400000)"
+ "|max-retry (100-86400000)}",
+ NO_STR
+ BMP_STR
+ "Actively establish connection to monitoring station\n"
+ "Monitoring station hostname or address\n"
+ "TCP port\n"
+ "TCP port\n"
+ "Minimum connection retry interval\n"
+ "Minimum connection retry interval (milliseconds)\n"
+ "Maximum connection retry interval\n"
+ "Maximum connection retry interval (milliseconds)\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_active *ba;
+
+ if (no) {
+ ba = bmp_active_find(bt, hostname, port);
+ if (!ba) {
+ vty_out(vty, "%% No such active connection found\n");
+ return CMD_WARNING;
+ }
+ bmp_active_put(ba);
+ return CMD_SUCCESS;
+ }
+
+ ba = bmp_active_get(bt, hostname, port);
+ if (min_retry_str)
+ ba->minretry = min_retry;
+ if (max_retry_str)
+ ba->maxretry = max_retry;
+ ba->curretry = ba->minretry;
+ bmp_active_setup(ba);
+
+ return CMD_SUCCESS;
+}
+
+/* set/clear the v4 or v6 access-list restricting inbound sessions */
+DEFPY(bmp_acl,
+ bmp_acl_cmd,
+ "[no] <ip|ipv6>$af access-list WORD",
+ NO_STR
+ IP_STR
+ IPV6_STR
+ "Access list to restrict BMP sessions\n"
+ "Access list name\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ char **what;
+
+ /* "no" form: forget the configured name */
+ if (no)
+ access_list = NULL;
+ if (!strcmp(af, "ipv6"))
+ what = &bt->acl6_name;
+ else
+ what = &bt->acl_name;
+
+ XFREE(MTYPE_BMP_ACLNAME, *what);
+ if (access_list)
+ *what = XSTRDUP(MTYPE_BMP_ACLNAME, access_list);
+
+ return CMD_SUCCESS;
+}
+
+/* enable/disable periodic stats reports; re-arms the timer immediately */
+DEFPY(bmp_stats_cfg,
+ bmp_stats_cmd,
+ "[no] bmp stats [interval (100-86400000)]",
+ NO_STR
+ BMP_STR
+ "Send BMP statistics messages\n"
+ "Specify BMP stats interval\n"
+ "Interval (milliseconds) to send BMP Stats in\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+
+ THREAD_OFF(bt->t_stats);
+ if (no)
+ bt->stat_msec = 0;
+ else if (interval_str)
+ bt->stat_msec = interval;
+ else
+ bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
+
+ if (bt->stat_msec)
+ thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
+ return CMD_SUCCESS;
+}
+
+/* toggle pre-/post-policy route monitoring for one AFI/SAFI; on any
+ * change, existing sessions are pushed back into table resync
+ */
+DEFPY(bmp_monitor_cfg,
+ bmp_monitor_cmd,
+ "[no] bmp monitor "BGP_AFI_CMD_STR" <unicast|multicast> <pre-policy|post-policy>$policy",
+ NO_STR
+ BMP_STR
+ "Send BMP route monitoring messages\n"
+ BGP_AFI_HELP_STR
+ "Address family modifier\n"
+ "Address family modifier\n"
+ "Send state before policy and filter processing\n"
+ "Send state with policy and filters applied\n")
+{
+ int index = 0;
+ uint8_t flag, prev;
+ afi_t afi;
+ safi_t safi;
+
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ argv_find_and_parse_afi(argv, argc, &index, &afi);
+ argv_find_and_parse_safi(argv, argc, &index, &safi);
+
+ /* "pre-policy" vs "post-policy": distinguish by second character */
+ if (policy[1] == 'r')
+ flag = BMP_MON_PREPOLICY;
+ else
+ flag = BMP_MON_POSTPOLICY;
+
+ prev = bt->afimon[afi][safi];
+ if (no)
+ bt->afimon[afi][safi] &= ~flag;
+ else
+ bt->afimon[afi][safi] |= flag;
+
+ if (prev == bt->afimon[afi][safi])
+ return CMD_SUCCESS;
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ /* abort any in-progress sync of this AFI/SAFI */
+ if (bmp->syncafi == afi && bmp->syncsafi == safi) {
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ }
+
+ if (!bt->afimon[afi][safi]) {
+ bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
+ continue;
+ }
+
+ /* (re)start from a full table sync */
+ bmp->afistate[afi][safi] = BMP_AFI_NEEDSYNC;
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* toggle route mirroring; turning it off drains each session's pending
+ * mirror queue (freeing entries no other session still references)
+ */
+DEFPY(bmp_mirror_cfg,
+ bmp_mirror_cmd,
+ "[no] bmp mirror",
+ NO_STR
+ BMP_STR
+ "Send BMP route mirroring messages\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ if (bt->mirror == !no)
+ return CMD_SUCCESS;
+
+ bt->mirror = !no;
+ if (bt->mirror)
+ return CMD_SUCCESS;
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ struct bmp_mirrorq *bmq;
+
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ }
+ return CMD_SUCCESS;
+}
+
+/* set the per-instance cap on buffered mirror message memory */
+DEFPY(bmp_mirror_limit_cfg,
+ bmp_mirror_limit_cmd,
+ "bmp mirror buffer-limit (0-4294967294)",
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = buffer_limit;
+
+ return CMD_SUCCESS;
+}
+
+/* reset the mirror buffer cap to "unlimited" (~0UL sentinel) */
+DEFPY(no_bmp_mirror_limit_cfg,
+ no_bmp_mirror_limit_cmd,
+ "no bmp mirror buffer-limit [(0-4294967294)]",
+ NO_STR
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = ~0UL;
+
+ return CMD_SUCCESS;
+}
+
+
+/* "show bmp": dump per-instance mirror buffer usage, per-targets config
+ * and a table of connected sessions with their counters
+ */
+DEFPY(show_bmp,
+ show_bmp_cmd,
+ "show bmp",
+ SHOW_STR
+ BMP_STR)
+{
+ struct bmp_bgp *bmpbgp;
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp *bmp;
+ struct ttable *tt;
+ char buf[SU_ADDRSTRLEN];
+
+ frr_each(bmp_bgph, &bmp_bgph, bmpbgp) {
+ vty_out(vty, "BMP state for BGP %s:\n\n",
+ bmpbgp->bgp->name_pretty);
+ vty_out(vty, " Route Mirroring %9zu bytes (%zu messages) pending\n",
+ bmpbgp->mirror_qsize,
+ bmp_mirrorq_count(&bmpbgp->mirrorq));
+ vty_out(vty, " %9zu bytes maximum buffer used\n",
+ bmpbgp->mirror_qsizemax);
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " %9zu bytes buffer size limit\n",
+ bmpbgp->mirror_qsizelimit);
+ vty_out(vty, "\n");
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " Targets \"%s\":\n", bt->name);
+ vty_out(vty, " Route Mirroring %sabled\n",
+ bt->mirror ? "en" : "dis");
+
+ afi_t afi;
+ safi_t safi;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *str = NULL;
+
+ switch (bt->afimon[afi][safi]) {
+ case BMP_MON_PREPOLICY:
+ str = "pre-policy";
+ break;
+ case BMP_MON_POSTPOLICY:
+ str = "post-policy";
+ break;
+ case BMP_MON_PREPOLICY | BMP_MON_POSTPOLICY:
+ str = "pre-policy and post-policy";
+ break;
+ }
+ if (!str)
+ continue;
+ vty_out(vty, " Route Monitoring %s %s %s\n",
+ afi2str(afi), safi2str(safi), str);
+ }
+
+ vty_out(vty, " Listeners:\n");
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " %s:%d\n",
+ sockunion2str(&bl->addr, buf,
+ SU_ADDRSTRLEN), bl->port);
+
+ vty_out(vty, "\n %zu connected clients:\n",
+ bmp_session_count(&bt->sessions));
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "remote|uptime|MonSent|MirrSent|MirrLost|ByteSent|ByteQ|ByteQKernel");
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ uint64_t total;
+ size_t q, kq;
+
+ pullwr_stats(bmp->pullwr, &total, &q, &kq);
+
+ /* uptime column is a literal "-" here; bmp->t_up
+ * exists but is not rendered yet
+ */
+ ttable_add_row(tt, "%s|-|%Lu|%Lu|%Lu|%Lu|%zu|%zu",
+ bmp->remote,
+ bmp->cnt_update,
+ bmp->cnt_mirror,
+ bmp->cnt_mirror_overruns,
+ total, q, kq);
+ }
+ char *out = ttable_dump(tt, "\n");
+ vty_out(vty, "%s", out);
+ XFREE(MTYPE_TMP, out);
+ ttable_del(tt);
+ vty_out(vty, "\n");
+ }
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* emit "running-config" lines for all BMP state under one BGP instance */
+static int bmp_config_write(struct bgp *bgp, struct vty *vty)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp_active *ba;
+ char buf[SU_ADDRSTRLEN];
+ afi_t afi;
+ safi_t safi;
+
+ if (!bmpbgp)
+ return 0;
+
+ /* ~0UL sentinel == limit not configured */
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " !\n bmp mirror buffer-limit %zu\n",
+ bmpbgp->mirror_qsizelimit);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " !\n bmp targets %s\n", bt->name);
+
+ if (bt->acl6_name)
+ vty_out(vty, " ipv6 access-list %s\n", bt->acl6_name);
+ if (bt->acl_name)
+ vty_out(vty, " ip access-list %s\n", bt->acl_name);
+
+ if (bt->stat_msec)
+ vty_out(vty, " bmp stats interval %d\n",
+ bt->stat_msec);
+
+ if (bt->mirror)
+ vty_out(vty, " bmp mirror\n");
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *afi_str = (afi == AFI_IP) ? "ipv4" : "ipv6";
+
+ if (bt->afimon[afi][safi] & BMP_MON_PREPOLICY)
+ vty_out(vty, " bmp monitor %s %s pre-policy\n",
+ afi_str, safi2str(safi));
+ if (bt->afimon[afi][safi] & BMP_MON_POSTPOLICY)
+ vty_out(vty, " bmp monitor %s %s post-policy\n",
+ afi_str, safi2str(safi));
+ }
+ /* no embedded newline here -- one config line per listener,
+ * matching the other "bmp ..." lines above
+ */
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " bmp listener %s port %d\n",
+ sockunion2str(&bl->addr, buf, SU_ADDRSTRLEN),
+ bl->port);
+
+ frr_each (bmp_actives, &bt->actives, ba)
+ vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u\n",
+ ba->hostname, ba->port, ba->minretry, ba->maxretry);
+ }
+
+ return 0;
+}
+
+/* late-init hook: register the CLI node/commands and start the resolver */
+static int bgp_bmp_init(struct thread_master *tm)
+{
+ install_node(&bmp_node, NULL);
+ install_default(BMP_NODE);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BGP_NODE, &no_bmp_targets_cmd);
+
+ install_element(BMP_NODE, &bmp_listener_cmd);
+ install_element(BMP_NODE, &no_bmp_listener_cmd);
+ install_element(BMP_NODE, &bmp_connect_cmd);
+ install_element(BMP_NODE, &bmp_acl_cmd);
+ install_element(BMP_NODE, &bmp_stats_cmd);
+ install_element(BMP_NODE, &bmp_monitor_cmd);
+ install_element(BMP_NODE, &bmp_mirror_cmd);
+
+ install_element(BGP_NODE, &bmp_mirror_limit_cmd);
+ install_element(BGP_NODE, &no_bmp_mirror_limit_cmd);
+
+ install_element(VIEW_NODE, &show_bmp_cmd);
+
+ resolver_init(tm);
+ return 0;
+}
+
+/* module entry point: hook into bgpd's packet, FSM and config paths */
+static int bgp_bmp_module_init(void)
+{
+ hook_register(bgp_packet_dump, bmp_mirror_packet);
+ hook_register(bgp_packet_send, bmp_outgoing_packet);
+ hook_register(peer_established, bmp_peer_established);
+ hook_register(peer_backward_transition, bmp_peer_backward);
+ hook_register(bgp_process, bmp_process);
+ hook_register(bgp_inst_config_write, bmp_config_write);
+ hook_register(bgp_inst_delete, bmp_bgp_del);
+ hook_register(frr_late_init, bgp_bmp_init);
+ return 0;
+}
+
+FRR_MODULE_SETUP(.name = "bgpd_bmp", .version = FRR_VERSION,
+ .description = "bgpd BMP module",
+ .init = bgp_bmp_module_init)
--- /dev/null
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _BGP_BMP_H_
+#define _BGP_BMP_H_
+
+#include "zebra.h"
+#include "typesafe.h"
+#include "pullwr.h"
+#include "qobj.h"
+#include "resolver.h"
+
+#define BMP_VERSION_3 3
+
+#define BMP_LENGTH_POS 1
+
+/* BMP message types */
+#define BMP_TYPE_ROUTE_MONITORING 0
+#define BMP_TYPE_STATISTICS_REPORT 1
+#define BMP_TYPE_PEER_DOWN_NOTIFICATION 2
+#define BMP_TYPE_PEER_UP_NOTIFICATION 3
+#define BMP_TYPE_INITIATION 4
+#define BMP_TYPE_TERMINATION 5
+#define BMP_TYPE_ROUTE_MIRRORING 6
+
+#define BMP_READ_BUFSIZ 1024
+
+/* bmp->state */
+#define BMP_None 0
+#define BMP_PeerUp 2
+#define BMP_Run 3
+
+/* This one is for BMP Route Monitoring messages, i.e. delivering updates
+ * in somewhat processed (as opposed to fully raw, see mirroring below) form.
+ * RFC explicitly says that we can skip old updates if we haven't sent them out
+ * yet and another newer update for the same prefix arrives.
+ *
+ * So, at most one of these can exist for each (bgp, afi, safi, prefix, peerid)
+ * tuple; if some prefix is "re-added" to the queue, the existing entry is
+ * instead moved to the end of the queue. This ensures that the queue size is
+ * bounded by the BGP table size.
+ *
+ * bmp_qlist is the queue itself while bmp_qhash is used to efficiently check
+ * whether a tuple is already on the list. The queue is maintained per
+ * bmp_target.
+ *
+ * refcount = number of "struct bmp *" whose queue position is before this
+ * entry, i.e. number of BMP sessions where we still want to send this out.
+ * Decremented on send so we know when we're done with an entry (i.e. this
+ * always happens from the front of the queue.)
+ */
+
+PREDECL_DLIST(bmp_qlist)
+PREDECL_HASH(bmp_qhash)
+
+struct bmp_queue_entry {
+ struct bmp_qlist_item bli;
+ struct bmp_qhash_item bhi;
+
+ struct prefix p;
+ uint64_t peerid;
+ afi_t afi;
+ safi_t safi;
+
+ size_t refcount;
+};
+
+/* This is for BMP Route Mirroring, which feeds fully raw BGP PDUs out to BMP
+ * receivers. So, this goes directly off packet RX/TX handling instead of
+ * grabbing bits from tables.
+ *
+ * There is *one* queue for each "struct bgp *" where we throw everything on,
+ * with a size limit. Refcount works the same as for monitoring above.
+ */
+
+PREDECL_LIST(bmp_mirrorq)
+
+struct bmp_mirrorq {
+ struct bmp_mirrorq_item bmi;
+
+ size_t refcount;
+ uint64_t peerid;
+ struct timeval tv;
+
+ size_t len;
+ uint8_t data[0];
+};
+
+enum {
+ BMP_AFI_INACTIVE = 0,
+ BMP_AFI_NEEDSYNC,
+ BMP_AFI_SYNC,
+ BMP_AFI_LIVE,
+};
+
+PREDECL_LIST(bmp_session)
+
+struct bmp_active;
+struct bmp_targets;
+
+/* an established BMP session to a peer */
+struct bmp {
+ struct bmp_session_item bsi;
+ struct bmp_targets *targets;
+ struct bmp_active *active;
+
+ int socket;
+ char remote[SU_ADDRSTRLEN + 6];
+ struct thread *t_read;
+
+ struct pullwr *pullwr;
+
+ int state;
+
+ /* queue positions must remain synced with refcounts in the items.
+ * Whenever appending a queue item, we need to know the correct number
+ * of "struct bmp *" that want it, and when moving these positions
+ * ahead we need to make sure that refcount is decremented. Also, on
+ * disconnects we need to walk the queue and drop our reference.
+ */
+ struct bmp_queue_entry *queuepos;
+ struct bmp_mirrorq *mirrorpos;
+ bool mirror_lost;
+
+ /* enum BMP_AFI_* */
+ uint8_t afistate[AFI_MAX][SAFI_MAX];
+
+ /* counters for the various BMP packet types */
+ uint64_t cnt_update, cnt_mirror;
+ /* number of times this peer wasn't fast enough in consuming the
+ * mirror queue
+ */
+ uint64_t cnt_mirror_overruns;
+ struct timeval t_up;
+
+ /* synchronization / startup works by repeatedly finding the next
+ * table entry, the sync* fields note down what we sent last
+ */
+ struct prefix syncpos;
+ uint64_t syncpeerid;
+ afi_t syncafi;
+ safi_t syncsafi;
+};
+
+/* config & state for an active outbound connection. When the connection
+ * succeeds, "bmp" is set up.
+ */
+
+PREDECL_SORTLIST_UNIQ(bmp_actives)
+
+/* reconnect backoff bounds, in milliseconds */
+#define BMP_DFLT_MINRETRY 30000
+#define BMP_DFLT_MAXRETRY 720000
+
+struct bmp_active {
+ struct bmp_actives_item bai;
+ struct bmp_targets *targets;
+ /* set once the outbound connection is up; NULL while (re)connecting */
+ struct bmp *bmp;
+
+ char *hostname;
+ int port;
+ unsigned minretry, maxretry;
+
+ struct resolver_query resq;
+
+ /* current backoff (ms); grows by half on failure, capped at maxretry */
+ unsigned curretry;
+ /* resolved addresses, tried in order addrpos..addrtotal-1 */
+ unsigned addrpos, addrtotal;
+ union sockunion addrs[8];
+ int socket;
+ struct thread *t_timer, *t_read, *t_write;
+};
+
+/* config & state for passive / listening sockets */
+PREDECL_SORTLIST_UNIQ(bmp_listeners)
+
+struct bmp_listener {
+ struct bmp_listeners_item bli;
+
+ struct bmp_targets *targets;
+
+ union sockunion addr;
+ int port;
+
+ struct thread *t_accept;
+ int sock;
+};
+
+/* bmp_targets - plural since it may contain multiple bmp_listener &
+ * bmp_active items. If they have the same config, BMP session should be
+ * put in the same targets since that's a bit more effective.
+ */
+PREDECL_SORTLIST_UNIQ(bmp_targets)
+
+struct bmp_targets {
+ struct bmp_targets_item bti;
+
+ struct bmp_bgp *bmpbgp;
+ struct bgp *bgp;
+ char *name;
+
+ struct bmp_listeners_head listeners;
+
+ char *acl_name;
+ char *acl6_name;
+#define BMP_STAT_DEFAULT_TIMER 60000
+ int stat_msec;
+
+ /* only IPv4 & IPv6 / unicast & multicast supported for now */
+#define BMP_MON_PREPOLICY (1 << 0)
+#define BMP_MON_POSTPOLICY (1 << 1)
+ uint8_t afimon[AFI_MAX][SAFI_MAX];
+ bool mirror;
+
+ struct bmp_actives_head actives;
+
+ struct thread *t_stats;
+ struct bmp_session_head sessions;
+
+ struct bmp_qhash_head updhash;
+ struct bmp_qlist_head updlist;
+
+ uint64_t cnt_accept, cnt_aclrefused;
+
+ QOBJ_FIELDS
+};
+DECLARE_QOBJ_TYPE(bmp_targets)
+
+/* per struct peer * data. Lookup by peer->qobj_node.nid, created on demand,
+ * deleted in peer_backward hook. */
+PREDECL_HASH(bmp_peerh)
+
+struct bmp_bgp_peer {
+ struct bmp_peerh_item bpi;
+
+ uint64_t peerid;
+ /* struct peer *peer; */
+
+ uint8_t *open_rx;
+ size_t open_rx_len;
+
+ uint8_t *open_tx;
+ size_t open_tx_len;
+};
+
+/* per struct bgp * data */
+PREDECL_HASH(bmp_bgph)
+
+struct bmp_bgp {
+ struct bmp_bgph_item bbi;
+
+ struct bgp *bgp;
+ struct bmp_targets_head targets;
+
+ struct bmp_mirrorq_head mirrorq;
+ size_t mirror_qsize, mirror_qsizemax;
+
+ size_t mirror_qsizelimit;
+};
+
+enum {
+ BMP_PEERDOWN_LOCAL_NOTIFY = 1,
+ BMP_PEERDOWN_LOCAL_FSM = 2,
+ BMP_PEERDOWN_REMOTE_NOTIFY = 3,
+ BMP_PEERDOWN_REMOTE_CLOSE = 4,
+ BMP_PEERDOWN_ENDMONITOR = 5,
+};
+
+/* Statistics Report TLV types; values 0-13 are the IANA-assigned BMP stat
+ * types, while 65531 is from the high range, used here for an
+ * FRR-specific counter -- presumably experimental; confirm against the
+ * BMP statistics-types registry.
+ */
+enum {
+ BMP_STATS_PFX_REJECTED = 0,
+ BMP_STATS_PFX_DUP_ADV = 1,
+ BMP_STATS_PFX_DUP_WITHDRAW = 2,
+ BMP_STATS_UPD_LOOP_CLUSTER = 3,
+ BMP_STATS_UPD_LOOP_ASPATH = 4,
+ BMP_STATS_UPD_LOOP_ORIGINATOR = 5,
+ BMP_STATS_UPD_LOOP_CONFED = 6,
+ BMP_STATS_SIZE_ADJ_RIB_IN = 7,
+ BMP_STATS_SIZE_LOC_RIB = 8,
+ BMP_STATS_SIZE_ADJ_RIB_IN_SAFI = 9,
+ BMP_STATS_SIZE_LOC_RIB_IN_SAFI = 10,
+ BMP_STATS_UPD_7606_WITHDRAW = 11,
+ BMP_STATS_PFX_7606_WITHDRAW = 12,
+ BMP_STATS_UPD_DUP = 13,
+ BMP_STATS_FRR_NH_INVALID = 65531,
+};
+
+DECLARE_MGROUP(BMP)
+
+#endif /*_BGP_BMP_H_*/
struct ecommunity *ecomadd)
{
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
listnode_add_sort(bgp_vrf->vrf_import_rtl, ecomadd);
SET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_IMPORT_RT_CFGD);
- /* map VRF to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching the new VRF */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRF to its RTs and install routes matching the new RTs */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecom = NULL;
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
evpn_auto_rt_import_add_for_vrf(bgp_vrf);
}
- /* map VRFs to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching this new RT */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRFs to its RTs and install routes matching this new RT */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_configure_export_rt_for_vrf(struct bgp *bgp_vrf,
return (CHECK_FLAG(vpn->flags, VNI_FLAG_LIVE));
}
+/* True when the VRF's L3VNI is configured (non-zero) and its SVI ifindex
+ * is known.  Callers use this to skip route install/uninstall work for a
+ * VRF whose L3VNI is not (yet) operational. */
+static inline int is_l3vni_live(struct bgp *bgp_vrf)
+{
+ return (bgp_vrf->l3vni && bgp_vrf->l3vni_svi_ifindex);
+}
+
static inline int is_rd_configured(struct bgpevpn *vpn)
{
return (CHECK_FLAG(vpn->flags, VNI_FLAG_RD_CFGD));
*/
DEFUN(show_bgp_l2vpn_evpn_summary,
show_bgp_l2vpn_evpn_summary_cmd,
- "show bgp [vrf VRFNAME] l2vpn evpn summary [json]",
+ "show bgp [vrf VRFNAME] l2vpn evpn summary [failed] [json]",
SHOW_STR
BGP_STR
"bgp vrf\n"
L2VPN_HELP_STR
EVPN_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
int idx_vrf = 0;
bool uj = use_json(argc, argv);
char *vrf = NULL;
+ bool show_failed = false;
if (argv_find(argv, argc, "vrf", &idx_vrf))
vrf = argv[++idx_vrf]->arg;
+ /* idx_vrf is reused here purely as scratch output for argv_find;
+  * only the boolean result matters for the "failed" keyword. */
+ if (argv_find(argv, argc, "failed", &idx_vrf))
+ show_failed = true;
+ return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN,
+ show_failed, uj);
}
/*
from_peer->last_event = last_evt;
from_peer->last_major_event = last_maj_evt;
peer->remote_id = from_peer->remote_id;
+ peer->last_reset = from_peer->last_reset;
if (from_peer->hostname != NULL) {
if (peer->hostname) {
"Intf peering v6only config change",
"BFD down received",
"Interface down",
- "Neighbor address lost"};
+ /* NB: keep one comma-terminated entry per PEER_DOWN_* value --
+  * a missing comma concatenates adjacent string literals into a
+  * single element and shifts all later indices off by one. */
+ "Neighbor address lost",
+ "Waiting for NHT",
+ "Waiting for Peer IPv6 Addr",
+ "Waiting for VRF to be initialized"};
static int bgp_graceful_restart_timer_expire(struct thread *thread)
{
zlog_debug(
"%s [FSM] Unable to get neighbor's IP address, waiting...",
peer->host);
+ peer->last_reset = PEER_DOWN_NBR_ADDR;
return -1;
}
EC_BGP_FSM,
"%s [FSM] In a VRF that is not initialised yet",
peer->host);
+ peer->last_reset = PEER_DOWN_VRF_UNINIT;
return -1;
}
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Waiting for NHT",
peer->host);
-
+ peer->last_reset = PEER_DOWN_WAITING_NHT;
BGP_EVENT_ADD(peer, TCP_connection_open_failed);
return 0;
}
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%s Address family %s is%spreserved",
- peer->host, afi_safi_print(afi, safi),
+ peer->host, get_afi_safi_str(afi, safi, false),
CHECK_FLAG(
peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_PRESERVE_RCV)
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_bmp.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
if (bgp_debug_neighbor_events(peer))
zlog_debug("send End-of-RIB for %s to %s",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
s = stream_new(BGP_MAX_PACKET_SIZE);
bgp_clear_stale_route(peer, afi, safi);
zlog_info("%%NOTIFICATION: rcvd End-of-RIB for %s from %s in vrf %s",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
vrf ? vrf->name : VRF_DEFAULT_NAME);
}
}
zlog_info(
"%%MAXPFXEXCEED: No. of %s prefix received from %s %ld exceed, "
"limit %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT);
zlog_info(
"%%MAXPFX: No. of %s prefix received from %s reaches %ld, max %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_PREFIX_THRESHOLD);
return CMD_WARNING;
}
- vty_out(vty, "BGP %s RIB statistics\n", afi_safi_print(afi, safi));
+ vty_out(vty, "BGP %s RIB statistics\n", get_afi_safi_str(afi, safi, false));
/* labeled-unicast routes live in the unicast table */
if (safi == SAFI_LABELED_UNICAST)
if (use_json) {
json_object_string_add(json, "prefixCountsFor", peer->host);
json_object_string_add(json, "multiProtocol",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
json_object_int_add(json, "pfxCounter",
peer->pcount[afi][safi]);
&& bgp_flag_check(peer->bgp, BGP_FLAG_SHOW_HOSTNAME)) {
vty_out(vty, "Prefix counts for %s/%s, %s\n",
peer->hostname, peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
} else {
vty_out(vty, "Prefix counts for %s, %s\n", peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
vty_out(vty, "PfxCt: %ld\n", peer->pcount[afi][safi]);
if (count) {
if (!uj)
vty_out(vty, "Address Family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
prefix_bgp_show_prefix_list(vty, afi, name, uj);
} else {
if (uj)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
int retval = CMD_SUCCESS;
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here.
+ */
+ break;
}
return retval;
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, "rpki", argv[2]->arg,
RMAP_EVENT_MATCH_ADDED);
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
return CMD_WARNING_CONFIG_FAILED;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here
+ */
+ break;
}
}
return CMD_SUCCESS;
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_match(index, "rpki", argv[3]->arg);
if (ret) {
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
break;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return CMD_WARNING_CONFIG_FAILED;
}
#include "command.h"
#include "lib/json.h"
+#include "lib_errors.h"
#include "lib/zclient.h"
#include "prefix.h"
#include "plist.h"
return BGP_IPV4_NODE;
}
+/* Map an (afi, safi) pair to its human-readable display string for vty
+ * (CLI) output.  Unhandled combinations log a development error and
+ * return "Unknown" so output never dereferences a NULL string. */
+static const char *get_afi_safi_vty_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "IPv4 Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "IPv4 Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "IPv4 Labeled Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "IPv4 VPN";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "IPv4 Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "IPv4 Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "IPv6 Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "IPv6 Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "IPv6 Labeled Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "IPv6 VPN";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "IPv6 Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "IPv6 Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "L2VPN EVPN";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
+/*
+ * Map an (afi, safi) pair to its JSON key string.  The return strings
+ * are intentionally camelCased, so use this function only when building
+ * json output.  Unhandled combinations log a development error and
+ * return "Unknown".
+ */
+static const char *get_afi_safi_json_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "ipv4Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "ipv4Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "ipv4LabeledUnicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "ipv4Vpn";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "ipv4Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "ipv4Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "ipv6Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "ipv6Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "ipv6LabeledUnicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "ipv6Vpn";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "ipv6Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "ipv6Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "l2VpnEvpn";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
/* Utility function to get address family from current node. */
afi_t bgp_node_afi(struct vty *vty)
{
case BGP_ERR_AF_UNCONFIGURED:
vty_out(vty,
"%%BGP: Enable %s address family for the neighbor %s\n",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
break;
case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:
vty_out(vty,
if (!found)
vty_out(vty,
"%%BGP: No %s peer belonging to peer-group %s is configured\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
if (!found)
vty_out(vty,
"%%BGP: No external %s peer is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
return CMD_SUCCESS;
}
if (!found)
vty_out(vty,
"%%BGP: No %s peer is configured with AS %s\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
json_object_object_add(json, "bestPath", bestpath);
}
+/* Print the error code/subcode for why the peer is down.
+ *
+ * Exactly one output sink is used per call: with use_json true the
+ * reason is added to json_peer and vty may be NULL; with use_json false
+ * it is written to vty and json_peer may be NULL (see the callers that
+ * pass NULL for the unused argument).
+ *
+ * For NOTIFY send/receive resets the BGP notification code/subcode is
+ * rendered (plus any Cease admin shutdown/reset message); every other
+ * reason is looked up in peer_down_str[]. */
+static void bgp_show_peer_reset(struct vty * vty, struct peer *peer,
+ json_object *json_peer, bool use_json)
+{
+ const char *code_str;
+ const char *subcode_str;
+
+ if (use_json) {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ char errorcodesubcode_hexstr[5];
+ char errorcodesubcode_str[256];
+
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str = bgp_notify_subcode_str(
+ peer->notify.code,
+ peer->notify.subcode);
+
+ /* "%02X%02X" writes exactly 4 hex digits + NUL,
+  * which fits the 5-byte buffer. */
+ sprintf(errorcodesubcode_hexstr, "%02X%02X",
+ peer->notify.code, peer->notify.subcode);
+ json_object_string_add(json_peer,
+ "lastErrorCodeSubcode",
+ errorcodesubcode_hexstr);
+ snprintf(errorcodesubcode_str, 255, "%s%s",
+ code_str, subcode_str);
+ json_object_string_add(json_peer,
+ "lastNotificationReason",
+ errorcodesubcode_str);
+ /* Include the operator-supplied shutdown message, if
+  * the received Cease carried one. */
+ if (peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED
+ && peer->notify.code == BGP_NOTIFY_CEASE
+ && (peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
+ || peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_RESET)
+ && peer->notify.length) {
+ char msgbuf[1024];
+ const char *msg_str;
+
+ msg_str = bgp_notify_admin_message(
+ msgbuf, sizeof(msgbuf),
+ (uint8_t *)peer->notify.data,
+ peer->notify.length);
+ if (msg_str)
+ json_object_string_add(
+ json_peer,
+ "lastShutdownDescription",
+ msg_str);
+ }
+
+ }
+ json_object_string_add(json_peer, "lastResetDueTo",
+ peer_down_str[(int)peer->last_reset]);
+ } else {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str =
+ bgp_notify_subcode_str(peer->notify.code,
+ peer->notify.subcode);
+ vty_out(vty, "  Notification %s (%s%s)\n",
+ peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ ? "sent"
+ : "received",
+ code_str, subcode_str);
+ } else {
+ vty_out(vty, "  %s\n",
+ peer_down_str[(int)peer->last_reset]);
+ }
+ }
+}
+
+/* A peer counts as "failed" for 'show ... summary failed' when the
+ * session is not Established, or it is Established but this afi/safi
+ * was not negotiated (no MP capability received from the peer). */
+static inline bool bgp_has_peer_failed(struct peer *peer, afi_t afi,
+ safi_t safi)
+{
+ return ((peer->status != Established) ||
+ !peer->afc_recv[afi][safi]);
+}
+
+/* Emit one row (vty) or one object (json) of the failed-peers summary
+ * for a single peer: established/dropped connection counters, uptime,
+ * and the last-reset reason.  An Established session that reaches here
+ * is reported as "AFI/SAFI Not Negotiated" (see bgp_has_peer_failed).
+ * max_neighbor_width pads the text-mode neighbor column and is unused
+ * for json output (callers pass 0). */
+static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
+ struct peer *peer, json_object *json_peer,
+ int max_neighbor_width, bool use_json)
+{
+ char timebuf[BGP_UPTIME_LEN], dn_flag[2];
+ int len;
+
+ if (use_json) {
+ if (peer_dynamic_neighbor(peer))
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+ if (peer->status == Established)
+ json_object_string_add(json_peer, "lastResetDueTo",
+ "AFI/SAFI Not Negotiated");
+ else
+ bgp_show_peer_reset(NULL, peer, json_peer, true);
+ } else {
+ /* dynamic neighbors are flagged with a leading '*' */
+ dn_flag[1] = '\0';
+ dn_flag[0] = peer_dynamic_neighbor(peer) ? '*' : '\0';
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
+ else
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+ vty_out(vty, "%7d %7d %8s", peer->established,
+ peer->dropped,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+ if (peer->status == Established)
+ vty_out(vty, "  AFI/SAFI Not Negotiated\n");
+ else
+ bgp_show_peer_reset(vty, peer, NULL,
+ false);
+ }
+}
+
+
/* Show BGP peer's summary information. */
static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
- bool use_json)
+ bool show_failed, bool use_json)
{
struct peer *peer;
struct listnode *node, *nnode;
char timebuf[BGP_UPTIME_LEN], dn_flag[2];
char neighbor_buf[VTY_BUFSIZ];
int neighbor_col_default_width = 16;
- int len;
+ int len, failed_count = 0;
int max_neighbor_width = 0;
int pfx_rcd_safi;
json_object *json = NULL;
* to
* display the correct PfxRcd value we must look at SAFI_UNICAST
*/
+
if (safi == SAFI_LABELED_UNICAST)
pfx_rcd_safi = SAFI_UNICAST;
else
if (use_json) {
json = json_object_new_object();
json_peers = json_object_new_object();
+ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
+ if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
+ continue;
+
+ if (peer->afc[afi][safi]) {
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
+ }
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
+ }
+
} else {
/* Loop over all neighbors that will be displayed to determine
* how many
if (len > max_neighbor_width)
max_neighbor_width = len;
+
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
}
}
max_neighbor_width = neighbor_col_default_width;
}
+ if (show_failed && !failed_count) {
+ if (use_json) {
+ json_object_int_add(json, "failedPeersCount", 0);
+ json_object_int_add(json, "dynamicPeers", dn_count);
+ json_object_int_add(json, "totalPeers", count);
+
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else {
+ vty_out(vty, "%% No failed BGP neighbors found\n");
+ vty_out(vty, "\nTotal number of neighbors %d\n", count);
+ }
+ return CMD_SUCCESS;
+ }
+
+ count = 0; /* Reset the value as its used again */
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
continue;
vty_out(vty, "Neighbor");
vty_out(vty, "%*s", max_neighbor_width - 8,
" ");
- vty_out(vty,
+ if (show_failed)
+ vty_out(vty, "EstdCnt DropCnt ResetTime Reason\n");
+ else
+ vty_out(vty,
"V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd\n");
}
}
count++;
+ /* Works for both failed & successful cases */
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
if (use_json) {
- json_peer = json_object_new_object();
+ json_peer = NULL;
+
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ json_peer = json_object_new_object();
+ bgp_show_failed_summary(vty, bgp, peer,
+ json_peer, 0, use_json);
+ } else if (!show_failed) {
+ json_peer = json_object_new_object();
+ if (peer_dynamic_neighbor(peer)) {
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ }
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- json_object_boolean_true_add(json_peer,
- "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+
+ json_object_int_add(json_peer, "remoteAs", peer->as);
+ json_object_int_add(json_peer, "version", 4);
+ json_object_int_add(json_peer, "msgRcvd",
+ PEER_TOTAL_RX(peer));
+ json_object_int_add(json_peer, "msgSent",
+ PEER_TOTAL_TX(peer));
+
+ json_object_int_add(json_peer, "tableVersion",
+ peer->version[afi][safi]);
+ json_object_int_add(json_peer, "outq",
+ peer->obuf->count);
+ json_object_int_add(json_peer, "inq", 0);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+
+ /*
+ * Adding "pfxRcd" field to match with the corresponding
+ * CLI. "prefixReceivedCount" will be deprecated in
+ * future.
+ */
+ json_object_int_add(json_peer, "prefixReceivedCount",
+ peer->pcount[afi][pfx_rcd_safi]);
+ json_object_int_add(json_peer, "pfxRcd",
+ peer->pcount[afi][pfx_rcd_safi]);
+
+ paf = peer_af_find(peer, afi, pfx_rcd_safi);
+ if (paf && PAF_SUBGRP(paf))
+ json_object_int_add(json_peer,
+ "pfxSnt",
+ (PAF_SUBGRP(paf))->scount);
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ json_object_string_add(json_peer, "state",
+ "Idle (Admin)");
+ else if (peer->afc_recv[afi][safi])
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ else if (CHECK_FLAG(peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ json_object_string_add(json_peer, "state",
+ "Idle (PfxCt)");
+ else
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
}
-
- if (peer->hostname)
- json_object_string_add(json_peer, "hostname",
- peer->hostname);
-
- if (peer->domainname)
- json_object_string_add(json_peer, "domainname",
- peer->domainname);
-
- json_object_int_add(json_peer, "remoteAs", peer->as);
- json_object_int_add(json_peer, "version", 4);
- json_object_int_add(json_peer, "msgRcvd",
- PEER_TOTAL_RX(peer));
- json_object_int_add(json_peer, "msgSent",
- PEER_TOTAL_TX(peer));
-
- json_object_int_add(json_peer, "tableVersion",
- peer->version[afi][safi]);
- json_object_int_add(json_peer, "outq",
- peer->obuf->count);
- json_object_int_add(json_peer, "inq", 0);
- peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
- use_json, json_peer);
-
- /*
- * Adding "pfxRcd" field to match with the corresponding
- * CLI. "prefixReceivedCount" will be deprecated in
- * future.
- */
- json_object_int_add(json_peer, "prefixReceivedCount",
- peer->pcount[afi][pfx_rcd_safi]);
- json_object_int_add(json_peer, "pfxRcd",
- peer->pcount[afi][pfx_rcd_safi]);
-
- paf = peer_af_find(peer, afi, pfx_rcd_safi);
- if (paf && PAF_SUBGRP(paf))
- json_object_int_add(json_peer,
- "pfxSnt",
- (PAF_SUBGRP(paf))->scount);
-
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- json_object_string_add(json_peer, "state",
- "Idle (Admin)");
- else if (peer->afc_recv[afi][safi])
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
- else if (CHECK_FLAG(peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- json_object_string_add(json_peer, "state",
- "Idle (PfxCt)");
- else
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
+ /* Avoid creating empty peer dicts in JSON */
+ if (json_peer == NULL)
+ continue;
if (peer->conf_if)
json_object_string_add(json_peer, "idType",
else if (peer->su.sa.sa_family == AF_INET6)
json_object_string_add(json_peer, "idType",
"ipv6");
-
json_object_object_add(json_peers, peer->host,
json_peer);
} else {
- memset(dn_flag, '\0', sizeof(dn_flag));
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- dn_flag[0] = '*';
- }
-
- if (peer->hostname
- && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
- len = vty_out(vty, "%s%s(%s)", dn_flag,
- peer->hostname, peer->host);
- else
- len = vty_out(vty, "%s%s", dn_flag, peer->host);
-
- /* pad the neighbor column with spaces */
- if (len < max_neighbor_width)
- vty_out(vty, "%*s", max_neighbor_width - len,
- " ");
-
- vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
- peer->as, PEER_TOTAL_RX(peer),
- PEER_TOTAL_TX(peer), peer->version[afi][safi],
- 0, peer->obuf->count,
- peer_uptime(peer->uptime, timebuf,
- BGP_UPTIME_LEN, 0, NULL));
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ bgp_show_failed_summary(vty, bgp, peer, NULL,
+ max_neighbor_width,
+ use_json);
+ } else if (!show_failed) {
+ memset(dn_flag, '\0', sizeof(dn_flag));
+ if (peer_dynamic_neighbor(peer)) {
+ dn_flag[0] = '*';
+ }
- if (peer->status == Established)
- if (peer->afc_recv[afi][safi])
- vty_out(vty, " %12ld",
- peer->pcount[afi]
- [pfx_rcd_safi]);
- else
- vty_out(vty, " NoNeg");
- else {
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- vty_out(vty, " Idle (Admin)");
- else if (CHECK_FLAG(
- peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- vty_out(vty, " Idle (PfxCt)");
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
else
- vty_out(vty, " %12s",
- lookup_msg(bgp_status_msg,
- peer->status, NULL));
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+
+ vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
+ peer->as, PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer), peer->version[afi][safi],
+ 0, peer->obuf->count,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+
+ if (peer->status == Established)
+ if (peer->afc_recv[afi][safi])
+ vty_out(vty, " %12ld",
+ peer->pcount[afi]
+ [pfx_rcd_safi]);
+ else
+ vty_out(vty, " NoNeg");
+ else {
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ vty_out(vty, " Idle (Admin)");
+ else if (CHECK_FLAG(
+ peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ vty_out(vty, " Idle (PfxCt)");
+ else
+ vty_out(vty, " %12s",
+ lookup_msg(bgp_status_msg,
+ peer->status, NULL));
+ }
+ vty_out(vty, "\n");
}
- vty_out(vty, "\n");
+
}
}
if (use_json) {
json_object_object_add(json, "peers", json_peers);
-
+ json_object_int_add(json, "failedPeers", failed_count);
json_object_int_add(json, "totalPeers", count);
json_object_int_add(json, "dynamicPeers", dn_count);
- bgp_show_bestpath_json(bgp, json);
+ if (!show_failed)
+ bgp_show_bestpath_json(bgp, json);
vty_out(vty, "%s\n", json_object_to_json_string_ext(
json, JSON_C_TO_STRING_PRETTY));
vty_out(vty, "\nTotal number of neighbors %d\n", count);
else {
vty_out(vty, "No %s neighbor is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
if (dn_count) {
}
static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
- int safi, bool use_json)
+ int safi, bool show_failed, bool use_json)
{
int is_first = 1;
int afi_wildcard = (afi == AFI_MAX);
is_first = 0;
vty_out(vty, "\"%s\":",
- afi_safi_json(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
} else {
vty_out(vty, "\n%s Summary:\n",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
}
}
- bgp_show_summary(vty, bgp, afi, safi, use_json);
+ bgp_show_summary(vty, bgp, afi, safi, show_failed,
+ use_json);
}
safi++;
if (!safi_wildcard)
}
static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed,
+ bool use_json)
{
struct listnode *node, *nnode;
struct bgp *bgp;
? VRF_DEFAULT_NAME
: bgp->name);
}
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
}
if (use_json)
}
int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed, bool use_json)
{
struct bgp *bgp;
if (name) {
if (strmatch(name, "all")) {
bgp_show_all_instances_summary_vty(vty, afi, safi,
+ show_failed,
use_json);
return CMD_SUCCESS;
} else {
}
bgp_show_summary_afi_safi(vty, bgp, afi, safi,
- use_json);
+ show_failed, use_json);
return CMD_SUCCESS;
}
}
bgp = bgp_get_default();
if (bgp)
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
else {
if (use_json)
vty_out(vty, "{}\n");
/* `show [ip] bgp summary' commands. */
DEFUN (show_ip_bgp_summary,
show_ip_bgp_summary_cmd,
- "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [json]",
+ "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [failed] [json]",
SHOW_STR
IP_STR
BGP_STR
BGP_AFI_HELP_STR
BGP_SAFI_WITH_LABEL_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
char *vrf = NULL;
afi_t afi = AFI_MAX;
safi_t safi = SAFI_MAX;
+ bool show_failed = false;
int idx = 0;
argv_find_and_parse_safi(argv, argc, &idx, &safi);
}
+ if (argv_find(argv, argc, "failed", &idx))
+ show_failed = true;
+
bool uj = use_json(argc, argv);
- return bgp_show_summary_vty(vty, vrf, afi, safi, uj);
+ return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, uj);
}
-const char *afi_safi_print(afi_t afi, safi_t safi)
+/* Single public entry point replacing afi_safi_print()/afi_safi_json():
+ * dispatch to the camelCased json string or the human-readable vty
+ * string depending on for_json. */
+const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json)
 {
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "IPv4 Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "IPv4 Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "IPv4 Labeled Unicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "IPv4 VPN";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "IPv4 Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "IPv4 Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "IPv6 Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "IPv6 Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "IPv6 Labeled Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "IPv6 VPN";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "IPv6 Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "IPv6 Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "L2VPN EVPN";
- else
- return "Unknown";
-}
-
-/*
- * Please note that we have intentionally camelCased
- * the return strings here. So if you want
- * to use this function, please ensure you
- * are doing this within json output
- */
-const char *afi_safi_json(afi_t afi, safi_t safi)
-{
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "ipv4Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "ipv4Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "ipv4LabeledUnicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "ipv4Vpn";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "ipv4Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "ipv4Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "ipv6Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "ipv6Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "ipv6LabeledUnicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "ipv6Vpn";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "ipv6Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "ipv6Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "l2VpnEvpn";
+ if (for_json)
+	return get_afi_safi_json_str(afi, safi);
 else
- return "Unknown";
+	return get_afi_safi_vty_str(afi, safi);
 }
/* Show BGP peer's information. */
"prefixAllowedRestartIntervalMsecs",
p->pmax_restart[afi][safi] * 60000);
}
- json_object_object_add(json_neigh, afi_safi_print(afi, safi),
+ json_object_object_add(json_neigh, get_afi_safi_str(afi, safi, true),
json_addr);
} else {
filter = &p->filter[afi][safi];
vty_out(vty, " For address family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
if (peer_group_active(p))
vty_out(vty, " %s peer-group member\n",
char buf1[PREFIX2STR_BUFFER], buf[SU_ADDRSTRLEN];
char timebuf[BGP_UPTIME_LEN];
char dn_flag[2];
- const char *subcode_str;
- const char *code_str;
afi_t afi;
safi_t safi;
uint16_t i;
json_object *json_sub = NULL;
json_sub =
json_object_new_object();
- print_store = afi_safi_print(
- afi, safi);
+ print_store = get_afi_safi_str(
+ afi, safi, true);
if (CHECK_FLAG(
p->af_cap[afi]
[AFI_IP]
[safi],
PEER_CAP_ENHE_AF_RCV)) {
- print_store = afi_safi_print(
+ print_store = get_afi_safi_str(
AFI_IP,
- safi);
+ safi, true);
json_object_string_add(
json_nxt,
print_store,
json_object_object_add(
json_multi,
- afi_safi_print(afi,
- safi),
+ get_afi_safi_str(afi,
+ safi,
+ true),
json_exten);
}
}
restart_af_count++;
json_object_object_add(
json_restart,
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ true),
json_sub);
}
}
PEER_CAP_ADDPATH_AF_TX_RCV)) {
vty_out(vty,
" %s: TX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
PEER_CAP_ADDPATH_AF_TX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
PEER_CAP_ADDPATH_AF_RX_RCV)) {
vty_out(vty,
" %s: RX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
PEER_CAP_ADDPATH_AF_RX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
PEER_CAP_ENHE_AF_RCV))
vty_out(vty,
" %s\n",
- afi_safi_print(
+ get_afi_safi_str(
AFI_IP,
- safi));
+ safi,
+ false));
}
}
|| p->afc_recv[afi][safi]) {
vty_out(vty,
" Address Family %s:",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(
+ afi,
+ safi,
+ false));
if (p->afc_adv[afi][safi])
vty_out(vty,
" advertised");
restart_af_count
? ", "
: "",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ false),
CHECK_FLAG(
p->af_cap
[afi]
PEER_STATUS_EOR_SEND)) {
json_object_boolean_true_add(
json_grace_send,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_send_af_count++;
}
}
PEER_STATUS_EOR_RECEIVED)) {
json_object_boolean_true_add(
json_grace_recv,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_receive_af_count++;
}
}
vty_out(vty, "%s%s",
eor_send_af_count ? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_send_af_count++;
}
}
eor_receive_af_count
? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_receive_af_count++;
}
}
(tm->tm_sec * 1000)
+ (tm->tm_min * 60000)
+ (tm->tm_hour * 3600000));
- json_object_string_add(
- json_neigh, "lastResetDueTo",
- peer_down_str[(int)p->last_reset]);
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- char errorcodesubcode_hexstr[5];
- char errorcodesubcode_str[256];
-
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
-
- sprintf(errorcodesubcode_hexstr, "%02X%02X",
- p->notify.code, p->notify.subcode);
- json_object_string_add(json_neigh,
- "lastErrorCodeSubcode",
- errorcodesubcode_hexstr);
- snprintf(errorcodesubcode_str, 255, "%s%s",
- code_str, subcode_str);
- json_object_string_add(json_neigh,
- "lastNotificationReason",
- errorcodesubcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- json_object_string_add(
- json_neigh,
- "lastShutdownDescription",
- msg_str);
- }
- }
+ bgp_show_peer_reset(NULL, p, json_neigh, true);
} else {
vty_out(vty, " Last reset %s, ",
peer_uptime(p->resettime, timebuf,
BGP_UPTIME_LEN, 0, NULL));
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
- vty_out(vty, "due to NOTIFICATION %s (%s%s)\n",
- p->last_reset == PEER_DOWN_NOTIFY_SEND
- ? "sent"
- : "received",
- code_str, subcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- vty_out(vty,
- " Message: \"%s\"\n",
- msg_str);
- }
- } else {
- vty_out(vty, "due to %s\n",
- peer_down_str[(int)p->last_reset]);
- }
-
+ bgp_show_peer_reset(vty, p, NULL, false);
if (p->last_reset_cause_size) {
msg = p->last_reset_cause;
vty_out(vty,
/* Provide context for the block */
json_object_string_add(json, "vrf", name ? name : "default");
json_object_string_add(json, "afiSafi",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
if (!CHECK_FLAG(bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
BGP_CONFIG_VRF_TO_VRF_IMPORT))
vty_out(vty,
"This VRF is not importing %s routes from any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is importing %s routes from the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].import_vrf,
BGP_CONFIG_VRF_TO_VRF_EXPORT))
vty_out(vty,
"This VRF is not exporting %s routes to any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is exporting %s routes to the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].export_vrf,
FOREACH_AFI_SAFI (afi, safi) {
if (conf->afc[afi][safi]) {
af_cfgd = 1;
- vty_out(vty, " %s;", afi_safi_print(afi, safi));
+ vty_out(vty, " %s;", get_afi_safi_str(afi, safi, false));
}
}
if (!af_cfgd)
"Address Family modifier\n"
extern void bgp_vty_init(void);
-extern const char *afi_safi_print(afi_t afi, safi_t safi);
-extern const char *afi_safi_json(afi_t afi, safi_t safi);
+extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json);
extern void bgp_config_write_update_delay(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_wpkt_quanta(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_rpkt_quanta(struct vty *vty, struct bgp *bgp);
safi_t *safi, struct bgp **bgp,
bool use_json);
extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json);
+ safi_t safi, bool show_failed, bool use_json);
extern void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
afi_t afi);
#endif /* _QUAGGA_BGP_VTY_H */
bgp = peer->bgp;
accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
+ bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
+ assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT))
peer_nsf_stop(peer);
#define PEER_DOWN_BFD_DOWN 24 /* BFD down */
#define PEER_DOWN_IF_DOWN 25 /* Interface down */
#define PEER_DOWN_NBR_ADDR_DEL 26 /* Peer address lost */
+#define PEER_DOWN_WAITING_NHT 27 /* Waiting for NHT to resolve */
+#define PEER_DOWN_NBR_ADDR 28 /* Waiting for peer IPv6 IP Addr */
+#define PEER_DOWN_VRF_UNINIT 29 /* Associated VRF is not init yet */
size_t last_reset_cause_size;
uint8_t last_reset_cause[BGP_MAX_PACKET_SIZE];
# can be loaded as DSO - always include for vtysh
vtysh_scan += $(top_srcdir)/bgpd/bgp_rpki.c
+vtysh_scan += $(top_srcdir)/bgpd/bgp_bmp.c
if ENABLE_BGP_VNC
vtysh_scan += \
if RPKI
module_LTLIBRARIES += bgpd/bgpd_rpki.la
endif
+if BGP_BMP
+module_LTLIBRARIES += bgpd/bgpd_bmp.la
+endif
man8 += $(MANBUILD)/bgpd.8
endif
bgpd/bgp_damp.h \
bgpd/bgp_debug.h \
bgpd/bgp_dump.h \
+ bgpd/bgp_bmp.h \
bgpd/bgp_ecommunity.h \
bgpd/bgp_encap_tlv.h \
bgpd/bgp_encap_types.h \
bgpd_bgpd_rpki_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
bgpd_bgpd_rpki_la_LIBADD = $(RTRLIB_LIBS)
+bgpd_bgpd_bmp_la_SOURCES = bgpd/bgp_bmp.c
+bgpd_bgpd_bmp_la_LIBADD = lib/libfrrcares.la
+bgpd_bgpd_bmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
bgpd/bgp_evpn_vty_clippy.c: $(CLIPPY_DEPS)
bgpd/bgp_evpn_vty.$(OBJEXT): bgpd/bgp_evpn_vty_clippy.c
bgpd/bgp_vty_clippy.c: $(CLIPPY_DEPS)
bgpd/bgp_rpki_clippy.c: $(CLIPPY_DEPS)
$(AUTOMAKE_DUMMY)bgpd/bgpd_bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
$(AUTOMAKE_DUMMY)bgpd/bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
+bgpd/bgp_bmp_clippy.c: $(CLIPPY_DEPS)
+bgpd/bgp_bmp.lo: bgpd/bgp_bmp_clippy.c
AS_HELP_STRING([--disable-staticd], [do not build staticd]))
AC_ARG_ENABLE([fabricd],
AS_HELP_STRING([--disable-fabricd], [do not build fabricd]))
-AC_ARG_ENABLE([bgp-announce],
- AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE([vrrpd],
AS_HELP_STRING([--disable-vrrpd], [do not build vrrpd]))
+AC_ARG_ENABLE([bgp-announce],
+	AS_HELP_STRING([--disable-bgp-announce], [turn off BGP route announcement]))
AC_ARG_ENABLE([bgp-vnc],
AS_HELP_STRING([--disable-bgp-vnc],[turn off BGP VNC support]))
+AC_ARG_ENABLE([bgp-bmp],
+ AS_HELP_STRING([--disable-bgp-bmp],[turn off BGP BMP support]))
AC_ARG_ENABLE([snmp],
AS_HELP_STRING([--enable-snmp], [enable SNMP support for agentx]))
AC_ARG_ENABLE([config_rollbacks],
AC_CHECK_LIB([nsl], [main])
AC_CHECK_LIB([umem], [main])
SOLARIS="solaris"
+ AC_MSG_WARN([--Solaris support is being considered for deprecation, please let us know if you are still using this--])
;;
linux*)
AC_MSG_RESULT([Linux])
fi
AC_SUBST([HAVE_LIBPCREPOSIX])
+dnl ------------------
+dnl check C-Ares library
+dnl ------------------
+PKG_CHECK_MODULES([CARES], [libcares], [
+ c_ares_found=true
+],[
+ c_ares_found=false
+])
+AM_CONDITIONAL([CARES], [$c_ares_found])
+
dnl ##########################################################################
dnl test "${enable_clippy_only}" != "yes"
fi
NHRPD=""
case "$host_os" in
linux*)
- if test "${enable_nhrpd}" != "no"; then
- NHRPD="nhrpd"
- fi
+ case "${enable_nhrpd}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([nhrpd requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ NHRPD="nhrpd"
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ NHRPD="nhrpd"
+ fi
+ ;;
+ esac
;;
*)
if test "${enable_nhrpd}" = "yes"; then
AC_DEFINE([ENABLE_BGP_VNC], [1], [Enable BGP VNC support])
fi
+bgpd_bmp=false
+case "${enable_bmp}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([BMP support requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ bgpd_bmp=true
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ bgpd_bmp=true
+ fi
+ ;;
+esac
+
dnl ##########################################################################
dnl LARGE if block
if test "${enable_clippy_only}" != "yes"; then
dnl ##########################################################################
-dnl ------------------
-dnl check C-Ares library
-dnl ------------------
-if test "${NHRPD}" != ""; then
- PKG_CHECK_MODULES([CARES], [libcares], ,[
- AC_MSG_ERROR([trying to build nhrpd, but libcares not found. install c-ares and its -dev headers.])
- ])
-fi
-AM_CONDITIONAL([CARES], [test "${NHRPD}" != ""])
-
dnl ------------------
dnl check Net-SNMP library
dnl ------------------
dnl various features
AM_CONDITIONAL([SUPPORT_REALMS], [test "${enable_realms}" = "yes"])
AM_CONDITIONAL([ENABLE_BGP_VNC], [test x${enable_bgp_vnc} != xno])
+AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp])
dnl northbound
AM_CONDITIONAL([SQLITE3], [$SQLITE3])
AM_CONDITIONAL([CONFD], [test "x$enable_confd" != "x"])
usr/lib/frr/zebra
usr/lib/*/frr/modules/zebra_irdp.so
usr/lib/*/frr/modules/zebra_fpm.so
+usr/lib/*/frr/modules/bgpd_bmp.so
usr/share/doc/frr/examples
usr/share/man/
usr/share/yang/
Show a bgp peer summary for the specified address family, and subsequent
address-family.
+.. index:: show bgp [afi] [safi] summary failed [json]
+.. clicmd:: show bgp [afi] [safi] summary failed [json]
+
+  Show a bgp peer summary for peers that are not successfully exchanging routes
+ for the specified address family, and subsequent address-family.
+
.. index:: show bgp [afi] [safi] neighbor [PEER]
.. clicmd:: show bgp [afi] [safi] neighbor [PEER]
--- /dev/null
+.. _bmp:
+
+***
+BMP
+***
+
+:abbr:`BMP` (BGP Monitoring Protocol, :rfc:`7854`) is used to send monitoring
+data from BGP routers to network management entities.
+
+Implementation characteristics
+==============================
+
+The `BMP` implementation in FRR has the following properties:
+
+- only the :rfc:`7854` features are currently implemented. This means protocol
+ version 3 without any extensions. It is not possible to use an older draft
+ protocol version of BMP.
+
+- the following statistics codes are implemented:
+
+ - 0: count of prefixes rejected
+ - 2: count of duplicate prefix withdrawals
+ - 3: count of **prefixes** with loop in cluster id
+ - 4: count of **prefixes** with loop in AS-path
+ - 5: count of **prefixes** with loop in originator
+ - 11: count of updates subjected to :rfc:`7607` "treat as withdrawal"
+ handling due to errors
+ - 65531: *experimental* count of prefixes rejected due to invalid next-hop
+
+ Note that stat items 3, 4 and 5 are specified to count updates, but FRR
+ implements them as prefix-based counters.
+
+- **route mirroring** is fully implemented, however BGP OPEN messages are not
+ currently included in route mirroring messages. Their contents can be
+ extracted from the "peer up" notification for sessions that established
+ successfully. OPEN messages for failed sessions cannot currently be
+ mirrored.
+
+- **route monitoring** is available for IPv4 and IPv6 AFIs, unicast and
+ multicast SAFIs. Other SAFIs (VPN, Labeled-Unicast, Flowspec, etc.) are not
+ currently supported.
+
+- monitoring peers that have BGP **add-path** enabled on the session will
+ result in somewhat unpredictable behaviour. Currently, the outcome is:
+
+ - route mirroring functions as intended, messages are copied verbatim
+ - the add-path ID is never included in route monitoring messages
+ - if multiple paths were received from a peer, an unpredictable path is
+ picked and sent on the BMP session. The selection will differ for
+ pre-policy and post-policy monitoring sessions.
+ - as long as any path is present, something will be advertised on BMP
+    sessions. Only after the last path is gone, a withdrawal will be sent on
+ BMP sessions.
+ - updates to additional paths will trigger BMP route monitoring messages.
+ There is no guarantee on consistency regarding which path is sent in these
+ messages.
+
+- monitoring peers with :rfc:`5549` extended next-hops has not been tested.
+
+Starting BMP
+============
+
+BMP is implemented as a loadable module. This means that to use BMP, ``bgpd``
+must be started with the ``-M bmp`` option. It is not possible to enable BMP
+if ``bgpd`` was started without this option.
+
+Configuring BMP
+===============
+
+All of FRR's BMP configuration options are located inside the
+:clicmd:`router bgp ASN` block. Configure BGP first before proceeding to BMP
+setup.
+
+There is one option that applies to the BGP instance as a whole:
+
+.. index:: bmp mirror buffer-limit(0-4294967294)
+.. clicmd:: [no] bmp mirror buffer-limit(0-4294967294)
+
+ This sets the maximum amount of memory used for buffering BGP messages
+ (updates, keepalives, ...) for sending in BMP Route Mirroring.
+
+ The buffer is for the entire BGP instance; if multiple BMP targets are
+ configured they reference the same buffer and do not consume additional
+ memory. Queue overhead is included in accounting this memory, so the
+ actual space available for BGP messages is slightly less than the value
+ configured here.
+
+ If the buffer fills up, the oldest messages are removed from the buffer and
+ any BMP sessions where the now-removed messages were still pending have
+ their **entire** queue flushed and a "Mirroring Messages Lost" BMP message
+ is sent.
+
+ BMP Route Monitoring is not affected by this option.
+
+All other configuration is managed per targets:
+
+.. index:: bmp targets NAME
+.. clicmd:: [no] bmp targets NAME
+
+ Create/delete a targets group. As implied by the plural name, targets may
+ cover multiple outbound active BMP sessions as well as inbound passive
+ listeners.
+
+ If BMP sessions have the same configuration, putting them in the same
+ ``bmp targets`` will reduce overhead.
+
+BMP session configuration
+-------------------------
+
+Inside a ``bmp targets`` block, the following commands control session
+establishment:
+
+.. index:: bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+.. clicmd:: [no] bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+
+ Add/remove an active outbound BMP session. HOSTNAME is resolved via DNS,
+ if multiple addresses are returned they are tried in nondeterministic
+ order. Only one connection will be established even if multiple addresses
+ are returned. ``min-retry`` and ``max-retry`` specify (in milliseconds)
+ bounds for exponential backoff.
+
+.. warning::
+
+ ``ip access-list`` and ``ipv6 access-list`` are checked for outbound
+ connections resulting from ``bmp connect`` statements.
+
+.. index:: bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+.. clicmd:: [no] bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+
+ Accept incoming BMP sessions on the specified address and port. You can
+ use ``0.0.0.0`` and ``::`` to listen on all IPv4/IPv6 addresses.
+
+.. clicmd:: [no] ip access-list NAME
+.. clicmd:: [no] ipv6 access-list NAME
+
+ Restrict BMP sessions to the addresses allowed by the respective access
+ lists. The access lists are checked for both passive and active BMP
+ sessions. Changes do not affect currently established sessions.
+
+BMP data feed configuration
+---------------------------
+
+The following commands configure what BMP messages are sent on sessions
+associated with a particular ``bmp targets``:
+
+.. index:: bmp stats [interval (100-86400000)]
+.. clicmd:: [no] bmp stats [interval (100-86400000)]
+
+ Send BMP Statistics (counter) messages at the specified interval (in
+   milliseconds).
+
+.. index:: bmp monitor AFI SAFI <pre-policy|post-policy>
+.. clicmd:: [no] bmp monitor AFI SAFI <pre-policy|post-policy>
+
+ Perform Route Monitoring for the specified AFI and SAFI. Only IPv4 and
+ IPv6 are currently valid for AFI, and only unicast and multicast are valid
+ for SAFI. Other AFI/SAFI combinations may be added in the future.
+
+ All BGP neighbors are included in Route Monitoring. Options to select
+ a subset of BGP sessions may be added in the future.
+
+.. index:: bmp mirror
+.. clicmd:: [no] bmp mirror
+
+ Perform Route Mirroring for all BGP neighbors. Since this provides a
+ direct feed of BGP messages, there are no AFI/SAFI options to be
+ configured.
+
+ All BGP neighbors are included in Route Mirroring. Options to select
+ a subset of BGP sessions may be added in the future.
EIGRP Configuration
===================
-.. index:: router eigrp (1-65535)
-.. clicmd:: router eigrp (1-65535)
+.. index:: router eigrp (1-65535) [vrf NAME]
+.. clicmd:: router eigrp (1-65535) [vrf NAME]
The `router eigrp` command is necessary to enable EIGRP. To disable EIGRP,
use the `no router eigrp (1-65535)` command. EIGRP must be enabled before
- carrying out any of the EIGRP commands.
+ carrying out any of the EIGRP commands. Specify vrf NAME if you want
+ eigrp to work within the specified vrf.
-.. index:: no router eigrp (1-65535)
-.. clicmd:: no router eigrp (1-65535)
+.. index:: no router eigrp (1-65535) [vrf NAME]
+.. clicmd:: no router eigrp (1-65535) [vrf NAME]
Disable EIGRP.
Show EIGRP Information
======================
-.. index:: show ip eigrp topology
-.. clicmd:: show ip eigrp topology
+.. index:: show ip eigrp [vrf NAME] topology
+.. clicmd:: show ip eigrp [vrf NAME] topology
Display current EIGRP status.
P 10.0.2.0/24, 1 successors, FD is 256256, serno: 0
via Connected, enp0s3
+.. index:: show ip eigrp [vrf NAME] interface
+.. clicmd:: show ip eigrp [vrf NAME] interface
+
+ Display the list of interfaces associated with a particular eigrp
+ instance.
+
+.. index:: show ip eigrp [vrf NAME] neighbor
+.. clicmd:: show ip eigrp [vrf NAME] neighbor
+
+ Display the list of neighbors that have been established within
+ a particular eigrp instance.
EIGRP Debug Commands
====================
static
vnc
vrrp
+ bmp
########
Appendix
doc/user/ldpd.rst \
doc/user/basic.rst \
doc/user/bgp.rst \
+ doc/user/bmp.rst \
doc/user/bugs.rst \
doc/user/conf.py \
doc/user/eigrpd.rst \
DEFPY_NOSH(
router_eigrp,
router_eigrp_cmd,
- "router eigrp (1-65535)$as",
+ "router eigrp (1-65535)$as [vrf NAME]",
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
int rv;
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
rv = nb_cli_apply_changes(vty, NULL);
return rv;
}
-DEFPY_NOSH(
+DEFPY(
no_router_eigrp,
no_router_eigrp_cmd,
- "no router eigrp (1-65535)$as",
+ "no router eigrp (1-65535)$as [vrf NAME]",
NO_STR
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
return nb_cli_apply_changes(vty, NULL);
bool show_defaults)
{
const char *asn = yang_dnode_get_string(dnode, "./asn");
+ const char *vrf = yang_dnode_get_string(dnode, "./vrf");
- vty_out(vty, "router eigrp %s\n", asn);
+ vty_out(vty, "router eigrp %s", asn);
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
+ vty_out(vty, "\n");
}
void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode)
void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist)
{
+ struct eigrp *e = eigrp_lookup(ctx->vrf->vrf_id);
struct interface *ifp;
struct eigrp_interface *ei = NULL;
struct access_list *alist;
struct prefix_list *plist;
// struct route_map *routemap;
- struct eigrp *e;
/* if no interface address is present, set list to eigrp process struct
*/
- e = eigrp_lookup();
- assert(e != NULL);
/* Check if distribute-list was set for process or interface */
if (!dist->ifname) {
return;
}
- ifp = if_lookup_by_name(dist->ifname, VRF_DEFAULT);
+ ifp = if_lookup_by_name(dist->ifname, e->vrf_id);
if (ifp == NULL)
return;
struct distribute *dist;
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(ifp->vrf_id);
if (!eigrp)
return;
dist = distribute_lookup(eigrp->distribute_ctx, ifp->name);
*/
void eigrp_distribute_update_all(struct prefix_list *notused)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf;
struct interface *ifp;
- FOR_ALL_INTERFACES (vrf, ifp)
- eigrp_distribute_update_interface(ifp);
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ FOR_ALL_INTERFACES (vrf, ifp)
+ eigrp_distribute_update_interface(ifp);
+ }
}
/*
extern void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist);
-extern void eigrp_distribute_update_interface(struct interface *);
-extern void eigrp_distribute_update_all(struct prefix_list *);
-extern void eigrp_distribute_update_all_wrapper(struct access_list *);
-extern int eigrp_distribute_timer_process(struct thread *);
-extern int eigrp_distribute_timer_interface(struct thread *);
+extern void eigrp_distribute_update_interface(struct interface *ifp);
+extern void eigrp_distribute_update_all(struct prefix_list *plist);
+extern void eigrp_distribute_update_all_wrapper(struct access_list *alist);
+extern int eigrp_distribute_timer_process(struct thread *thread);
+extern int eigrp_distribute_timer_interface(struct thread *thread);
#endif /* EIGRPD_EIGRP_FILTER_H_ */
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
prefix->state = EIGRP_FSM_STATE_ACTIVE_3;
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg)
{
- struct eigrp *eigrp;
+ struct eigrp *eigrp = msg->eigrp;
struct eigrp_prefix_entry *prefix = msg->prefix;
struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries);
if (msg->packet_type == EIGRP_OPC_QUERY)
eigrp_send_reply(msg->adv_router, prefix);
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
- eigrp = eigrp_lookup();
- assert(eigrp);
listnode_add(eigrp->topology_changes_internalIPV4,
prefix);
}
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
}
if (msg->packet_type == EIGRP_OPC_QUERY)
prefix->state = EIGRP_FSM_STATE_PASSIVE;
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
}
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
prefix->rdistance = prefix->distance = best_successor->distance;
prefix->reported_metric = best_successor->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* Expedited hello sent */
eigrp_hello_send(nbr->ei, EIGRP_HELLO_NORMAL, NULL);
"Neighbor %s (%s) is down: Interface PEER-TERMINATION received",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_delete(nbr);
return NULL;
} else {
"Neighbor %s (%s) going down: Kvalue mismatch",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_DOWN);
}
}
static void eigrp_peer_termination_decode(struct eigrp_neighbor *nbr,
struct eigrp_tlv_hdr_type *tlv)
{
+ struct eigrp *eigrp = nbr->ei->eigrp;
struct TLV_Peer_Termination_type *param =
(struct TLV_Peer_Termination_type *)tlv;
if (my_ip == received_ip) {
zlog_info("Neighbor %s (%s) is down: Peer Termination received",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
/* delete neighbor */
if (IS_DEBUG_EIGRP_PACKET(eigrph->opcode - 1, RECV))
zlog_debug("Processing Hello size[%u] int(%s) nbr(%s)", size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src));
size -= EIGRP_HEADER_LEN;
* Part of conditional receive process
*
*/
-static uint16_t eigrp_sequence_encode(struct stream *s)
+static uint16_t eigrp_sequence_encode(struct eigrp *eigrp, struct stream *s)
{
uint16_t length = EIGRP_TLV_SEQ_BASE_LEN;
- struct eigrp *eigrp;
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
size_t backup_end, size_end;
int found;
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
-
// add in the parameters TLV
backup_end = stream_get_endp(s);
stream_putw(s, EIGRP_TLV_SEQ);
* Part of conditional receive process
*
*/
-static uint16_t eigrp_next_sequence_encode(struct stream *s)
+static uint16_t eigrp_next_sequence_encode(struct eigrp *eigrp,
+ struct stream *s)
{
uint16_t length = EIGRP_NEXT_SEQUENCE_TLV_SIZE;
- struct eigrp *eigrp;
-
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
// add in the parameters TLV
stream_putw(s, EIGRP_TLV_NEXT_MCAST_SEQ);
length += eigrp_sw_version_encode(ep->s);
if (flags & EIGRP_HELLO_ADD_SEQUENCE) {
- length += eigrp_sequence_encode(ep->s);
- length += eigrp_next_sequence_encode(ep->s);
+ length += eigrp_sequence_encode(ei->eigrp, ep->s);
+ length += eigrp_next_sequence_encode(ei->eigrp, ep->s);
}
// add in the TID list if doing multi-topology
eigrp_prefix_entry_add(eigrp->topology_table, pe);
listnode_add(eigrp->topology_changes_internalIPV4, pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei2)) {
eigrp_update_send(ei2);
struct eigrp_fsm_action_message msg;
ne->prefix = pe;
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
msg.packet_type = EIGRP_OPC_UPDATE;
msg.eigrp = eigrp;
{
struct prefix dest_addr;
struct eigrp_prefix_entry *pe;
- struct eigrp *eigrp = eigrp_lookup();
-
- if (!eigrp)
- return;
+ struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
THREAD_OFF(ei->t_hello);
pe = eigrp_topology_table_lookup_ipv4(eigrp->topology_table,
&dest_addr);
if (pe)
- eigrp_prefix_entry_delete(eigrp->topology_table, pe);
+ eigrp_prefix_entry_delete(eigrp, eigrp->topology_table, pe);
eigrp_if_down(ei);
#include "eigrpd/eigrp_snmp.h"
#include "eigrpd/eigrp_filter.h"
#include "eigrpd/eigrp_errors.h"
+#include "eigrpd/eigrp_vrf.h"
//#include "eigrpd/eigrp_routemap.h"
/* eigprd privileges */
master = eigrp_om->master;
eigrp_error_init();
+ eigrp_vrf_init();
vrf_init(NULL, NULL, NULL, NULL, NULL);
/*EIGRPd init*/
int holddown_timer_expired(struct thread *thread)
{
- struct eigrp_neighbor *nbr;
-
- nbr = THREAD_ARG(thread);
+ struct eigrp_neighbor *nbr = THREAD_ARG(thread);
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_info("Neighbor %s (%s) is down: holding time expired",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
nbr->state = EIGRP_NEIGHBOR_DOWN;
eigrp_nbr_delete(nbr);
}
}
-int eigrp_nbr_count_get(void)
+int eigrp_nbr_count_get(struct eigrp *eigrp)
{
struct eigrp_interface *iface;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- struct eigrp *eigrp = eigrp_lookup();
uint32_t counter;
- if (eigrp == NULL) {
- zlog_debug("EIGRP Routing Process not enabled");
- return 0;
- }
-
counter = 0;
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) {
for (ALL_LIST_ELEMENTS(iface->nbrs, node2, nnode2, nbr)) {
*/
void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty)
{
- if (nbr == NULL) {
- flog_err(EC_EIGRP_CONFIG,
- "Nbr Hard restart: Neighbor not specified.");
- return;
- }
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty, "Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
}
/* send Hello with Peer Termination TLV */
#define _ZEBRA_EIGRP_NEIGHBOR_H
/* Prototypes */
-extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *,
- struct eigrp_header *, struct ip *);
-extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *);
-extern void eigrp_nbr_delete(struct eigrp_neighbor *);
+extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
+ struct eigrp_header *,
+ struct ip *addr);
+extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei);
+extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
-extern int holddown_timer_expired(struct thread *);
+extern int holddown_timer_expired(struct thread *thread);
-extern int eigrp_neighborship_check(struct eigrp_neighbor *,
- struct TLV_Parameter_Type *);
-extern void eigrp_nbr_state_update(struct eigrp_neighbor *);
-extern void eigrp_nbr_state_set(struct eigrp_neighbor *, uint8_t state);
-extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *);
-extern int eigrp_nbr_count_get(void);
-extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr(struct eigrp_interface *,
- struct in_addr *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr_process(struct eigrp *,
- struct in_addr);
+extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
+ struct TLV_Parameter_Type *tlv);
+extern void eigrp_nbr_state_update(struct eigrp_neighbor *neigh);
+extern void eigrp_nbr_state_set(struct eigrp_neighbor *neigh, uint8_t state);
+extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *neigh);
+extern int eigrp_nbr_count_get(struct eigrp *eigrp);
+extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *neigh);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr(struct eigrp_interface *ei, struct in_addr *addr);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr_process(struct eigrp *eigrp, struct in_addr addr);
extern void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty);
extern int eigrp_nbr_split_horizon_check(struct eigrp_nexthop_entry *ne,
static void eigrp_network_run_interface(struct eigrp *, struct prefix *,
struct interface *);
-int eigrp_sock_init(void)
+int eigrp_sock_init(struct vrf *vrf)
{
int eigrp_sock;
int ret;
#endif
frr_elevate_privs(&eigrpd_privs) {
- eigrp_sock = socket(AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP);
+ eigrp_sock = vrf_socket(
+ AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id,
+ vrf->vrf_id != VRF_DEFAULT ? vrf->name : NULL);
if (eigrp_sock < 0) {
zlog_err("eigrp_read_sock_init: socket: %s",
safe_strerror(errno));
int eigrp_network_set(struct eigrp *eigrp, struct prefix *p)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct route_node *rn;
struct interface *ifp;
{
struct listnode *node, *nnode;
struct route_node *rn;
- struct eigrp *eigrp;
+ struct eigrp *eigrp = eigrp_lookup(ifp->vrf_id);
/*
* In the event there are multiple eigrp autonymnous systems running,
* we need to check eac one and add the interface as approperate
*/
for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp)) {
+ if (ifp->vrf_id != eigrp->vrf_id)
+ continue;
+
/* EIGRP must be on and Router-ID must be configured. */
if (eigrp->router_id.s_addr == 0)
continue;
/* Prototypes */
-extern int eigrp_sock_init(void);
+extern int eigrp_sock_init(struct vrf *vrf);
extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int);
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
union nb_resource *resource)
{
struct eigrp *eigrp;
+ const char *vrf;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
/* NOTHING */
break;
case NB_EV_PREPARE:
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ vrf = yang_dnode_get_string(dnode, "./vrf");
+ vrfid = vrf_name_to_id(vrf);
+
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"), vrfid);
resource->ptr = eigrp;
break;
case NB_EV_ABORT:
union nb_resource *resource)
{
struct eigrp_metrics metrics;
+ const char *vrfname;
struct eigrp *eigrp;
uint32_t proto;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
proto = yang_dnode_get_enum(dnode, "./protocol");
- if (vrf_bitmap_check(zclient->redist[AFI_IP][proto],
- VRF_DEFAULT))
+ vrfname = yang_dnode_get_string(dnode, "../vrf");
+ vrfid = vrf_name_to_id(vrfname);
+ if (vrf_bitmap_check(zclient->redist[AFI_IP][proto], vrfid))
return NB_ERR_INCONSISTENCY;
break;
case NB_EV_PREPARE:
break;
}
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(dnode, NULL, true);
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
static unsigned char zeropad[16] = {0};
/* Forward function reference*/
-static struct stream *eigrp_recv_packet(int, struct interface **,
- struct stream *);
-static int eigrp_verify_header(struct stream *, struct eigrp_interface *,
- struct ip *, struct eigrp_header *);
-static int eigrp_check_network_mask(struct eigrp_interface *, struct in_addr);
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp, int fd,
+ struct interface **ifp,
+ struct stream *s);
+static int eigrp_verify_header(struct stream *s, struct eigrp_interface *ei,
+ struct ip *addr, struct eigrp_header *header);
+static int eigrp_check_network_mask(struct eigrp_interface *ei,
+ struct in_addr mask);
static int eigrp_retrans_count_exceeded(struct eigrp_packet *ep,
struct eigrp_neighbor *nbr)
thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
- if (!(ibuf = eigrp_recv_packet(eigrp->fd, &ifp, eigrp->ibuf))) {
+ if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
/* This raw packet is known to be at least as big as its IP
* header. */
return -1;
ifindex
retrieval but do not. */
c = if_lookup_address((void *)&iph->ip_src, AF_INET,
- VRF_DEFAULT);
+ eigrp->vrf_id);
if (c == NULL)
return 0;
return 0;
}
-static struct stream *eigrp_recv_packet(int fd, struct interface **ifp,
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp,
+ int fd, struct interface **ifp,
struct stream *ibuf)
{
int ret;
ifindex = getsockopt_ifindex(AF_INET, &msgh);
- *ifp = if_lookup_by_index(ifindex, VRF_DEFAULT);
+ *ifp = if_lookup_by_index(ifindex, eigrp->vrf_id);
if (ret != ip_len) {
zlog_warn(
static int eigrp_route_match_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
case RMAP_RULE_MISSING:
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally not handling these cases
+ */
break;
}
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_delete_match(index, command, arg);
switch (ret) {
case RMAP_RULE_MISSING:
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally ignored
+ */
break;
}
static int eigrp_route_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
}
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally left blank here
+ */
break;
}
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally not handled
+ */
break;
}
};
struct eigrp {
+ vrf_id_t vrf_id;
+
uint16_t AS; /* Autonomous system number */
uint16_t vrid; /* Virtual Router ID */
uint8_t k_values[6]; /*Array for K values configuration*/
struct list *eiflist; /* eigrp interfaces */
uint8_t passive_interface_default; /* passive-interface default */
- unsigned int fd;
+ int fd;
unsigned int maxsndbuflen;
uint32_t sequence_number; /*Global EIGRP sequence number*/
/*
* Freeing topology table list
*/
-void eigrp_topology_free(struct route_table *table)
+void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table)
{
- eigrp_topology_delete_all(table);
+ eigrp_topology_delete_all(eigrp, table);
route_table_finish(table);
}
/*
* Adding topology entry to topology node
*/
-void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
struct list *l = list_new();
listnode_add_sort(node->entries, entry);
entry->prefix = node;
- eigrp_zebra_route_add(node->destination, l, node->fdistance);
+ eigrp_zebra_route_add(eigrp, node->destination,
+ l, node->fdistance);
}
list_delete(&l);
/*
* Deleting topology node from topology table
*/
-void eigrp_prefix_entry_delete(struct route_table *table,
+void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table,
struct eigrp_prefix_entry *pe)
{
- struct eigrp *eigrp = eigrp_lookup();
struct eigrp_nexthop_entry *ne;
struct listnode *node, *nnode;
struct route_node *rn;
listnode_delete(eigrp->topology_changes_internalIPV4, pe);
for (ALL_LIST_ELEMENTS(pe->entries, node, nnode, ne))
- eigrp_nexthop_entry_delete(pe, ne);
+ eigrp_nexthop_entry_delete(eigrp, pe, ne);
list_delete(&pe->entries);
list_delete(&pe->rij);
- eigrp_zebra_route_delete(pe->destination);
+ eigrp_zebra_route_delete(eigrp, pe->destination);
prefix_free(pe->destination);
rn->info = NULL;
/*
* Deleting topology entry from topology node
*/
-void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
if (listnode_lookup(node->entries, entry) != NULL) {
listnode_delete(node->entries, entry);
- eigrp_zebra_route_delete(node->destination);
+ eigrp_zebra_route_delete(eigrp, node->destination);
XFREE(MTYPE_EIGRP_NEXTHOP_ENTRY, entry);
}
}
/*
* Deleting all nodes from topology table
*/
-void eigrp_topology_delete_all(struct route_table *topology)
+void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *topology)
{
struct route_node *rn;
struct eigrp_prefix_entry *pe;
if (!pe)
continue;
- eigrp_prefix_entry_delete(topology, pe);
+ eigrp_prefix_entry_delete(eigrp, topology, pe);
}
}
if (!pe)
continue;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
}
}
-void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
+void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *dest)
{
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- struct eigrp *eigrp = eigrp_lookup();
-
- assert(eigrp);
for (ALL_LIST_ELEMENTS_RO(dest->entries, node, entry)) {
if (entry->reported_distance < dest->fdistance) {
}
}
-void eigrp_update_routing_table(struct eigrp_prefix_entry *prefix)
+void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *prefix)
{
- struct eigrp *eigrp = eigrp_lookup();
struct list *successors;
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- if (!eigrp)
- return;
-
successors = eigrp_topology_get_successor_max(prefix, eigrp->max_paths);
if (successors) {
- eigrp_zebra_route_add(prefix->destination, successors,
+ eigrp_zebra_route_add(eigrp, prefix->destination, successors,
prefix->fdistance);
for (ALL_LIST_ELEMENTS_RO(successors, node, entry))
entry->flags |= EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
list_delete(&successors);
} else {
- eigrp_zebra_route_delete(prefix->destination);
+ eigrp_zebra_route_delete(eigrp, prefix->destination);
for (ALL_LIST_ELEMENTS_RO(prefix->entries, node, entry))
entry->flags &= ~EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
}
eigrp_update_send_all(eigrp, nbr->ei);
}
-void eigrp_update_topology_table_prefix(struct route_table *table,
+void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *prefix)
{
struct listnode *node1, *node2;
struct eigrp_nexthop_entry *entry;
for (ALL_LIST_ELEMENTS(prefix->entries, node1, node2, entry)) {
if (entry->distance == EIGRP_MAX_METRIC) {
- eigrp_nexthop_entry_delete(prefix, entry);
+ eigrp_nexthop_entry_delete(eigrp, prefix, entry);
}
}
if (prefix->distance == EIGRP_MAX_METRIC
&& prefix->nt != EIGRP_TOPOLOGY_TYPE_CONNECTED) {
- eigrp_prefix_entry_delete(table, prefix);
+ eigrp_prefix_entry_delete(eigrp, table, prefix);
}
}
extern void eigrp_topology_init(struct route_table *table);
extern struct eigrp_prefix_entry *eigrp_prefix_entry_new(void);
extern struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void);
-extern void eigrp_topology_free(struct route_table *table);
+extern void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table);
extern void eigrp_prefix_entry_add(struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_prefix_entry_delete(struct route_table *table,
+extern void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_prefix_entry_delete(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_topology_delete_all(struct route_table *table);
+extern void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *table);
extern struct eigrp_prefix_entry *
eigrp_topology_table_lookup_ipv4(struct route_table *table, struct prefix *p);
-extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *);
+extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *pe);
extern struct list *
eigrp_topology_get_successor_max(struct eigrp_prefix_entry *pe,
unsigned int maxpaths);
extern struct eigrp_nexthop_entry *
-eigrp_prefix_entry_lookup(struct list *, struct eigrp_neighbor *);
-extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_topology_update_all_node_flags(struct eigrp *);
-extern void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *);
+eigrp_prefix_entry_lookup(struct list *entries, struct eigrp_neighbor *neigh);
+extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *eigrp,
+ struct eigrp_neighbor *n);
+extern void eigrp_topology_update_all_node_flags(struct eigrp *eigrp);
+extern void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
extern enum metric_change
-eigrp_topology_update_distance(struct eigrp_fsm_action_message *);
-extern void eigrp_update_routing_table(struct eigrp_prefix_entry *);
-extern void eigrp_topology_neighbor_down(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_update_topology_table_prefix(struct route_table *table,
+eigrp_topology_update_distance(struct eigrp_fsm_action_message *msg);
+extern void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
+extern void eigrp_topology_neighbor_down(struct eigrp *eigrp,
+ struct eigrp_neighbor *neigh);
+extern void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
#endif
zlog_debug(
"Processing Update size[%u] int(%s) nbr(%s) seq [%u] flags [%0x]",
size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src), nbr->recv_sequence_number, flags);
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
zlog_info("Neighbor %s (%s) is down: peer restarted",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_PENDING);
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_update_send_init(nbr);
}
}
eigrp_prefix_entry_add(eigrp->topology_table,
pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
pe->distance = pe->fdistance = pe->rdistance =
ne->distance;
pe->reported_metric = ne->total_metric;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
pe->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(
zlog_info(
"Neighbor %s (%s) is resync: route configuration changed",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
} else if (gr_type == EIGRP_GR_MANUAL) {
/* Graceful restart was called manually */
zlog_info("Neighbor %s (%s) is resync: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is resync: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex,
+ eigrp->vrf_id));
}
}
--- /dev/null
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+
+#include "vrf.h"
+
+#include "eigrpd/eigrp_vrf.h"
+
+static int eigrp_vrf_new(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_enable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_disable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_delete(struct vrf *vrf)
+{
+ return 0;
+}
+
+void eigrp_vrf_init(void)
+{
+ vrf_init(eigrp_vrf_new, eigrp_vrf_enable,
+ eigrp_vrf_disable, eigrp_vrf_delete, NULL);
+}
--- /dev/null
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __EIGRP_VRF_H__
+
+extern void eigrp_vrf_init(void);
+#endif
}
}
+static struct eigrp *eigrp_vty_get_eigrp(struct vty *vty, const char *vrf_name)
+{
+ struct vrf *vrf;
+
+ if (vrf_name)
+ vrf = vrf_lookup_by_name(vrf_name);
+ else
+ vrf = vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (!vrf) {
+ vty_out(vty, "VRF %s specified does not exist",
+ vrf_name ? vrf_name : VRF_DEFAULT_NAME);
+ return NULL;
+ }
+
+ return eigrp_lookup(vrf->vrf_id);
+}
+
DEFPY (show_ip_eigrp_topology_all,
show_ip_eigrp_topology_all_cmd,
- "show ip eigrp topology [all-links$all]",
+ "show ip eigrp [vrf NAME] topology [all-links$all]",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"Show all links in topology table\n")
{
struct eigrp_prefix_entry *tn;
struct route_node *rn;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
DEFPY (show_ip_eigrp_topology,
show_ip_eigrp_topology_cmd,
- "show ip eigrp topology <A.B.C.D$address|A.B.C.D/M$prefix>",
+ "show ip eigrp [vrf NAME] topology <A.B.C.D$address|A.B.C.D/M$prefix>",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"For a specific address\n"
"For a specific prefix\n")
struct route_node *rn;
struct prefix cmp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_interfaces,
+DEFPY (show_ip_eigrp_interfaces,
show_ip_eigrp_interfaces_cmd,
- "show ip eigrp interfaces [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] interfaces [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP interfaces\n"
"Interface name to look at\n"
"Detailed information\n")
struct eigrp_interface *ei;
struct eigrp *eigrp;
struct listnode *node;
- int idx = 0;
- bool detail = false;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, "EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- if (argv_find(argv, argc, "detail", &idx))
- detail = true;
-
if (!ifname)
show_ip_eigrp_interface_header(vty, eigrp);
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_neighbors,
+DEFPY (show_ip_eigrp_neighbors,
show_ip_eigrp_neighbors_cmd,
- "show ip eigrp neighbors [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] neighbors [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP neighbors\n"
"Interface to show on\n"
"Detailed Information\n")
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- bool detail = false;
- int idx = 0;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- detail = (argv_find(argv, argc, "detail", &idx));
-
show_ip_eigrp_neighbor_header(vty, eigrp);
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
if (detail || (nbr->state == EIGRP_NEIGHBOR_UP))
show_ip_eigrp_neighbor_sub(vty, nbr,
- detail);
+ !!detail);
}
}
}
/*
* Execute hard restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors,
+DEFPY (clear_ip_eigrp_neighbors,
clear_ip_eigrp_neighbors_cmd,
- "clear ip eigrp neighbors",
+ "clear ip eigrp [vrf NAME] neighbors",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
"Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
/*
* Execute hard restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int,
+DEFPY (clear_ip_eigrp_neighbors_int,
clear_ip_eigrp_neighbors_int_cmd,
- "clear ip eigrp neighbors IFNAME",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n")
{
struct eigrp_interface *ei;
struct listnode *node2, *nnode2;
struct eigrp_neighbor *nbr;
- int idx = 0;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- argv_find(argv, argc, "IFNAME", &idx);
- ei = eigrp_if_lookup_by_name(eigrp, argv[idx]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
- vty_out(vty, " Interface (%s) doesn't exist\n", argv[idx]->arg);
+ vty_out(vty, " Interface (%s) doesn't exist\n", ifname);
return CMD_WARNING;
}
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
/*
* Execute hard restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP,
+DEFPY (clear_ip_eigrp_neighbors_IP,
clear_ip_eigrp_neighbors_IP_cmd,
- "clear ip eigrp neighbors A.B.C.D",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
-
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
/*
* Execute graceful restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors_soft,
+DEFPY (clear_ip_eigrp_neighbors_soft,
clear_ip_eigrp_neighbors_soft_cmd,
- "clear ip eigrp neighbors soft",
+ "clear ip eigrp [vrf NAME] neighbors soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Resync with peers without adjacency reset\n")
{
struct eigrp *eigrp;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
/*
* Execute graceful restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int_soft,
+DEFPY (clear_ip_eigrp_neighbors_int_soft,
clear_ip_eigrp_neighbors_int_soft_cmd,
- "clear ip eigrp neighbors IFNAME soft",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n"
"Resync with peer without adjacency reset\n")
struct eigrp_interface *ei;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- ei = eigrp_if_lookup_by_name(eigrp, argv[4]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
vty_out(vty, " Interface (%s) doesn't exist\n", argv[4]->arg);
return CMD_WARNING;
/*
* Execute graceful restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP_soft,
+DEFPY (clear_ip_eigrp_neighbors_IP_soft,
clear_ip_eigrp_neighbors_IP_soft_cmd,
- "clear ip eigrp neighbors A.B.C.D soft",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n"
"Resync with peer without adjacency reset\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse: %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
static int eigrp_interface_address_delete(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_up(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_down(ZAPI_CALLBACK_ARGS);
-static struct interface *zebra_interface_if_lookup(struct stream *);
+static struct interface *zebra_interface_if_lookup(struct stream *,
+ vrf_id_t vrf_id);
static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS);
router_id_zebra = router_id.u.prefix4;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp != NULL)
eigrp_router_id_update(eigrp);
if (IPV4_NET127(ntohl(api.prefix.u.prefix4.s_addr)))
return 0;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL)
return 0;
{
struct interface *ifp;
- ifp = zebra_interface_if_lookup(zclient->ibuf);
+ ifp = zebra_interface_if_lookup(zclient->ibuf, vrf_id);
if (ifp == NULL)
return 0;
return 0;
}
-static struct interface *zebra_interface_if_lookup(struct stream *s)
+static struct interface *zebra_interface_if_lookup(struct stream *s,
+ vrf_id_t vrf_id)
{
char ifname_tmp[INTERFACE_NAMSIZ];
stream_get(ifname_tmp, s, INTERFACE_NAMSIZ);
/* And look it up. */
- return if_lookup_by_name(ifname_tmp, VRF_DEFAULT);
+ return if_lookup_by_name(ifname_tmp, vrf_id);
}
-void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
- uint32_t distance)
+void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
api.metric = distance;
if (count >= MULTIPATH_NUM)
break;
api_nh = &api.nexthops[count];
- api_nh->vrf_id = VRF_DEFAULT;
+ api_nh->vrf_id = eigrp->vrf_id;
if (te->adv_router->src.s_addr) {
api_nh->gate.ipv4 = te->adv_router->src;
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
-void eigrp_zebra_route_delete(struct prefix *p)
+void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *p)
{
struct zapi_route api;
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
return;
}
-int eigrp_is_type_redistributed(int type)
+static int eigrp_is_type_redistributed(int type, vrf_id_t vrf_id)
{
return ((DEFAULT_ROUTE_TYPE(type))
? vrf_bitmap_check(zclient->default_information[AFI_IP],
- VRF_DEFAULT)
+ vrf_id)
: vrf_bitmap_check(zclient->redist[AFI_IP][type],
- VRF_DEFAULT));
+ vrf_id));
}
int eigrp_redistribute_set(struct eigrp *eigrp, int type,
struct eigrp_metrics metric)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
if (eigrp_metrics_is_same(metric, eigrp->dmetric[type])) {
eigrp->dmetric[type] = metric;
}
eigrp->dmetric[type] = metric;
zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, AFI_IP, type, 0,
- VRF_DEFAULT);
+ eigrp->vrf_id);
++eigrp->redistribute;
int eigrp_redistribute_unset(struct eigrp *eigrp, int type)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
memset(&eigrp->dmetric[type], 0, sizeof(struct eigrp_metrics));
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP,
- type, 0, VRF_DEFAULT);
+ type, 0, eigrp->vrf_id);
--eigrp->redistribute;
}
extern void eigrp_zebra_init(void);
-extern void eigrp_zebra_route_add(struct prefix *, struct list *,
- uint32_t distance);
-extern void eigrp_zebra_route_delete(struct prefix *);
+extern void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance);
+extern void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *);
extern int eigrp_redistribute_set(struct eigrp *, int, struct eigrp_metrics);
extern int eigrp_redistribute_unset(struct eigrp *, int);
-extern int eigrp_is_type_redistributed(int);
#endif /* _ZEBRA_EIGRP_ZEBRA_H_ */
struct eigrp_master *eigrp_om;
-static struct eigrp *eigrp_new(const char *);
-
extern struct zclient *zclient;
extern struct in_addr router_id_zebra;
*/
void eigrp_router_id_update(struct eigrp *eigrp)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct interface *ifp;
struct in_addr router_id, router_id_old;
}
/* Allocate new eigrp structure. */
-static struct eigrp *eigrp_new(const char *AS)
+static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp = XCALLOC(MTYPE_EIGRP_TOP, sizeof(struct eigrp));
- int eigrp_socket;
/* init information relevant to peers */
+ eigrp->vrf_id = vrf_id;
eigrp->vrid = 0;
- eigrp->AS = atoi(AS);
+ eigrp->AS = as;
eigrp->router_id.s_addr = 0;
eigrp->router_id_static.s_addr = 0;
eigrp->sequence_number = 1;
eigrp->passive_interface_default = EIGRP_IF_ACTIVE;
eigrp->networks = eigrp_topology_new();
- if ((eigrp_socket = eigrp_sock_init()) < 0) {
+ eigrp->fd = eigrp_sock_init(vrf_lookup_by_id(vrf_id));
+
+ if (eigrp->fd < 0) {
flog_err_sys(
EC_LIB_SOCKET,
"eigrp_new: fatal error: eigrp_sock_init was unable to open a socket");
exit(1);
}
- eigrp->fd = eigrp_socket;
eigrp->maxsndbuflen = getsockopt_so_sendbuf(eigrp->fd);
eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);
eigrp->routemap[EIGRP_FILTER_OUT] = NULL;
/* Distribute list install. */
- eigrp->distribute_ctx = distribute_list_ctx_create(
- vrf_lookup_by_id(VRF_DEFAULT));
+ eigrp->distribute_ctx =
+ distribute_list_ctx_create(vrf_lookup_by_id(eigrp->vrf_id));
distribute_list_add_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
distribute_list_delete_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
/*
- eigrp->if_rmap_ctx = if_rmap_ctx_create(
- VRF_DEFAULT_NAME);
+ eigrp->if_rmap_ctx = if_rmap_ctx_create(eigrp->vrf_id);
if_rmap_hook_add (eigrp_if_rmap_update);
if_rmap_hook_delete (eigrp_if_rmap_update);
*/
return eigrp;
}
-struct eigrp *eigrp_get(const char *AS)
+struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL) {
- eigrp = eigrp_new(AS);
+ eigrp = eigrp_new(as, vrf_id);
listnode_add(eigrp_om->eigrp, eigrp);
}
list_delete(&eigrp->eiflist);
list_delete(&eigrp->oi_write_q);
- eigrp_topology_free(eigrp->topology_table);
+ eigrp_topology_free(eigrp, eigrp->topology_table);
eigrp_nbr_delete(eigrp->neighbor_self);
}
/*Look for existing eigrp process*/
-struct eigrp *eigrp_lookup(void)
+struct eigrp *eigrp_lookup(vrf_id_t vrf_id)
{
- if (listcount(eigrp_om->eigrp) == 0)
- return NULL;
+ struct eigrp *eigrp;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp))
+ if (eigrp->vrf_id == vrf_id)
+ return eigrp;
- return listgetdata(listhead(eigrp_om->eigrp));
+ return NULL;
}
extern void eigrp_terminate(void);
extern void eigrp_finish_final(struct eigrp *);
extern void eigrp_finish(struct eigrp *);
-extern struct eigrp *eigrp_get(const char *);
-extern struct eigrp *eigrp_lookup(void);
+extern struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id);
+extern struct eigrp *eigrp_lookup(vrf_id_t vrf_id);
extern void eigrp_router_id_update(struct eigrp *);
/* eigrp_cli.c */
eigrpd/eigrp_snmp.c \
eigrpd/eigrp_topology.c \
eigrpd/eigrp_update.c \
+ eigrpd/eigrp_vrf.c \
eigrpd/eigrp_vty.c \
eigrpd/eigrp_zebra.c \
eigrpd/eigrpd.c \
eigrpd/eigrp_packet.h \
eigrpd/eigrp_snmp.h \
eigrpd/eigrp_structs.h \
+ eigrpd/eigrp_vrf.h \
eigrpd/eigrp_vty.h \
eigrpd/eigrp_zebra.h \
# end
DEFINE_MTYPE(ISISD, ISIS_VERTEX, "ISIS vertex")
DEFINE_MTYPE(ISISD, ISIS_ROUTE_INFO, "ISIS route info")
DEFINE_MTYPE(ISISD, ISIS_NEXTHOP, "ISIS nexthop")
-DEFINE_MTYPE(ISISD, ISIS_NEXTHOP6, "ISIS nexthop6")
DEFINE_MTYPE(ISISD, ISIS_DICT, "ISIS dictionary")
DEFINE_MTYPE(ISISD, ISIS_DICT_NODE, "ISIS dictionary node")
DEFINE_MTYPE(ISISD, ISIS_EXT_ROUTE, "ISIS redistributed route")
DECLARE_MTYPE(ISIS_VERTEX)
DECLARE_MTYPE(ISIS_ROUTE_INFO)
DECLARE_MTYPE(ISIS_NEXTHOP)
-DECLARE_MTYPE(ISIS_NEXTHOP6)
DECLARE_MTYPE(ISIS_DICT)
DECLARE_MTYPE(ISIS_DICT_NODE)
DECLARE_MTYPE(ISIS_EXT_ROUTE)
circuit = nb_running_unset_entry(dnode);
if (!circuit)
return NB_ERR_INCONSISTENCY;
- /* delete circuit through csm changes */
- switch (circuit->state) {
- case C_STATE_UP:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
+ if (circuit->state == C_STATE_UP || circuit->state == C_STATE_CONF)
isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_CONF:
- isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_INIT:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
- break;
- }
return NB_OK;
}
#include "linklist.h"
#include "vty.h"
#include "log.h"
+#include "lib_errors.h"
#include "memory.h"
#include "prefix.h"
#include "hash.h"
#include "isis_route.h"
#include "isis_zebra.h"
-static struct isis_nexthop *isis_nexthop_create(struct in_addr *ip,
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex);
+
+/* Get a locked nexthop for (family, ip, ifindex): reuse a matching entry
+ * from the global isis->nexthops cache if present, otherwise allocate a
+ * new one and add it to the cache. Each call takes one reference (lock).
+ */
+static struct isis_nexthop *isis_nexthop_create(int family, union g_addr *ip,
 ifindex_t ifindex)
{
- struct listnode *node;
 struct isis_nexthop *nexthop;
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops, node, nexthop)) {
- if (nexthop->ifindex != ifindex)
- continue;
- if (ip && memcmp(&nexthop->ip, ip, sizeof(struct in_addr)) != 0)
- continue;
-
+ nexthop = nexthoplookup(isis->nexthops, family, ip, ifindex);
+ if (nexthop) {
 nexthop->lock++;
 return nexthop;
 }
 nexthop = XCALLOC(MTYPE_ISIS_NEXTHOP, sizeof(struct isis_nexthop));
+ nexthop->family = family;
 nexthop->ifindex = ifindex;
- memcpy(&nexthop->ip, ip, sizeof(struct in_addr));
+ nexthop->ip = *ip;
 listnode_add(isis->nexthops, nexthop);
 nexthop->lock++;
 return nexthop;
}
-static int nexthoplookup(struct list *nexthops, struct in_addr *ip,
- ifindex_t ifindex)
+/* Find a nexthop on `nexthops` matching family, address and ifindex.
+ * Returns the entry (reference count untouched) or NULL if not found;
+ * aborts on an address family other than AF_INET/AF_INET6.
+ */
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex)
{
struct listnode *node;
struct isis_nexthop *nh;
for (ALL_LIST_ELEMENTS_RO(nexthops, node, nh)) {
- if (!(memcmp(ip, &nh->ip, sizeof(struct in_addr)))
- && ifindex == nh->ifindex)
- return 1;
- }
-
- return 0;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_new(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct isis_nexthop6 *nexthop6;
-
- nexthop6 = XCALLOC(MTYPE_ISIS_NEXTHOP6, sizeof(struct isis_nexthop6));
-
- nexthop6->ifindex = ifindex;
- memcpy(&nexthop6->ip6, ip6, sizeof(struct in6_addr));
- nexthop6->lock++;
-
- return nexthop6;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_create(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nexthop6;
-
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops6, node, nexthop6)) {
- if (nexthop6->ifindex != ifindex)
+ if (nh->family != family)
continue;
- if (ip6
- && memcmp(&nexthop6->ip6, ip6, sizeof(struct in6_addr))
- != 0)
+ if (nh->ifindex != ifindex)
continue;
- nexthop6->lock++;
- return nexthop6;
- }
-
- nexthop6 = isis_nexthop6_new(ip6, ifindex);
-
- return nexthop6;
-}
-
-static void isis_nexthop6_delete(struct isis_nexthop6 *nexthop6)
-{
-
- nexthop6->lock--;
- if (nexthop6->lock == 0) {
- listnode_delete(isis->nexthops6, nexthop6);
- XFREE(MTYPE_ISIS_NEXTHOP6, nexthop6);
- }
-
- return;
-}
-
-static int nexthop6lookup(struct list *nexthops6, struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nh6;
+ switch (family) {
+ case AF_INET:
+ if (IPV4_ADDR_CMP(&nh->ip.ipv4, &ip->ipv4))
+ continue;
+ break;
+ case AF_INET6:
+ if (IPV6_ADDR_CMP(&nh->ip.ipv6, &ip->ipv6))
+ continue;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ family);
+ exit(1);
+ }
- for (ALL_LIST_ELEMENTS_RO(nexthops6, node, nh6)) {
- if (!(memcmp(ip6, &nh6->ip6, sizeof(struct in6_addr)))
- && ifindex == nh6->ifindex)
- return 1;
+ return nh;
}
- return 0;
+ return NULL;
}
-static void adjinfo2nexthop(struct list *nexthops, struct isis_adjacency *adj)
+/* Add at most one nexthop of the given family from this adjacency onto
+ * `nexthops`: the first adjacency address not already present on the list.
+ * Aborts on an address family other than AF_INET/AF_INET6.
+ */
+static void adjinfo2nexthop(int family, struct list *nexthops,
+ struct isis_adjacency *adj)
{
struct isis_nexthop *nh;
-
- for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
- struct in_addr *ipv4_addr = &adj->ipv4_addresses[i];
- if (!nexthoplookup(nexthops, ipv4_addr,
- adj->circuit->interface->ifindex)) {
- nh = isis_nexthop_create(
- ipv4_addr, adj->circuit->interface->ifindex);
- nh->router_address = adj->router_address;
- listnode_add(nexthops, nh);
- return;
+ union g_addr ip = {};
+
+ switch (family) {
+ case AF_INET:
+ for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
+ ip.ipv4 = adj->ipv4_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
- }
-}
-
-static void adjinfo2nexthop6(struct list *nexthops6, struct isis_adjacency *adj)
-{
- struct isis_nexthop6 *nh6;
-
- for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
- struct in6_addr *ipv6_addr = &adj->ipv6_addresses[i];
- if (!nexthop6lookup(nexthops6, ipv6_addr,
- adj->circuit->interface->ifindex)) {
- nh6 = isis_nexthop6_create(
- ipv6_addr, adj->circuit->interface->ifindex);
- nh6->router_address6 = adj->router_address6;
- listnode_add(nexthops6, nh6);
- return;
+ break;
+ case AF_INET6:
+ for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
+ ip.ipv6 = adj->ipv6_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET6, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET6, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT, "%s: unknown address family [%d]",
+ __func__, family);
+ exit(1);
}
}
rinfo = XCALLOC(MTYPE_ISIS_ROUTE_INFO, sizeof(struct isis_route_info));
- if (prefix->family == AF_INET) {
- rinfo->nexthops = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ rinfo->nexthops = list_new();
+ for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
+ /* check for force resync this route */
+ if (CHECK_FLAG(adj->circuit->flags,
+ ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
+ SET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
+
+ /* update neighbor router address */
+ switch (prefix->family) {
+ case AF_INET:
if (depth == 2 && prefix->prefixlen == 32)
adj->router_address = prefix->u.prefix4;
- adjinfo2nexthop(rinfo->nexthops, adj);
- }
- }
- if (prefix->family == AF_INET6) {
- rinfo->nexthops6 = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ break;
+ case AF_INET6:
if (depth == 2 && prefix->prefixlen == 128
&& (!src_p || !src_p->prefixlen)) {
adj->router_address6 = prefix->u.prefix6;
}
- adjinfo2nexthop6(rinfo->nexthops6, adj);
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ prefix->family);
+ exit(1);
}
+ adjinfo2nexthop(prefix->family, rinfo->nexthops, adj);
}
rinfo->cost = cost;
list_delete(&route_info->nexthops);
}
- if (route_info->nexthops6) {
- route_info->nexthops6->del =
- (void (*)(void *))isis_nexthop6_delete;
- list_delete(&route_info->nexthops6);
- }
-
XFREE(MTYPE_ISIS_ROUTE_INFO, route_info);
}
{
struct listnode *node;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
if (!CHECK_FLAG(old->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED))
return 0;
if (!isis_route_info_same_attrib(new, old))
return 0;
- if (family == AF_INET) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
- if (nexthoplookup(old->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
- if (nexthoplookup(new->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
- } else if (family == AF_INET6) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops6, node, nexthop6))
- if (nexthop6lookup(old->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops6, node, nexthop6))
- if (nexthop6lookup(new->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
- }
+ for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
+ if (!nexthoplookup(old->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
+ if (!nexthoplookup(new->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
return 1;
}
#ifndef _ZEBRA_ISIS_ROUTE_H
#define _ZEBRA_ISIS_ROUTE_H
-struct isis_nexthop6 {
- ifindex_t ifindex;
- struct in6_addr ip6;
- struct in6_addr router_address6;
- unsigned int lock;
-};
+#include "lib/nexthop.h"
+/* Address-family agnostic IS-IS nexthop; replaces the former separate
+ * isis_nexthop (v4) / isis_nexthop6 (v6) structures. `lock` is a simple
+ * reference count for entries shared via the global nexthop cache.
+ */
struct isis_nexthop {
 ifindex_t ifindex;
+ int family;
+ union g_addr ip;
 unsigned int lock;
};
uint32_t cost;
uint32_t depth;
struct list *nexthops;
- struct list *nexthops6;
};
struct isis_route_info *isis_route_create(struct prefix *prefix,
#include "command.h"
#include "memory.h"
#include "log.h"
+#include "lib_errors.h"
#include "if.h"
#include "network.h"
#include "prefix.h"
struct zapi_route api;
struct zapi_nexthop *api_nh;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
struct listnode *node;
int count = 0;
#endif
/* Nexthops */
- switch (prefix->family) {
- case AF_INET:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node,
- nexthop)) {
- if (count >= MULTIPATH_NUM)
- break;
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
+ for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node, nexthop)) {
+ if (count >= MULTIPATH_NUM)
+ break;
+ api_nh = &api.nexthops[count];
+ if (fabricd)
+ api_nh->onlink = true;
+ api_nh->vrf_id = VRF_DEFAULT;
+
+ switch (nexthop->family) {
+ case AF_INET:
/* FIXME: can it be ? */
- if (nexthop->ip.s_addr != INADDR_ANY) {
+ if (nexthop->ip.ipv4.s_addr != INADDR_ANY) {
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- api_nh->gate.ipv4 = nexthop->ip;
+ api_nh->gate.ipv4 = nexthop->ip.ipv4;
} else {
api_nh->type = NEXTHOP_TYPE_IFINDEX;
}
- api_nh->ifindex = nexthop->ifindex;
- count++;
- }
- break;
- case AF_INET6:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops6, node,
- nexthop6)) {
- if (count >= MULTIPATH_NUM)
- break;
- if (!IN6_IS_ADDR_LINKLOCAL(&nexthop6->ip6)
- && !IN6_IS_ADDR_UNSPECIFIED(&nexthop6->ip6)) {
+ break;
+ case AF_INET6:
+ if (!IN6_IS_ADDR_LINKLOCAL(&nexthop->ip.ipv6)
+ && !IN6_IS_ADDR_UNSPECIFIED(&nexthop->ip.ipv6)) {
continue;
}
-
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
- api_nh->gate.ipv6 = nexthop6->ip6;
- api_nh->ifindex = nexthop6->ifindex;
+ api_nh->gate.ipv6 = nexthop->ip.ipv6;
api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- count++;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ nexthop->family);
+ exit(1);
}
- break;
+
+ api_nh->ifindex = nexthop->ifindex;
+ count++;
}
if (!count)
return;
isis->init_circ_list = list_new();
isis->uptime = time(NULL);
isis->nexthops = list_new();
- isis->nexthops6 = list_new();
dyn_cache_init();
/*
* uncomment the next line for full debugs
uint32_t router_id; /* Router ID from zebra */
struct list *area_list; /* list of IS-IS areas */
struct list *init_circ_list;
- struct list *nexthops; /* IPv4 next hops from this IS */
- struct list *nexthops6; /* IPv6 next hops from this IS */
+ struct list *nexthops; /* IP next hops from this IS */
uint8_t max_area_addrs; /* maximumAreaAdresses */
struct area_addr *man_area_addrs; /* manualAreaAddresses */
uint32_t debugs; /* bitmap for debug */
"bfd peer", /* BFD_PEER_NODE */
"openfabric", // OPENFABRIC_NODE
"vrrp", /* VRRP_NODE */
+ "bmp", /* BMP_NODE */
};
/* clang-format on */
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
ret = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
BFD_PEER_NODE, /* BFD peer configuration mode. */
OPENFABRIC_NODE, /* OpenFabric router configuration node */
VRRP_NODE, /* VRRP node */
+ BMP_NODE, /* BMP config under router bgp */
NODE_TYPE_MAX, /* maximum */
};
#include "atomlist.h"
DEFINE_MTYPE_STATIC(LIB, RCU_THREAD, "RCU thread")
-DEFINE_MTYPE_STATIC(LIB, RCU_NEXT, "RCU sequence barrier")
DECLARE_ATOMLIST(rcu_heads, struct rcu_head, head)
{
struct rcu_next *rn;
- rn = XMALLOC(MTYPE_RCU_NEXT, sizeof(*rn));
+ rn = XMALLOC(MTYPE_RCU_THREAD, sizeof(*rn));
/* note: each RCUA_NEXT item corresponds to exactly one seqno bump.
* This means we don't need to communicate which seqno is which
* "last item is being deleted - start over" case, and then we may end
* up accessing old RCU queue items that are already free'd.
*/
- rcu_free_internal(MTYPE_RCU_NEXT, rn, head_free);
+ rcu_free_internal(MTYPE_RCU_THREAD, rn, head_free);
/* Only allow the RCU sweeper to run after these 2 items are queued.
*
return (int64_t)tv.tv_sec * 1000000LL + tv.tv_usec;
}
+/* Convert a monotonic timestamp to wall-clock time: subtract the elapsed
+ * monotonic delta from the current realtime. Note the result can shift if
+ * the system clock is stepped between capture and conversion.
+ * Optionally stores the full timeval in *realout; returns whole seconds.
+ */
+static inline time_t monotime_to_realtime(const struct timeval *mono,
+ struct timeval *realout)
+{
+ struct timeval delta, real;
+
+ monotime_since(mono, &delta);
+ gettimeofday(&real, NULL);
+
+ timersub(&real, &delta, &real);
+ if (realout)
+ *realout = real;
+ return real.tv_sec;
+}
+
/* Char buffer size for time-to-string api */
#define MONOTIME_STRLEN 32
--- /dev/null
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "zebra.h"
+
+#include "pullwr.h"
+#include "memory.h"
+#include "monotime.h"
+
+/* defaults */
+#define PULLWR_THRESH 16384 /* size at which we start to call write() */
+#define PULLWR_MAXSPIN 2500 /* max µs to spend grabbing more data */
+
+/* Pull-driven write state; see pullwr.h for the overall semantics. */
+struct pullwr {
+ int fd;
+ struct thread_master *tm;
+ /* writer == NULL <=> we're idle */
+ struct thread *writer;
+
+ void *arg;
+ void (*fill)(void *, struct pullwr *);
+ void (*err)(void *, struct pullwr *, bool);
+
+ /* ring buffer (although it's "un-ringed" on resizing, it WILL wrap
+ * around if data is trickling in while keeping it at a constant size)
+ */
+ size_t bufsz, valid, pos; /* capacity, buffered bytes, read offset */
+ uint64_t total_written; /* lifetime bytes written out the fd */
+ char *buffer;
+
+ size_t thresh; /* PULLWR_THRESH */
+ int64_t maxspin; /* PULLWR_MAXSPIN */
+};
+
+DEFINE_MTYPE_STATIC(LIB, PULLWR_HEAD, "pull-driven write controller")
+DEFINE_MTYPE_STATIC(LIB, PULLWR_BUF, "pull-driven write buffer")
+
+static int pullwr_run(struct thread *t);
+
+/* Allocate a pullwr bound to fd; starts idle with no buffer allocated.
+ * fill/err are the user callbacks (see pullwr.h), arg is passed through
+ * to them. Prefer the pullwr_new() type-checking wrapper over calling
+ * this directly.
+ */
+struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool))
+{
+ struct pullwr *pullwr;
+
+ pullwr = XCALLOC(MTYPE_PULLWR_HEAD, sizeof(*pullwr));
+ pullwr->fd = fd;
+ pullwr->tm = tm;
+ pullwr->arg = arg;
+ pullwr->fill = fill;
+ pullwr->err = err;
+
+ pullwr->thresh = PULLWR_THRESH;
+ pullwr->maxspin = PULLWR_MAXSPIN;
+
+ return pullwr;
+}
+
+/* Tear down: cancel any pending writer event and free buffer + head.
+ * Does not close fd - the caller retains ownership of the descriptor.
+ */
+void pullwr_del(struct pullwr *pullwr)
+{
+ THREAD_OFF(pullwr->writer);
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ XFREE(MTYPE_PULLWR_HEAD, pullwr);
+}
+
+/* Tune the spin limit and write threshold; 0 selects the built-in default
+ * (PULLWR_MAXSPIN / PULLWR_THRESH respectively).
+ */
+void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold)
+{
+ pullwr->maxspin = max_spin_usec ? max_spin_usec : PULLWR_MAXSPIN;
+ pullwr->thresh = write_threshold ? write_threshold : PULLWR_THRESH;
+}
+
+/* Kick an idle pullwr back into active state by scheduling an immediate
+ * (0-delay timer) pullwr_run(). No-op if a writer event is already
+ * pending, i.e. the pullwr is already active.
+ */
+void pullwr_bump(struct pullwr *pullwr)
+{
+ if (pullwr->writer)
+ return;
+
+ thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
+}
+
+/* Describe the buffered data as 1 or 2 iovecs (2 when the ring wraps
+ * around the end of the buffer). Returns the number of iovecs filled,
+ * 0 if nothing is buffered. iov must have room for 2 entries.
+ */
+static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
+{
+ size_t len1;
+
+ if (pullwr->valid == 0)
+ return 0;
+
+ /* contiguous case: one span from pos */
+ if (pullwr->pos + pullwr->valid <= pullwr->bufsz) {
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = pullwr->valid;
+ return 1;
+ }
+
+ /* wrapped: tail of buffer, then start of buffer */
+ len1 = pullwr->bufsz - pullwr->pos;
+
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = len1;
+ iov[1].iov_base = pullwr->buffer;
+ iov[1].iov_len = pullwr->valid - len1;
+ return 2;
+}
+
+/* Grow (need > 0: ensure room for `need` more bytes) or shrink (need == 0)
+ * the ring buffer. Buffered data is "un-ringed" - copied contiguously to
+ * the front of the new buffer - and pos resets to 0. When shrinking with
+ * an empty buffer, the allocation is released entirely.
+ */
+static void pullwr_resize(struct pullwr *pullwr, size_t need)
+{
+ struct iovec iov[2];
+ size_t niov, newsize;
+ char *newbuf;
+
+ /* the buffer is maintained at pullwr->thresh * 2 since we'll be
+ * trying to fill it as long as it's anywhere below pullwr->thresh.
+ * That means we frequently end up a little short of it and then write
+ * something that goes over the threshold. So, just use double.
+ */
+ if (need) {
+ /* resize up */
+ if (pullwr->bufsz - pullwr->valid >= need)
+ return;
+
+ newsize = MAX((pullwr->valid + need) * 2, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ } else if (!pullwr->valid) {
+ /* resize down, buffer empty */
+ newsize = 0;
+ newbuf = NULL;
+ } else {
+ /* resize down */
+ if (pullwr->bufsz - pullwr->valid < pullwr->thresh)
+ return;
+ newsize = MAX(pullwr->valid, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ }
+
+ /* copy out the (possibly wrapped) old contents, linearized */
+ niov = pullwr_iov(pullwr, iov);
+ if (niov >= 1) {
+ memcpy(newbuf, iov[0].iov_base, iov[0].iov_len);
+ if (niov >= 2)
+ memcpy(newbuf + iov[0].iov_len,
+ iov[1].iov_base, iov[1].iov_len);
+ }
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ pullwr->buffer = newbuf;
+ pullwr->bufsz = newsize;
+ pullwr->pos = 0;
+}
+
+/* Append len bytes to the ring buffer, growing it first if necessary, and
+ * make sure the writer event is scheduled (pullwr_bump). Handles both the
+ * already-wrapped case (free space is one contiguous span) and the
+ * unwrapped case (write may need to wrap around the buffer end).
+ */
+void pullwr_write(struct pullwr *pullwr, const void *data, size_t len)
+{
+ pullwr_resize(pullwr, len);
+
+ if (pullwr->pos + pullwr->valid > pullwr->bufsz) {
+ /* already wrapped: free region is contiguous at (pos+valid)%bufsz */
+ size_t pos;
+
+ pos = (pullwr->pos + pullwr->valid) % pullwr->bufsz;
+ memcpy(pullwr->buffer + pos, data, len);
+ } else {
+ /* not wrapped yet: may need to split the copy at the buffer end */
+ size_t max1, len1;
+ max1 = pullwr->bufsz - (pullwr->pos + pullwr->valid);
+ max1 = MIN(max1, len);
+
+ memcpy(pullwr->buffer + pullwr->pos + pullwr->valid,
+ data, max1);
+ len1 = len - max1;
+
+ if (len1)
+ memcpy(pullwr->buffer, (char *)data + max1, len1);
+
+ }
+ pullwr->valid += len;
+
+ pullwr_bump(pullwr);
+}
+
+/* Writer event handler. Repeatedly pulls data in via fill() - until the
+ * buffer reaches thresh bytes, fill() stops producing, or maxspin usec
+ * elapse - then pushes it out with writev(). Goes idle (no reschedule)
+ * when fill() adds nothing and the buffer drains; re-arms itself as a
+ * write event when the socket backpressures (EAGAIN / short write).
+ * err() is invoked on write error (eof=false) or EOF (eof=true).
+ */
+static int pullwr_run(struct thread *t)
+{
+ struct pullwr *pullwr = THREAD_ARG(t);
+ struct iovec iov[2];
+ size_t niov, lastvalid;
+ ssize_t nwr;
+ struct timeval t0;
+ bool maxspun = false;
+
+ monotime(&t0);
+
+ do {
+ /* seed lastvalid != valid so the loop runs at least once */
+ lastvalid = pullwr->valid - 1;
+ while (pullwr->valid < pullwr->thresh
+ && pullwr->valid != lastvalid
+ && !maxspun) {
+ lastvalid = pullwr->valid;
+ pullwr->fill(pullwr->arg, pullwr);
+
+ /* check after doing at least one fill() call so we
+ * don't spin without making progress on slow boxes
+ */
+ if (!maxspun && monotime_since(&t0, NULL)
+ >= pullwr->maxspin)
+ maxspun = true;
+ }
+
+ if (pullwr->valid == 0) {
+ /* we made a fill() call above that didn't feed any
+ * data in, and we have nothing more queued, so we go
+ * into idle, i.e. no calling thread_add_write()
+ */
+ pullwr_resize(pullwr, 0);
+ return 0;
+ }
+
+ niov = pullwr_iov(pullwr, iov);
+ assert(niov);
+
+ nwr = writev(pullwr->fd, iov, niov);
+ if (nwr < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+ pullwr->err(pullwr->arg, pullwr, false);
+ return 0;
+ }
+
+ if (nwr == 0) {
+ /* EOF on the other end */
+ pullwr->err(pullwr->arg, pullwr, true);
+ return 0;
+ }
+
+ pullwr->total_written += nwr;
+ pullwr->valid -= nwr;
+ pullwr->pos += nwr;
+ pullwr->pos %= pullwr->bufsz;
+ } while (pullwr->valid == 0 && !maxspun);
+ /* pullwr->valid != 0 implies we did an incomplete write, i.e. socket
+ * is full and we go wait until it's available for writing again.
+ */
+
+ thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
+ &pullwr->writer);
+
+ /* if we hit the time limit, just keep the buffer, we'll probably need
+ * it anyway & another run is already coming up.
+ */
+ if (!maxspun)
+ pullwr_resize(pullwr, 0);
+ return 0;
+}
+
+/* Report statistics: lifetime bytes written out the fd, bytes buffered in
+ * userspace, and bytes still queued in the kernel socket buffer (TIOCOUTQ;
+ * reported as 0 if the ioctl fails).
+ */
+void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending)
+{
+ int tmp;
+
+ *total_written = pullwr->total_written;
+ *pending = pullwr->valid;
+
+ if (ioctl(pullwr->fd, TIOCOUTQ, &tmp) != 0)
+ tmp = 0;
+ *kernel_pending = tmp;
+}
--- /dev/null
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _WRITEPOLL_H
+#define _WRITEPOLL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "thread.h"
+#include "stream.h"
+
+struct pullwr;
+
+/* This is a "pull-driven" write event handler. Instead of having some buffer
+ * or being driven by the availability of data, it triggers on the space being
+ * available on the socket for data to be written on and then calls fill() to
+ * get data to be sent.
+ *
+ * pullwr_* maintains an "idle" vs. "active" state, going into idle when a
+ * fill() call completes without feeding more data into it. The overall
+ * semantics are:
+ * - to put data out, call pullwr_write(). This is possible from both inside
+ * fill() callbacks or anywhere else. Doing so puts the pullwr into
+ * active state.
+ * - in active state, the fill() callback will be called and should feed more
+ * data in. It should NOT loop to push out more than one "unit" of data;
+ * the pullwr code handles this by calling fill() until it has enough data.
+ * - if there's nothing more to be sent, fill() returns without doing anything
+ * and pullwr goes into idle state after flushing all buffered data out.
+ * - when new data becomes available, pullwr_bump() should be called to put
+ * the pullwr back into active mode so it will collect data from fill(),
+ * or you can directly call pullwr_write().
+ * - only calling pullwr_write() from within fill() is the cleanest way of
+ * doing things.
+ *
+ * When the err() callback is called, the pullwr should be considered unusable
+ * and released with pullwr_del(). This can be done from inside the callback,
+ * the pullwr code holds no more references on it when calling err().
+ */
+extern struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool eof));
+extern void pullwr_del(struct pullwr *pullwr);
+
+/* type-checking wrapper. makes sure fill() and err() take a first argument
+ * whose type is identical to the type of arg.
+ * => use "void fill(struct mystruct *arg, ...)" - no "void *arg"
+ */
+#define pullwr_new(tm, fd, arg, fill, err) ({ \
+ void (*fill_typechk)(typeof(arg), struct pullwr *) = fill; \
+ void (*err_typechk)(typeof(arg), struct pullwr *, bool) = err; \
+ _pullwr_new(tm, fd, arg, (void *)fill_typechk, (void *)err_typechk); \
+})
+
+/* max_spin_usec is the time after which the pullwr event handler will stop
+ * trying to get more data from fill() and yield control back to the
+ * thread_master. It does reschedule itself to continue later; this is
+ * only to make sure we don't freeze the entire process if we're piping a
+ * lot of data to a local endpoint that reads quickly (i.e. no backpressure)
+ *
+ * default: 2500 (2.5 ms)
+ *
+ * write_threshold is the amount of data buffered from fill() calls at which
+ * the pullwr code starts calling write(). But this is not a "limit".
+ * pullwr will keep poking fill() for more data until
+ * (a) max_spin_usec is reached; fill() will be called again later after
+ * returning to the thread_master to give other events a chance to run
+ * (b) fill() returns without pushing any data onto the pullwr with
+ * pullwr_write(), so fill() will NOT be called again until a call to
+ * pullwr_bump() or pullwr_write() comes in.
+ *
+ * default: 16384 (16 kB)
+ *
+ * passing 0 for either value (or not calling it at all) uses the default.
+ */
+extern void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold);
+
+extern void pullwr_bump(struct pullwr *pullwr);
+extern void pullwr_write(struct pullwr *pullwr,
+ const void *data, size_t len);
+
+/* Convenience: queue a stream's contents (bytes 0..endp) onto the pullwr. */
+static inline void pullwr_write_stream(struct pullwr *pullwr,
+ struct stream *s)
+{
+ pullwr_write(pullwr, s->data, stream_get_endp(s));
+}
+
+extern void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending);
+
+#endif /* _WRITEPOLL_H */
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
frr_protonameinst);
return CMD_WARNING_CONFIG_FAILED;
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here move along
+ */
+ break;
}
return CMD_SUCCESS;
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
int generic_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
int generic_set_delete(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
}
/* Add match statement to route map. */
-int route_map_add_match(struct route_map_index *index, const char *match_name,
- const char *match_arg, route_map_event_t type)
+enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type)
{
struct route_map_rule *rule;
struct route_map_rule *next;
}
/* Delete specified route match rule. */
-int route_map_delete_match(struct route_map_index *index,
- const char *match_name, const char *match_arg)
+enum rmap_compile_rets route_map_delete_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_match(match_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->match_list.head; rule; rule = rule->next)
if (rule->cmd == cmd && (rulecmp(rule->rule_str, match_arg) == 0
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
/* Add route-map set statement to the route map. */
-int route_map_add_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule *next;
}
/* Delete route map set rule. */
-int route_map_delete_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_delete_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_set(set_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->set_list.head; rule; rule = rule->next)
if ((rule->cmd == cmd) && (rulecmp(rule->rule_str, set_arg) == 0
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
static enum route_map_cmd_result_t
};
/* Route map apply error. */
-enum {
+enum rmap_compile_rets {
RMAP_COMPILE_SUCCESS,
/* Route map rule is missing. */
extern void route_map_finish(void);
/* Add match statement to route map. */
-extern int route_map_add_match(struct route_map_index *index,
- const char *match_name, const char *match_arg,
- route_map_event_t type);
+extern enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type);
/* Delete specified route match rule. */
-extern int route_map_delete_match(struct route_map_index *index,
- const char *match_name,
- const char *match_arg);
+extern enum rmap_compile_rets
+route_map_delete_match(struct route_map_index *index,
+ const char *match_name, const char *match_arg);
extern const char *route_map_get_match_arg(struct route_map_index *index,
const char *match_name);
/* Add route-map set statement to the route map. */
-extern int route_map_add_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg);
/* Delete route map set rule. */
-extern int route_map_delete_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets
+route_map_delete_set(struct route_map_index *index,
+ const char *set_name, const char *set_arg);
/* Install rule command to the match list. */
extern void route_map_install_match(struct route_map_rule_cmd *cmd);
lib/prefix.c \
lib/privs.c \
lib/ptm_lib.c \
+ lib/pullwr.c \
lib/qobj.c \
lib/ringbuf.c \
lib/routemap.c \
lib/printfrr.h \
lib/privs.h \
lib/ptm_lib.h \
+ lib/pullwr.h \
lib/pw.h \
lib/qobj.h \
lib/queue.h \
vty_out(vty, "MOTD file not found\n");
} else if (host.motd)
vty_out(vty, "%s", host.motd);
+
+#if CONFDATE > 20200901
+ CPP_NOTICE("Please remove solaris code from system as it is deprecated");
+#endif
+#ifdef SUNOS_5
+ zlog_warn("If you are using FRR on Solaris, the FRR developers would love to hear from you\n");
+ zlog_warn("Please send email to dev@lists.frrouting.org about this message\n");
+ zlog_warn("We are considering deprecating Solaris and want to find users of Solaris systems\n");
+#endif
}
/* Put out prompt and wait input from user. */
route_map_rule_tag_free,
};
-static int route_map_command_status(struct vty *vty, int ret)
+static int route_map_command_status(struct vty *vty, enum rmap_compile_rets ret)
{
switch (ret) {
case RMAP_RULE_MISSING:
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_external = 2;
- int ret = route_map_add_set(route_map_index, "metric-type",
- argv[idx_external]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "metric-type",
+ argv[idx_external]->arg);
+
return route_map_command_status(vty, ret);
}
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
char *ext = (argc == 4) ? argv[3]->text : NULL;
- int ret = route_map_delete_set(route_map_index, "metric-type", ext);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "metric-type", ext);
+
return route_map_command_status(vty, ret);
}
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 2;
- int ret = route_map_add_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 3;
- int ret = route_map_delete_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
macros = Macros()
macros.load('lib/route_types.h')
macros.load(os.path.join(basepath, 'lib/command.h'))
+ macros.load(os.path.join(basepath, 'bgpd/bgp_vty.h'))
# sigh :(
macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD'
%{_libdir}/frr/modules/bgpd_rpki.so
%endif
%{_libdir}/frr/modules/zebra_irdp.so
+%{_libdir}/frr/modules/bgpd_bmp.so
%{_bindir}/*
%config(noreplace) %{configdir}/[!v]*.conf*
%config(noreplace) %attr(750,%{frr_user},%{frr_user}) %{configdir}/daemons
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
pytest
RUN cd /tmp \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang-dev_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang-dev_0.16.105-1_amd64.deb \
-O libyang-dev.deb \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang0.16_0.16.105-1_amd64.deb \
-O libyang.deb \
- && echo "039252cc66eb254a97e160b1c325af669470cde8a02d73ec9f7b920ed3c7997c libyang.deb" | sha256sum -c - \
- && echo "e7e2d5bfc7b33b3218df8bef404432970f9b4ad10d6dbbdcb0e0be2babbb68e9 libyang-dev.deb" | sha256sum -c - \
+ && echo "34bef017e527a590020185f05dc39203bdf1c86223e0d990839623ec629d8598 libyang.deb" | sha256sum -c - \
+ && echo "fe9cc6e3b173ca56ef49428c281e96bf76c0f910aa75cf85098076411484e8f4 libyang-dev.deb" | sha256sum -c - \
&& dpkg -i libyang*.deb \
&& rm libyang*.deb
# Verifying RIB routes
dut = 'r3'
protocol = 'bgp'
- next_hop = '10.0.0.2'
+ next_hop = ['10.0.0.2', '10.0.0.5']
result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
protocol=protocol)
assert result is True, "Testcase {} :Failed \n Error: {}". \
--- /dev/null
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on EBGP.
+1. Verify routes installed as per maximum-paths configuration (8/16/32)
+2. Disable/Shut selected paths nexthops and verify other nexthops are
+   installed in the RIB of DUT. Enable interfaces and verify RIB count.
+3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.
+4. Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify BGP and routing table updated
+   accordingly in DUT
+6. Delete static routes and verify routes are cleared from BGP table and RIB
+   of DUT.
+7. Verify routes are cleared from BGP and RIB table of DUT when advertise
+ network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ebgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+ """
+ Disable/Shut selected paths nexthops and verify other next are installed in
+ the RIB of DUT. Enable interfaces and verify RIB count.
+
+ Shut BGP neigbors one by one and verify BGP and routing table updated
+ accordingly in DUT
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+ logger.info("Enabling all neighbor interface {} on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+ Delete static routes and verify routers are cleared from BGP table,
+ and RIB of DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on EBGP.
+1. Verify routes installed as per maximum-paths configuration (8/16/32)
+2. Disable/Shut selected paths nexthops and verify other next are installed in
+ the RIB of DUT. Enable interfaces and verify RIB count.
+3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.
+4. Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify BGP and routing table updated
+   accordingly in DUT
+6. Delete static routes and verify routes are cleared from BGP table and RIB
+   of DUT.
+7. Verify routes are cleared from BGP and RIB table of DUT when advertise
+ network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ibgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ for addr_type in ADDR_TYPES:
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+ """ Shut BGP neigbors one by one and verify BGP and routing table updated
+ accordingly in DUT """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+ logger.info("Enabling all neighbor interface {} on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+ Delete static routes and verify routers are cleared from BGP table,
+ and RIB of DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
dut = "r1"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Configure next-hop-self to bgp neighbor
input_dict_1 = {
"neighbor": {
"r1": {
"dest_link": {
- "r3": {
+ "r2": {
"route_maps": [
{"name": "RMAP_LOCAL_PREF",
"direction": "in"}
}
}
}
+
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_prefix_lists(tgen, input_dict_2)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
- logger.info(result)
# Delete prefix list
input_dict_2 = {
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
while True:
output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
if output['192.168.255.1']['bgpState'] == 'Established':
- if output['192.168.255.1']['addressFamilyInfo']['IPv4 Unicast']['acceptedPrefixCounter'] == 2:
+ if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2:
return True
def _bgp_comm_list_delete(router):
luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',90)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
+luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up')
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',300)
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up')
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',30)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
--enable-static-bin \
--enable-static \
--enable-shared \
+ --enable-dev-build \
--with-moduledir=/usr/lib/frr/modules \
--prefix=/usr \
--localstatedir=/var/run/frr \
# Static routes are created as part of initial configuration,
# verifying RIB
dut = 'r3'
- next_hop = '10.0.0.1'
+ next_hop = ['10.0.0.1', '10.0.0.5']
input_dict = {
"r1": {
"static_routes": [
load_config_to_router,
check_address_types,
generate_ips,
- find_interface_with_greater_ip)
+ find_interface_with_greater_ip,
+ run_frr_cmd, retry)
BGP_CONVERGENCE_TIMEOUT = 10
logger.debug("Router %s: 'bgp' not present in input_dict", router)
continue
- result = __create_bgp_global(tgen, input_dict, router, build)
- if result is True:
+ data_all_bgp = __create_bgp_global(tgen, input_dict, router, build)
+ if data_all_bgp:
bgp_data = input_dict[router]["bgp"]
bgp_addr_data = bgp_data.setdefault("address_family", {})
or ipv6_data.setdefault("unicast", {}) else False
if neigh_unicast:
- result = __create_bgp_unicast_neighbor(
- tgen, topo, input_dict, router, build)
+ data_all_bgp = __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router,
+ config_data=data_all_bgp)
+
+ try:
+ result = create_common_configuration(tgen, router, data_all_bgp,
+ "bgp", build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
logger.debug("Exiting lib API: create_router_bgp()")
return result
True or False
"""
- result = False
logger.debug("Entering lib API: __create_bgp_global()")
- try:
-
- bgp_data = input_dict[router]["bgp"]
- del_bgp_action = bgp_data.setdefault("delete", False)
- if del_bgp_action:
- config_data = ["no router bgp"]
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- return result
- config_data = []
+ bgp_data = input_dict[router]["bgp"]
+ del_bgp_action = bgp_data.setdefault("delete", False)
+ if del_bgp_action:
+ config_data = ["no router bgp"]
- if "local_as" not in bgp_data and build:
- logger.error("Router %s: 'local_as' not present in input_dict"
- "for BGP", router)
- return False
+ return config_data
- local_as = bgp_data.setdefault("local_as", "")
- cmd = "router bgp {}".format(local_as)
- vrf_id = bgp_data.setdefault("vrf", None)
- if vrf_id:
- cmd = "{} vrf {}".format(cmd, vrf_id)
-
- config_data.append(cmd)
+ config_data = []
- router_id = bgp_data.setdefault("router_id", None)
- del_router_id = bgp_data.setdefault("del_router_id", False)
- if del_router_id:
- config_data.append("no bgp router-id")
- if router_id:
- config_data.append("bgp router-id {}".format(
- router_id))
+ if "local_as" not in bgp_data and build:
+ logger.error("Router %s: 'local_as' not present in input_dict"
+ "for BGP", router)
+ return False
- aggregate_address = bgp_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- network = aggregate_address.setdefault("network", None)
- if not network:
- logger.error("Router %s: 'network' not present in "
- "input_dict for BGP", router)
- else:
- cmd = "aggregate-address {}".format(network)
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ config_data.append(cmd)
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(
+ router_id))
+
+ aggregate_address = bgp_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.error("Router %s: 'network' not present in "
+ "input_dict for BGP", router)
+ else:
+ cmd = "aggregate-address {}".format(network)
- as_set = aggregate_address.setdefault("as_set", False)
- summary = aggregate_address.setdefault("summary", False)
- del_action = aggregate_address.setdefault("delete", False)
- if as_set:
- cmd = "{} {}".format(cmd, "as-set")
- if summary:
- cmd = "{} {}".format(cmd, "summary")
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} {}".format(cmd, "as-set")
+ if summary:
+ cmd = "{} {}".format(cmd, "summary")
- if del_action:
- cmd = "no {}".format(cmd)
+ if del_action:
+ cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
-
- logger.debug("Exiting lib API: create_bgp_global()")
- return result
+ return config_data
-def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router,
+ config_data=None):
"""
Helper API to create configuration for address-family unicast
* `build` : Only for initial setup phase this is set as True.
"""
- result = False
logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
- try:
- config_data = ["router bgp"]
- bgp_data = input_dict[router]["bgp"]["address_family"]
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict:
- continue
+ add_neigh = True
+ if "router bgp "in config_data:
+ add_neigh = False
+ bgp_data = input_dict[router]["bgp"]["address_family"]
- if not check_address_types(addr_type):
- continue
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict:
+ continue
+ if not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if addr_data:
config_data.append("address-family {} unicast".format(
addr_type
))
- addr_data = addr_dict["unicast"]
- advertise_network = addr_data.setdefault("advertise_networks",
- [])
- for advertise_network_dict in advertise_network:
- network = advertise_network_dict["network"]
- if type(network) is not list:
- network = [network]
-
- if "no_of_network" in advertise_network_dict:
- no_of_network = advertise_network_dict["no_of_network"]
- else:
- no_of_network = 1
-
- del_action = advertise_network_dict.setdefault("delete",
- False)
+ advertise_network = addr_data.setdefault("advertise_networks",
+ [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
- # Generating IPs for verification
- prefix = str(
- ipaddr.IPNetwork(unicode(network[0])).prefixlen)
- network_list = generate_ips(network, no_of_network)
- for ip in network_list:
- ip = str(ipaddr.IPNetwork(unicode(ip)).network)
+ del_action = advertise_network_dict.setdefault("delete",
+ False)
- cmd = "network {}/{}\n".format(ip, prefix)
- if del_action:
- cmd = "no {}".format(cmd)
+ # Generating IPs for verification
+ prefix = str(
+ ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddr.IPNetwork(unicode(ip)).network)
- config_data.append(cmd)
+ cmd = "network {}/{}".format(ip, prefix)
+ if del_action:
+ cmd = "no {}".format(cmd)
- max_paths = addr_data.setdefault("maximum_paths", {})
- if max_paths:
- ibgp = max_paths.setdefault("ibgp", None)
- ebgp = max_paths.setdefault("ebgp", None)
- if ibgp:
- config_data.append("maximum-paths ibgp {}".format(
- ibgp
- ))
- if ebgp:
- config_data.append("maximum-paths {}".format(
- ebgp
- ))
-
- aggregate_address = addr_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- ip = aggregate_address("network", None)
- attribute = aggregate_address("attribute", None)
- if ip:
- cmd = "aggregate-address {}".format(ip)
- if attribute:
- cmd = "{} {}".format(cmd, attribute)
+ config_data.append(cmd)
- config_data.append(cmd)
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ if ibgp:
+ config_data.append("maximum-paths ibgp {}".format(
+ ibgp
+ ))
+ if ebgp:
+ config_data.append("maximum-paths {}".format(
+ ebgp
+ ))
+
+ aggregate_address = addr_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ ip = aggregate_address("network", None)
+ attribute = aggregate_address("attribute", None)
+ if ip:
+ cmd = "aggregate-address {}".format(ip)
+ if attribute:
+ cmd = "{} {}".format(cmd, attribute)
- redistribute_data = addr_data.setdefault("redistribute", {})
- if redistribute_data:
- for redistribute in redistribute_data:
- if "redist_type" not in redistribute:
- logger.error("Router %s: 'redist_type' not present in "
- "input_dict", router)
- else:
- cmd = "redistribute {}".format(
- redistribute["redist_type"])
- redist_attr = redistribute.setdefault("attribute",
- None)
- if redist_attr:
- cmd = "{} {}".format(cmd, redist_attr)
- del_action = redistribute.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- if "neighbor" in addr_data:
- neigh_data = __create_bgp_neighbor(topo, input_dict,
- router, addr_type)
- config_data.extend(neigh_data)
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.error("Router %s: 'redist_type' not present in "
+ "input_dict", router)
+ else:
+ cmd = "redistribute {}".format(
+ redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute",
+ None)
+ if redist_attr:
+ cmd = "{} {}".format(cmd, redist_attr)
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict or not check_address_types(addr_type):
- continue
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(topo, input_dict,
+ router, addr_type, add_neigh)
+ config_data.extend(neigh_data)
- addr_data = addr_dict["unicast"]
- if "neighbor" in addr_data:
- neigh_addr_data = __create_bgp_unicast_address_family(
- topo, input_dict, router, addr_type)
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
- config_data.extend(neigh_addr_data)
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh)
- result = create_common_configuration(tgen, router, config_data,
- None, build=build)
+ config_data.extend(neigh_addr_data)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
- return result
+ return config_data
-def __create_bgp_neighbor(topo, input_dict, router, addr_type):
+def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
Helper API to create neighbor specific configuration
neigh_cxt = "neighbor {}".format(ip_addr)
- config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+ if add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
if addr_type == "ipv6":
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
return config_data
-def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
+def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type,
+ add_neigh=True):
"""
API prints bgp global config to bgp_json file.
#############################################
# Verification APIs
#############################################
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_router_id(tgen, topo, input_dict):
"""
Running command "show ip bgp json" for DUT and reading router-id
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_router_id()")
+ logger.debug("Entering lib API: verify_router_id()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
"del_router_id", False)
logger.info("Checking router %s router-id", router)
- show_bgp_json = rnode.vtysh_cmd("show ip bgp json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- router_id_out = show_bgp_json["routerId"]
+ router_id_out = show_bgp_json["ipv4Unicast"]["routerId"]
router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
# Once router-id is deleted, highest interface ip should become
router_id_out)
return errormsg
- logger.info("Exiting lib API: verify_router_id()")
+ logger.debug("Exiting lib API: verify_router_id()")
return True
+@retry(attempts=20, wait=2, return_is_str=True)
def verify_bgp_convergence(tgen, topo):
"""
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
state is established,
-
Parameters
----------
* `tgen`: topogen object
* `topo`: input json file data
* `addr_type`: ip_type, ipv4/ipv6
-
Usage
-----
    # To verify if BGP is converged for all the routers used in
topology
results = verify_bgp_convergence(tgen, topo, "ipv4")
-
Returns
-------
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_confergence()")
+ logger.debug("Entering lib API: verify_bgp_convergence()")
for router, rnode in tgen.routers().iteritems():
- logger.info("Verifying BGP Convergence on router %s:", router)
-
- for retry in range(1, 11):
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
- isjson=True)
- # Verifying output dictionary show_bgp_json is empty or not
- if not bool(show_bgp_json):
- errormsg = "BGP is not running"
- return errormsg
+ logger.info("Verifying BGP Convergence on router %s", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
- # To find neighbor ip type
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
total_peer = 0
- bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
-
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- for bgp_neighbor in bgp_neighbors:
- total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
-
- for addr_type in bgp_addr_type.keys():
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- no_of_peer = 0
- for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
- for dest_link in peer_data["dest_link"].keys():
- data = topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- neighbor_ip = \
- data[dest_link][addr_type].split("/")[0]
- if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
- nh_state = ipv4_data[neighbor_ip]["state"]
- else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
- nh_state = ipv6_data[neighbor_ip]["state"]
-
- if nh_state == "Established":
- no_of_peer += 1
- if no_of_peer == total_peer:
- logger.info("BGP is Converged for router %s", router)
- break
- else:
- logger.warning("BGP is not yet Converged for router %s",
- router)
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on"
- " router %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- show_bgp_summary = rnode.vtysh_cmd("show bgp summary")
- errormsg = "TIMEOUT!! BGP is not converged in {} " \
- "seconds for router {} \n {}".format(
- BGP_CONVERGENCE_TIMEOUT, router,
- show_bgp_summary)
- return errormsg
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ no_of_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ neighbor_ip = \
+ data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s", router)
+ else:
+ errormsg = "BGP is not converged for router {}".format(
+ router)
+ return errormsg
- logger.info("Exiting API: verify_bgp_confergence()")
+ logger.debug("Exiting API: verify_bgp_convergence()")
return True
errormsg(str) or True
"""
- logger.info("Entering lib API: modify_as_number()")
+ logger.debug("Entering lib API: modify_as_number()")
try:
new_topo = deepcopy(topo["routers"])
logger.error(errormsg)
return errormsg
- logger.info("Exiting lib API: modify_as_number()")
+ logger.debug("Exiting lib API: modify_as_number()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_as_numbers(tgen, topo, input_dict):
"""
This API is to verify AS numbers for given DUT by running
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_as_numbers()")
+ logger.debug("Entering lib API: verify_as_numbers()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
logger.info("Verifying AS numbers for dut %s:", router)
- show_ip_bgp_neighbor_json = rnode.vtysh_cmd(
+ show_ip_bgp_neighbor_json = run_frr_cmd(rnode,
"show ip bgp neighbor json", isjson=True)
local_as = input_dict[router]["bgp"]["local_as"]
bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
"neighbor %s, found expected: %s",
router, bgp_neighbor, remote_as)
- logger.info("Exiting lib API: verify_AS_numbers()")
+ logger.debug("Exiting lib API: verify_AS_numbers()")
return True
errormsg(str) or True
"""
- logger.info("Entering lib API: clear_bgp_and_verify()")
+ logger.debug("Entering lib API: clear_bgp_and_verify()")
if router not in tgen.routers():
return False
peer_uptime_before_clear_bgp = {}
# Verifying BGP convergence before bgp clear command
for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- logger.info(show_bgp_json)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
" clear", router)
break
else:
- logger.warning("BGP is not yet Converged for router %s "
- "before bgp clear", router)
+ logger.info("BGP is not yet Converged for router %s "
+ "before bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_before_clear_bgp)
# Clearing BGP
logger.info("Clearing BGP neighborship for router %s..", router)
for addr_type in bgp_addr_type.keys():
if addr_type == "ipv4":
- rnode.vtysh_cmd("clear ip bgp *")
+ run_frr_cmd(rnode, "clear ip bgp *")
elif addr_type == "ipv6":
- rnode.vtysh_cmd("clear bgp ipv6 *")
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
peer_uptime_after_clear_bgp = {}
# Verifying BGP convergence after bgp clear command
- for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
+ for retry in range(11):
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
router)
break
else:
- logger.warning("BGP is not yet Converged for router %s after"
- " bgp clear", router)
-
+ logger.info("BGP is not yet Converged for router %s after"
+ " bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_after_clear_bgp)
# Comparing peerUptimeEstablishedEpoch dictionaries
if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
" {}".format(router)
return errormsg
- logger.info("Exiting lib API: clear_bgp_and_verify()")
+ logger.debug("Exiting lib API: clear_bgp_and_verify()")
return True
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Entering lib API: verify_bgp_timers_and_functionality()")
sleep(5)
router_list = tgen.routers()
for router in input_dict.keys():
router)
show_ip_bgp_neighbor_json = \
- rnode.vtysh_cmd("show ip bgp neighbor json", isjson=True)
+ run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True)
bgp_addr_type = input_dict[router]["bgp"]["address_family"]
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
(holddowntimer - keepalivetimer):
if nh_state != "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship is intact in %s"
- " sec for neighbor %s \n "
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " sec for neighbor %s",
+ timer, bgp_neighbor)
####################
# Shutting down peer interface and verifying that BGP
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
if timer == holddowntimer:
if nh_state == "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship has gone down in"
- " %s sec for neighbor %s \n"
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " %s sec for neighbor %s",
+ timer, bgp_neighbor)
- logger.info("Exiting lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
attribute):
"""
sleep(2)
logger.info("Verifying router %s RIB for best path:", router)
- sh_ip_bgp_json = rnode.vtysh_cmd(command, isjson=True)
+ sh_ip_bgp_json = run_frr_cmd(rnode, command, isjson=True)
for route_val in input_dict.values():
net_data = route_val["bgp"]["address_family"]["ipv4"]["unicast"]
else:
command = "show ipv6 route json"
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
attribute):
"""
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_best_path_as_per_admin_distance()")
+ logger.debug("Entering lib API: verify_best_path_as_per_admin_distance()")
router_list = tgen.routers()
if router not in router_list:
return False
compare = "LOWEST"
# Show ip route
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
from time import sleep
from subprocess import call
from subprocess import STDOUT as SUB_STDOUT
+from subprocess import PIPE as SUB_PIPE
+from subprocess import Popen
+from functools import wraps
+from re import search as re_search
+
import StringIO
import os
import ConfigParser
import traceback
import socket
import ipaddr
+import re
from lib import topotest
from functools import partial
from lib.topolog import logger, logger_config
from lib.topogen import TopoRouter
+from lib.topotest import interface_set_status
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
ERROR_LIST = ["Malformed", "Failure", "Unknown"]
+ROUTER_LIST = []
####
CD = os.path.dirname(os.path.realpath(__file__))
pass
+def run_frr_cmd(rnode, cmd, isjson=False):
+ """
+ Execute frr show commands in priviledged mode
+
+ * `rnode`: router node on which commands needs to executed
+ * `cmd`: Command to be executed on frr
+ * `isjson`: If command is to get json data or not
+
+ :return str:
+ """
+
+ if cmd:
+ ret_data = rnode.vtysh_cmd(cmd, isjson=isjson)
+
+ if True:
+ if isjson:
+ logger.debug(ret_data)
+ print_data = rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False)
+ else:
+ print_data = ret_data
+
+ logger.info('Output for command [ %s] on router %s:\n%s',
+ cmd.rstrip("json"), rnode.name, print_data)
+ return ret_data
+
+ else:
+ raise InvalidCLIError('No actual cmd passed')
+
+
def create_common_configuration(tgen, router, data, config_type=None,
build=False):
"""
frr_cfg_fd.write(config_map[config_type])
for line in data:
frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
except IOError as err:
logger.error("Unable to open FRR Config File. error(%s): %s" %
logger.debug("Entering API: reset_config_on_routers")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
+ for rname in ROUTER_LIST:
if routerName and routerName != rname:
continue
+ router = router_list[rname]
+ logger.info("Configuring router %s to initial test configuration",
+ rname)
cfg = router.run("vtysh -c 'show running'")
fname = "{}/{}/frr.sav".format(TMPDIR, rname)
dname = "{}/{}/delta.conf".format(TMPDIR, rname)
f.close()
- command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
- " --test {}/{}/frr_json_initial.conf > {}". \
- format(TMPDIR, rname, TMPDIR, rname, dname)
- result = call(command, shell=True, stderr=SUB_STDOUT)
+ run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
+ init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
+ command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}". \
+ format(run_cfg_file, init_cfg_file, dname)
+ result = call(command, shell=True, stderr=SUB_STDOUT,
+ stdout=SUB_PIPE)
# Assert if command fail
if result > 0:
- errormsg = ("Command:{} is failed due to non-zero exit"
- " code".format(command))
- return errormsg
+ logger.error("Delta file creation failed. Command executed %s",
+ command)
+ with open(run_cfg_file, 'r') as fd:
+ logger.info('Running configuration saved in %s is:\n%s',
+ run_cfg_file, fd.read())
+ with open(init_cfg_file, 'r') as fd:
+ logger.info('Test configuration saved in %s is:\n%s',
+ init_cfg_file, fd.read())
+
+ err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file]
+ result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE)
+ output = result.communicate()
+ for out_data in output:
+ temp_data = out_data.decode('utf-8').lower()
+ for out_err in ERROR_LIST:
+ if out_err.lower() in temp_data:
+ logger.error("Found errors while validating data in"
+ " %s", run_cfg_file)
+ raise InvalidCLIError(out_data)
+ raise InvalidCLIError("Unknown error in %s", output)
f = open(dname, "r")
delta = StringIO.StringIO()
delta.write("end\n")
output = router.vtysh_multicmd(delta.getvalue(),
pretty_output=False)
- logger.info("New configuration for router {}:".format(rname))
+
delta.close()
delta = StringIO.StringIO()
cfg = router.run("vtysh -c 'show running'")
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
+ logger.info("Configuration on router {} after config reset:".
+ format(rname))
logger.info(delta.getvalue())
delta.close()
logger.debug("Entering API: load_config_to_router")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
- if rname == routerName:
- try:
- frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
- frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
- FRRCFG_BKUP_FILE)
- with open(frr_cfg_file, "r") as cfg:
- data = cfg.read()
- if save_bkup:
- with open(frr_cfg_bkup, "w") as bkup:
- bkup.write(data)
-
- output = router.vtysh_multicmd(data, pretty_output=False)
- for out_err in ERROR_LIST:
- if out_err.lower() in output.lower():
- raise InvalidCLIError("%s" % output)
- except IOError as err:
- errormsg = ("Unable to open config File. error(%s):"
- " %s", (err.errno, err.strerror))
- return errormsg
+ for rname in ROUTER_LIST:
+ if routerName and routerName != rname:
+ continue
- logger.info("New configuration for router {}:".format(rname))
- new_config = router.run("vtysh -c 'show running'")
+ router = router_list[rname]
+ try:
+ frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
+ FRRCFG_BKUP_FILE)
+ with open(frr_cfg_file, "r+") as cfg:
+ data = cfg.read()
+ logger.info("Applying following configuration on router"
+ " {}:\n{}".format(rname, data))
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+
+ output = router.vtysh_multicmd(data, pretty_output=False)
+ for out_err in ERROR_LIST:
+ if out_err.lower() in output.lower():
+ raise InvalidCLIError("%s" % output)
+
+ cfg.truncate(0)
+ except IOError as err:
+ errormsg = ("Unable to open config File. error(%s):"
+ " %s", (err.errno, err.strerror))
+ return errormsg
- # Router current configuration to log file or console if
- # "show_router_config" is defined in "pytest.ini"
- if show_router_config:
- logger.info(new_config)
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ new_config = router.run("vtysh -c 'show running'")
+ logger.info(new_config)
logger.debug("Exting API: load_config_to_router")
return True
* `tgen` : topogen object
"""
- global TMPDIR
+ global TMPDIR, ROUTER_LIST
# Starting topology
tgen.start_topology()
# Starting deamons
+
router_list = tgen.routers()
+ ROUTER_LIST = sorted(router_list.keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
TMPDIR = os.path.join(LOGDIR, tgen.modname)
- for rname, router in router_list.iteritems():
+ router_list = tgen.routers()
+ for rname in ROUTER_LIST:
+ router = router_list[rname]
try:
os.chdir(TMPDIR)
- # Creating rouer named dir and empty zebra.conf bgpd.conf files
+ # Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
-
if os.path.isdir('{}'.format(rname)):
os.system("rm -rf {}".format(rname))
os.mkdir('{}'.format(rname))
router.load_config(
TopoRouter.RD_ZEBRA,
'{}/{}/zebra.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/zebra.conf'.format(rname))
)
# Loading empty bgpd.conf file to router, to start the bgp deamon
router.load_config(
TopoRouter.RD_BGP,
'{}/{}/bgpd.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/bgpd.conf'.format(rname))
)
# Starting routers
" address" % ip_address)
-def check_address_types(addr_type):
+def check_address_types(addr_type=None):
"""
Checks environment variable set and compares with the current address type
"""
- global ADDRESS_TYPES
- if ADDRESS_TYPES is None:
- ADDRESS_TYPES = "dual"
-
- if ADDRESS_TYPES == "dual":
- ADDRESS_TYPES = ["ipv4", "ipv6"]
- elif ADDRESS_TYPES == "ipv4":
- ADDRESS_TYPES = ["ipv4"]
- elif ADDRESS_TYPES == "ipv6":
- ADDRESS_TYPES = ["ipv6"]
-
- if addr_type not in ADDRESS_TYPES:
+
+ addr_types_env = os.environ.get("ADDRESS_TYPES")
+ if not addr_types_env:
+ addr_types_env = "dual"
+
+ if addr_types_env == "dual":
+ addr_types = ["ipv4", "ipv6"]
+ elif addr_types_env == "ipv4":
+ addr_types = ["ipv4"]
+ elif addr_types_env == "ipv6":
+ addr_types = ["ipv6"]
+
+ if addr_type is None:
+ return addr_types
+
+ if addr_type not in addr_types:
logger.error("{} not in supported/configured address types {}".
- format(addr_type, ADDRESS_TYPES))
+ format(addr_type, addr_types))
return False
- return ADDRESS_TYPES
+ return True
def generate_ips(network, no_of_ips):
""" Display message at beginning of test case"""
count = 20
logger.info("*"*(len(tc_name)+count))
- logger.info("START -> Testcase : %s", tc_name)
+ logger.info("START -> Testcase : %s" % tc_name)
logger.info("*"*(len(tc_name)+count))
""" Display message at end of test case"""
count = 21
logger.info("="*(len(tc_name)+count))
- logger.info("PASSED -> Testcase : %s", tc_name)
+ logger.info("Testcase : %s -> PASSED", tc_name)
logger.info("="*(len(tc_name)+count))
+def interface_status(tgen, topo, input_dict):
+ """
+    Set the operational status (up/down) of router interfaces
+
+    * `tgen` : Topogen object
+    * `topo` : json file data
+    * `input_dict` : router(s) and interface(s) whose status has to be changed
+
+ Usage
+ -----
+ input_dict = {
+ "r3": {
+ "interface_list": ['eth1-r1-r2', 'eth2-r1-r3'],
+ "status": "down"
+ }
+ }
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: interface_status()")
+
+ try:
+ global frr_cfg
+ for router in input_dict.keys():
+
+ interface_list = input_dict[router]['interface_list']
+ status = input_dict[router].setdefault('status', 'up')
+ for intf in interface_list:
+ rnode = tgen.routers()[router]
+ interface_set_status(rnode, intf, status)
+
+ # Load config to router
+ load_config_to_router(tgen, router)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occured. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: interface_status()")
+ return True
+
+
+def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0):
+ """
+ Retries function execution, if return is an errormsg or exception
+
+ * `attempts`: Number of attempts to make
+ * `wait`: Number of seconds to wait between each attempt
+ * `return_is_str`: Return val is an errormsg in case of failure
+ * `initial_wait`: Sleeps for this much seconds before executing function
+
+ """
+
+ def _retry(func):
+
+ @wraps(func)
+ def func_retry(*args, **kwargs):
+ _wait = kwargs.pop('wait', wait)
+ _attempts = kwargs.pop('attempts', attempts)
+ _attempts = int(_attempts)
+ if _attempts < 0:
+ raise ValueError("attempts must be 0 or greater")
+
+ if initial_wait > 0:
+ logger.info("Waiting for [%s]s as initial delay", initial_wait)
+ sleep(initial_wait)
+
+ _return_is_str = kwargs.pop('return_is_str', return_is_str)
+ for i in range(1, _attempts + 1):
+ try:
+ _expected = kwargs.setdefault('expected', True)
+ kwargs.pop('expected')
+ ret = func(*args, **kwargs)
+ logger.debug("Function returned %s" % ret)
+ if return_is_str and isinstance(ret, bool):
+ return ret
+ elif return_is_str and _expected is False:
+ return ret
+
+ if _attempts == i:
+ return ret
+ except Exception as err:
+ if _attempts == i:
+ logger.info("Max number of attempts (%r) reached",
+ _attempts)
+ raise
+ else:
+ logger.info("Function returned %s", err)
+ if i < _attempts:
+ logger.info("Retry [#%r] after sleeping for %ss"
+ % (i, _wait))
+ sleep(_wait)
+ func_retry._original = func
+ return func_retry
+ return _retry
+
+
+def disable_v6_link_local(tgen, router, intf_name=None):
+ """
+ Disables ipv6 link local addresses for a particular interface or
+ all interfaces
+
+    * `tgen`: tgen object
+    * `router` : router whose ipv6 link local addresses need to be
+      disabled
+ * `intf_name` : Interface name for which v6 link local needs to
+ be disabled
+ """
+
+ router_list = tgen.routers()
+ for rname, rnode in router_list.iteritems():
+ if rname != router:
+ continue
+
+ linklocal = []
+
+ ifaces = router_list[router].run('ip -6 address')
+
+ # Fix newlines (make them all the same)
+ ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re.search('[0-9]+: ([^:]+)[@if0-9:]+ <', line)
+ if m:
+ interface = m.group(1).split("@")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+'
+ ':[0-9a-f]+[/0-9]*) scope link', line)
+ if m:
+ local = m.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ if len(linklocal[0]) > 1:
+ link_local_dict = {item[0]: item[1] for item in linklocal}
+
+ for lname, laddr in link_local_dict.items():
+
+ if intf_name is not None and lname != intf_name:
+ continue
+
+ cmd = "ip addr del {} dev {}".format(laddr, lname)
+ router_list[router].run(cmd)
+
+
#############################################
# These APIs, will used by testcase
#############################################
interface_name = destRouterLink
else:
interface_name = data["interface"]
- interface_data.append("interface {}\n".format(
+ if "ipv6" in data:
+ disable_v6_link_local(tgen, c_router, interface_name)
+ interface_data.append("interface {}".format(
str(interface_name)
))
if "ipv4" in data:
intf_addr = c_data["links"][destRouterLink]["ipv4"]
- interface_data.append("ip address {}\n".format(
+ interface_data.append("ip address {}".format(
intf_addr
))
if "ipv6" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6"]
- interface_data.append("ipv6 address {}\n".format(
+ interface_data.append("ipv6 address {}".format(
intf_addr
))
+
result = create_common_configuration(tgen, c_router,
interface_data,
"interface_config",
for router in input_dict.keys():
if "static_routes" not in input_dict[router]:
errormsg = "static_routes not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
static_routes_list = []
for router in input_dict.keys():
if "prefix_lists" not in input_dict[router]:
errormsg = "prefix_lists not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
config_data = []
for router in input_dict.keys():
if "route_maps" not in input_dict[router]:
errormsg = "route_maps not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
rmap_data = []
for rmap_name, rmap_value in \
# Weight
if weight:
- rmap_data.append("set weight {} \n".format(
+ rmap_data.append("set weight {}".format(
weight))
# Adding MATCH and SET sequence to RMAP if defined
#############################################
# Verification APIs
#############################################
-def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+@retry(attempts=10, return_is_str=True, initial_wait=2)
+def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
"""
Data will be read from input_dict or input JSON file, API will generate
same prefixes, which were redistributed by either create_static_routes() or
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_rib()")
+ logger.debug("Entering lib API: verify_rib()")
router_list = tgen.routers()
for routerInput in input_dict.keys():
else:
command = "show ipv6 route json"
- sleep(10)
logger.info("Checking router %s RIB:", router)
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
if "no_of_ip" in static_route:
no_of_ip = static_route["no_of_ip"]
else:
- no_of_ip = 0
+ no_of_ip = 1
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
found_hops = [rib_r["ip"] for rib_r in
rib_routes_json[st_rt][0][
"nexthops"]]
- for nh in next_hop:
+ for nh in found_hops:
nh_found = False
- if nh and nh in found_hops:
+ if nh and nh in next_hop:
nh_found = True
else:
errormsg = ("Nexthop {} is Missing for {}"
logger.info("Verified routes in router %s RIB, found routes"
" are: %s", dut, found_routes)
- logger.info("Exiting lib API: verify_rib()")
+ logger.debug("Exiting lib API: verify_rib()")
return True
-def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None, expected=True):
- """
- Wrapper function for `_verify_rib` that tries multiple time to get results.
-
- When the expected result is `False` we actually should expect for an string instead.
- """
-
- # Use currying to hide the parameters and create a test function.
- test_func = partial(_verify_rib, tgen, addr_type, dut, input_dict, next_hop, protocol)
-
- # Call the test function and expect it to return True, otherwise try it again.
- if expected is True:
- _, result = topotest.run_and_expect(test_func, True, count=20, wait=6)
- else:
- _, result = topotest.run_and_expect_type(test_func, str, count=20, wait=6)
-
- # Return as normal.
- return result
-
-
def verify_admin_distance_for_static_routes(tgen, input_dict):
"""
API to verify admin distance for static routes as defined in input_dict/
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Entering lib API: verify_admin_distance_for_static_routes()")
for router in input_dict.keys():
if router not in tgen.routers():
command = "show ip route json"
else:
command = "show ipv6 route json"
- show_ip_route_json = rnode.vtysh_cmd(command, isjson=True)
+ show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
logger.info("Verifying admin distance for static route %s"
" under dut %s:", static_route, router)
format(network, router))
return errormsg
- logger.info("Exiting lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()")
return True
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_prefix_lists()")
+ logger.debug("Entering lib API: verify_prefix_lists()")
for router in input_dict.keys():
if router not in tgen.routers():
rnode = tgen.routers()[router]
# Show ip prefix list
- show_prefix_list = rnode.vtysh_cmd("show ip prefix-list")
+ show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
# Verify Prefix list is deleted
prefix_lists_addr = input_dict[router]["prefix_lists"]
for prefix_list in prefix_lists_addr[addr_type].keys():
if prefix_list in show_prefix_list:
- errormsg = ("Prefix list {} is not deleted from router"
+ errormsg = ("Prefix list {} is/are present in the router"
" {}".format(prefix_list, router))
return errormsg
- logger.info("Prefix list %s is/are deleted successfully"
+ logger.info("Prefix list %s is/are not present in the router"
" from router %s", prefix_list, router)
- logger.info("Exiting lib API: verify_prefix_lissts()")
+ logger.debug("Exiting lib API: verify_prefix_lissts()")
return True
from collections import OrderedDict
from json import dumps as json_dumps
+from re import search as re_search
import ipaddr
import pytest
from lib.bgp import create_router_bgp
+ROUTER_LIST = []
+
+
def build_topo_from_json(tgen, topo):
"""
Reads configuration from JSON file. Adds routers, creates interface
* `topo`: json file data
"""
- listRouters = []
- for routerN in sorted(topo['routers'].iteritems()):
- logger.info('Topo: Add router {}'.format(routerN[0]))
- tgen.add_router(routerN[0])
- listRouters.append(routerN[0])
+ ROUTER_LIST = sorted(topo['routers'].keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
+
+ listRouters = ROUTER_LIST[:]
+ for routerN in ROUTER_LIST:
+ logger.info('Topo: Add router {}'.format(routerN))
+ tgen.add_router(routerN)
+ listRouters.append(routerN)
- listRouters.sort()
if 'ipv4base' in topo:
ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
elif 'link' in x:
return int(x.split('-link')[1])
else:
- return int(x.split('r')[1])
+ return int(re_search('\d+', x).group(0))
for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
iteritems(),
key=lambda x: link_sort(x[0])):
data = topo["routers"]
for func_type in func_dict.keys():
- logger.info('Building configuration for {}'.format(func_type))
+ logger.info('Checking for {} configuration in input data'.format(
+ func_type))
func_dict.get(func_type)(tgen, data, build=True)
for router in sorted(topo['routers'].keys()):
- logger.info('Configuring router {}...'.format(router))
+ logger.debug('Configuring router {}...'.format(router))
result = load_config_to_router(tgen, router, save_bkup)
if not result:
# Skip pytests example directory
[pytest]
-norecursedirs = .git example-test lib docker
+norecursedirs = .git example-test example-topojson-test lib docker
[topogen]
# Default configuration values
# Display router current configuration during test execution,
# by default configuration will not be shown
-show_router_config = True
+# show_router_config = True
# Default daemons binaries path.
#frrdir = /usr/lib/frr
static struct cmd_node bgp_vnc_l2_group_node = {
BGP_VNC_L2_GROUP_NODE, "%s(config-router-vnc-l2-group)# "};
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# "};
static struct cmd_node eigrp_node = {EIGRP_NODE, "%s(config-router)# "};
}
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
- "router bgp [(1-4294967295) [<view|vrf> WORD]]",
+ "router bgp [(1-4294967295)$instasn [<view|vrf> WORD]]",
ROUTER_STR BGP_STR AS_STR
"BGP view\nBGP VRF\n"
"View/VRF name\n")
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_BGPD,
+ bmp_targets,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ "BGP Monitoring Protocol\n"
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ vty->node = BMP_NODE;
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
return CMD_SUCCESS;
}
-DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535)",
+DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535) [vrf NAME]",
"Enable a routing process\n"
"Start EIGRP configuration\n"
- "AS number to use\n")
+ "AS number to use\n"
+ VRF_CMD_HELP_STR)
{
vty->node = EIGRP_NODE;
return CMD_SUCCESS;
case BGP_VNC_DEFAULTS_NODE:
case BGP_VNC_NVE_GROUP_NODE:
case BGP_VNC_L2_GROUP_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
return rpki_exit(self, vty, argc, argv);
}
+DEFUNSH(VTYSH_BGPD, bmp_exit, bmp_exit_cmd, "exit",
+ "Exit current mode and down to previous mode\n")
+{
+ vtysh_exit(vty);
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BGPD, bmp_quit, bmp_quit_cmd, "quit",
+ "Exit current mode and down to previous mode\n")
+{
+ return bmp_exit(self, vty, argc, argv);
+}
+
DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
"Exit from VRF configuration mode\n")
{
install_node(&openfabric_node, NULL);
install_node(&vty_node, NULL);
install_node(&rpki_node, NULL);
+ install_node(&bmp_node, NULL);
#if HAVE_BFDD > 0
install_node(&bfd_node, NULL);
install_node(&bfd_peer_node, NULL);
install_element(BGP_FLOWSPECV4_NODE, &exit_address_family_cmd);
install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BMP_NODE, &bmp_exit_cmd);
+ install_element(BMP_NODE, &bmp_quit_cmd);
+ install_element(BMP_NODE, &vtysh_end_all_cmd);
+
install_element(CONFIG_NODE, &rpki_cmd);
install_element(RPKI_NODE, &rpki_exit_cmd);
install_element(RPKI_NODE, &rpki_quit_cmd);
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
#include "lib/nexthop.h"
#include "lib/nexthop_group_private.h"
#include "lib/routemap.h"
+#include "lib/mpls.h"
#include "zebra/connected.h"
#include "zebra/debug.h"
struct nexthop *nexthop)
{
struct nexthop *resolved_hop;
+ uint8_t num_labels = 0;
+ mpls_label_t labels[MPLS_MAX_LABELS];
+ enum lsp_types_t label_type = ZEBRA_LSP_NONE;
+ int i = 0;
resolved_hop = nexthop_new();
SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
if (newhop->flags & NEXTHOP_FLAG_ONLINK)
resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
- /* Copy labels of the resolved route */
- if (newhop->nh_label)
- nexthop_add_labels(resolved_hop, newhop->nh_label_type,
- newhop->nh_label->num_labels,
- &newhop->nh_label->label[0]);
+ /* Copy labels of the resolved route and the parent resolving to it */
+ if (newhop->nh_label) {
+ for (i = 0; i < newhop->nh_label->num_labels; i++)
+ labels[num_labels++] = newhop->nh_label->label[i];
+ label_type = newhop->nh_label_type;
+ }
+
+ if (nexthop->nh_label) {
+ for (i = 0; i < nexthop->nh_label->num_labels; i++)
+ labels[num_labels++] = nexthop->nh_label->label[i];
+
+ /* If the parent has labels, use its type */
+ label_type = nexthop->nh_label_type;
+ }
+
+ if (num_labels)
+ nexthop_add_labels(resolved_hop, label_type, num_labels,
+ labels);
resolved_hop->rparent = nexthop;
_nexthop_add(&nexthop->resolved, resolved_hop);
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
ret = route_map_add_match(index, command, arg, type);
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return retval;
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
re->status);
json_object_int_add(json_route, "internalFlags",
re->flags);
+ json_object_int_add(json_route, "internalNextHopNum",
+ re->nexthop_num);
+ json_object_int_add(json_route, "internalNextHopActiveNum",
+ re->nexthop_active_num);
if (uptime < ONE_DAY_SECOND)
sprintf(buf, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
tm->tm_sec);