/* Sanity check: don't leak open sockets. */
if (bs->sock != -1) {
if (bglobal.debug_peer_event)
- zlog_debug("session-enable: previous socket open");
+ zlog_debug("%s: previous socket open", __func__);
close(bs->sock);
bs->sock = -1;
}
if (bglobal.debug_peer_event)
- zlog_debug("session-delete: %s", bs_to_string(bs));
+ zlog_debug("%s: %s", __func__, bs_to_string(bs));
control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs);
memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval));
if (ttlval > 255) {
if (bglobal.debug_network)
- zlog_debug("ipv4-recv: invalid TTL: %u",
- ttlval);
+ zlog_debug("%s: invalid TTL: %u",
+ __func__, ttlval);
return -1;
}
*ttl = ttlval;
memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval));
if (ttlval > 255) {
if (bglobal.debug_network)
- zlog_debug("ipv6-recv: invalid TTL: %u",
- ttlval);
+ zlog_debug("%s: invalid TTL: %u",
+ __func__, ttlval);
return -1;
}
if (wlen <= 0) {
if (bglobal.debug_network)
- zlog_debug("udp-send: loopback failure: (%d) %s", errno,
- strerror(errno));
+ zlog_debug("%s: loopback failure: (%d) %s", __func__,
+ errno, strerror(errno));
return -1;
} else if (wlen < (ssize_t)datalen) {
if (bglobal.debug_network)
- zlog_debug("udp-send: partial send: %zd expected %zu",
- wlen, datalen);
+ zlog_debug("%s: partial send: %zd expected %zu",
+ __func__, wlen, datalen);
return -1;
}
wlen = sendmsg(sd, &msg, 0);
if (wlen <= 0) {
if (bglobal.debug_network)
- zlog_debug("udp-send: loopback failure: (%d) %s", errno,
- strerror(errno));
+ zlog_debug("%s: loopback failure: (%d) %s", __func__,
+ errno, strerror(errno));
return -1;
} else if (wlen < (ssize_t)datalen) {
if (bglobal.debug_network)
- zlog_debug("udp-send: partial send: %zd expected %zu",
- wlen, datalen);
+ zlog_debug("%s: partial send: %zd expected %zu",
+ __func__, wlen, datalen);
return -1;
}
int ttl = value;
if (setsockopt(sd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) == -1) {
- zlog_warn("set-ttl: setsockopt(IP_TTL, %d): %s", value,
+ zlog_warn("%s: setsockopt(IP_TTL, %d): %s", __func__, value,
strerror(errno));
return -1;
}
int tos = value;
if (setsockopt(sd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) == -1) {
- zlog_warn("set-tos: setsockopt(IP_TOS, %d): %s", value,
+ zlog_warn("%s: setsockopt(IP_TOS, %d): %s", __func__, value,
strerror(errno));
return -1;
}
int one = 1;
if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
- zlog_warn("set-reuse-addr: setsockopt(SO_REUSEADDR, %d): %s",
- one, strerror(errno));
+ zlog_warn("%s: setsockopt(SO_REUSEADDR, %d): %s", __func__, one,
+ strerror(errno));
return false;
}
return true;
int one = 1;
if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) == -1) {
- zlog_warn("set-reuse-port: setsockopt(SO_REUSEPORT, %d): %s",
- one, strerror(errno));
+ zlog_warn("%s: setsockopt(SO_REUSEPORT, %d): %s", __func__, one,
+ strerror(errno));
return false;
}
return true;
#define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5)
#define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6)
#define BATTR_RMAP_LINK_BW_SET (1 << 7)
+#define BATTR_RMAP_L3VPN_ACCEPT_GRE (1 << 8)
/* Route Reflector related structure. */
struct cluster_list {
pi->type == ZEBRA_ROUTE_BGP
&& pi->sub_type == BGP_ROUTE_STATIC)
break;
- if (!pi) /* unexpected */
+ if (!pi) {
+ bgp_dest_unlock_node(dest);
return 0;
+ }
attr = pi->attr;
global_dest = bgp_global_evpn_node_get(bgp->rib[afi][safi],
peer->afc_recv[afi][safi] = from_peer->afc_recv[afi][safi];
peer->orf_plist[afi][safi] = from_peer->orf_plist[afi][safi];
peer->llgr[afi][safi] = from_peer->llgr[afi][safi];
- if (from_peer->soo[afi][safi]) {
- ecommunity_free(&peer->soo[afi][safi]);
- peer->soo[afi][safi] =
- ecommunity_dup(from_peer->soo[afi][safi]);
- }
}
if (bgp_getsockname(peer) < 0) {
#include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events, bgp_type_str
#include "bgpd/bgp_errors.h" // for expanded error reference information
#include "bgpd/bgp_fsm.h" // for BGP_EVENT_ADD, bgp_event
-#include "bgpd/bgp_packet.h" // for bgp_notify_send_with_data, bgp_notify...
+#include "bgpd/bgp_packet.h" // for bgp_notify_io_invalid...
#include "bgpd/bgp_trace.h" // for frrtraces
#include "bgpd/bgpd.h" // for peer, BGP_MARKER_SIZE, bgp_master, bm
/* clang-format on */
return false;
if (memcmp(m_correct, m_rx, BGP_MARKER_SIZE) != 0) {
- bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
- BGP_NOTIFY_HEADER_NOT_SYNC);
+ bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+ BGP_NOTIFY_HEADER_NOT_SYNC, NULL, 0);
return false;
}
zlog_debug("%s unknown message type 0x%02x", peer->host,
type);
- bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
- BGP_NOTIFY_HEADER_BAD_MESTYPE, &type,
- 1);
+ bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+ BGP_NOTIFY_HEADER_BAD_MESTYPE, &type, 1);
return false;
}
uint16_t nsize = htons(size);
- bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
- BGP_NOTIFY_HEADER_BAD_MESLEN,
- (unsigned char *)&nsize, 2);
+ bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+ BGP_NOTIFY_HEADER_BAD_MESLEN,
+ (unsigned char *)&nsize, 2);
return false;
}
struct timeval next_update = {0, 0};
struct timespec next_update_ts = {0, 0};
+ /*
+ * The RCU mechanism for each pthread is initialized in a "locked"
+ * state. That's ok for pthreads using the frr_pthread / thread_fetch
+ * event loop, because that event loop unlocks regularly.
+ * For foreign pthreads, the lock needs to be unlocked so that the
+ * background rcu pthread can run.
+ */
+ rcu_read_unlock();
+
peerhash_mtx = XCALLOC(MTYPE_TMP, sizeof(pthread_mutex_t));
peerhash_cond = XCALLOC(MTYPE_TMP, sizeof(pthread_cond_t));
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
+#define BGP_LABELPOOL_ENABLE_TESTS 0
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bgpd/bgp_labelpool_clippy.c"
+#endif
+
+
/*
* Definitions and external declarations.
*/
extern struct zclient *zclient;
+#if BGP_LABELPOOL_ENABLE_TESTS
+static void lptest_init(void);
+static void lptest_finish(void);
+#endif
+
/*
* Remember where pool data are kept
*/
static struct labelpool *lp;
-/* request this many labels at a time from zebra */
-#define LP_CHUNK_SIZE 50
+/*
+ * Number of labels requested at a time from the zebra label manager.
+ * We start small but double the request size each time up to a
+ * maximum size.
+ *
+ * The label space is 20 bits which is shared with other FRR processes
+ * on this host, so to avoid greedily requesting a mostly wasted chunk,
+ * we limit the chunk size to 1/16 of the label space (that's the -4 bits
+ * in the definition below). This limit slightly increases our cost of
+ * finding free labels in our allocated chunks.
+ */
+#define LP_CHUNK_SIZE_MIN 128
+#define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
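As a point of reference, the growth policy described in the comment above starts at 128 labels and doubles on each successful request until it reaches 1/16 of the 20-bit label space (65536). A small, self-contained sketch of that progression (illustrative only, not FRR code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t next_chunksize = 128;        /* LP_CHUNK_SIZE_MIN */
        const uint32_t max = 1 << (20 - 4);   /* LP_CHUNK_SIZE_MAX = 65536 */

        /* each satisfied request doubles the next one, up to the cap */
        for (int request = 1; request <= 12; request++) {
            printf("request %2d: ask zebra for %u labels\n",
                   request, next_chunksize);
            if ((next_chunksize << 1) <= max)
                next_chunksize <<= 1;
        }
        return 0;
    }

Requests 1 through 10 print 128 through 65536; later requests stay capped at 65536.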
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
struct lp_chunk {
uint32_t first;
uint32_t last;
+ uint32_t nfree; /* un-allocated count */
+ uint32_t idx_last_allocated; /* start looking here */
+ bitfield_t allocated_map;
};
/*
static void lp_chunk_free(void *goner)
{
+ struct lp_chunk *chunk = (struct lp_chunk *)goner;
+
+ bf_free(chunk->allocated_map);
XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
lp->callback_q->spec.workfunc = lp_cbq_docallback;
lp->callback_q->spec.del_item_data = lp_cbq_item_free;
lp->callback_q->spec.max_retries = 0;
+
+ lp->next_chunksize = LP_CHUNK_SIZE_MIN;
+
+#if BGP_LABELPOOL_ENABLE_TESTS
+ lptest_init();
+#endif
}
/* check if a label callback was for a BGP LU node, and if so, unlock it */
struct lp_fifo *lf;
struct work_queue_item *item, *titem;
+#if BGP_LABELPOOL_ENABLE_TESTS
+ lptest_finish();
+#endif
if (!lp)
return;
/*
* Find a free label
- * Linear search is not efficient but should be executed infrequently.
*/
for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
uintptr_t lbl;
+ unsigned int index;
if (debug)
zlog_debug("%s: chunk first=%u last=%u",
__func__, chunk->first, chunk->last);
- for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
- /* labelid is key to all-request "ledger" list */
- if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
- /*
- * Success
- */
- return lbl;
- }
+ /*
+ * don't look in chunks with no available labels
+ */
+ if (!chunk->nfree)
+ continue;
+
+ /*
+ * roll through bitfield starting where we stopped
+ * last time
+ */
+ index = bf_find_next_clear_bit_wrap(
+ &chunk->allocated_map, chunk->idx_last_allocated + 1,
+ 0);
+
+ /*
+ * since chunk->nfree is non-zero, we should always get
+ * a valid index
+ */
+ assert(index != WORD_MAX);
+
+ lbl = chunk->first + index;
+ if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
+ /* something is very wrong */
+ zlog_err("%s: unable to insert inuse label %u (id %p)",
+ __func__, (uint32_t)lbl, labelid);
+ return MPLS_LABEL_NONE;
}
+
+ /*
+ * Success
+ */
+ bf_set_bit(chunk->allocated_map, index);
+ chunk->idx_last_allocated = index;
+ chunk->nfree -= 1;
+
+ return lbl;
}
+
return MPLS_LABEL_NONE;
}
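The loop above replaces the old linear probe with a wrap-around search of a per-chunk allocation bitmap, resuming just past the last allocated index. A simplified, self-contained sketch of that wrap-around search over a plain byte array (FRR's bitfield_t helpers implement the same idea; the function name here is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    #define NBITS 1024

    static uint8_t allocated_map[NBITS / 8];

    /* Return the first clear bit at or after 'start', wrapping to 0 when
     * the end of the map is reached; return -1 if every bit is set. */
    static int find_next_clear_bit_wrap(size_t start)
    {
        for (size_t n = 0; n < NBITS; n++) {
            size_t idx = (start + n) % NBITS;

            if (!(allocated_map[idx / 8] & (1u << (idx % 8))))
                return (int)idx;
        }
        return -1;
    }

    int main(void)
    {
        allocated_map[0] = 0xff;  /* bits 0..7 already allocated */
        return find_next_clear_bit_wrap(5) == 8 ? 0 : 1;
    }

Starting the search at idx_last_allocated + 1 keeps back-to-back allocations cheap in the common case, while the wrap lets released labels be reused once the cursor comes back around.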
* When connection to zebra is reestablished, previous label assignments
* will be invalidated (via callbacks having the "allocated" parameter unset)
* and new labels will be automatically reassigned by this labelpool module
- * (that is, a requestor does not need to call lp_get() again if it is
+ * (that is, a requestor does not need to call bgp_lp_get() again if it is
* notified via callback that its label has been lost: it will eventually
* get another callback with a new label assignment).
*
+ * The callback function should return 0 to accept the allocation
+ * and non-zero to refuse it. The return value is ignored for
+ * invalidations (i.e., when the "allocated" parameter is false).
+ *
* Prior requests for a given labelid are detected so that requests and
* assignments are not duplicated.
*/
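A minimal sketch of a callback honoring the contract above (the mpls_label_t typedef and the stored state are stand-ins, not FRR definitions; compare test_cb() added later in this patch):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t mpls_label_t;   /* stand-in for the lib/mpls.h type */

    static mpls_label_t my_label;    /* toy per-labelid state */

    static int my_lp_callback(mpls_label_t label, void *labelid, bool allocated)
    {
        (void)labelid;

        if (!allocated) {
            /* Label invalidated (e.g. zebra restarted): forget it. The
             * return value is ignored here; a new assignment arrives via
             * a later callback. */
            my_label = 0;
            return 0;
        }

        /* Return 0 to accept the allocation, non-zero to refuse it. */
        my_label = label;
        return 0;
    }

    int main(void)
    {
        return my_lp_callback(16000, (void *)0x1, true);  /* returns 0 */
    }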
if (lp_fifo_count(&lp->requests) > lp->pending_count) {
if (!zclient || zclient->sock < 0)
return;
- if (zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
- MPLS_LABEL_BASE_ANY)
- != ZCLIENT_SEND_FAILURE)
- lp->pending_count += LP_CHUNK_SIZE;
+ if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
+ MPLS_LABEL_BASE_ANY) !=
+ ZCLIENT_SEND_FAILURE) {
+ lp->pending_count += lp->next_chunksize;
+ if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
+ lp->next_chunksize <<= 1;
+ }
}
}
if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
if (label == lcb->label && type == lcb->type) {
+ struct listnode *node;
+ struct lp_chunk *chunk;
uintptr_t lbl = label;
+ bool deallocated = false;
/* no longer in use */
skiplist_delete(lp->inuse, (void *)lbl, NULL);
/* no longer requested */
skiplist_delete(lp->ledger, labelid, NULL);
+
+ /*
+ * Find the chunk this label belongs to and
+ * deallocate the label
+ */
+ for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+ uint32_t index;
+
+ if ((label < chunk->first) ||
+ (label > chunk->last))
+ continue;
+
+ index = label - chunk->first;
+ assert(bf_test_index(chunk->allocated_map,
+ index));
+ bf_release_index(chunk->allocated_map, index);
+ chunk->nfree += 1;
+ deallocated = true;
+ }
+ assert(deallocated);
}
}
}
struct lp_chunk *chunk;
int debug = BGP_DEBUG(labelpool, LABELPOOL);
struct lp_fifo *lf;
+ uint32_t labelcount;
if (last < first) {
flog_err(EC_BGP_LABEL,
chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
+ labelcount = last - first + 1;
+
chunk->first = first;
chunk->last = last;
+ chunk->nfree = labelcount;
+ bf_init(chunk->allocated_map, labelcount);
- listnode_add(lp->chunks, chunk);
+ /*
+ * Optimize for allocation by adding the new (presumably larger)
+ * chunk at the head of the list so it is examined first.
+ */
+ listnode_add_head(lp->chunks, chunk);
- lp->pending_count -= (last - first + 1);
+ lp->pending_count -= labelcount;
if (debug) {
zlog_debug("%s: %zu pending requests", __func__,
lp_fifo_count(&lp->requests));
}
- while ((lf = lp_fifo_first(&lp->requests))) {
+ while (labelcount && (lf = lp_fifo_first(&lp->requests))) {
struct lp_lcb *lcb;
void *labelid = lf->lcb.labelid;
break;
}
+ labelcount -= 1;
+
/*
* we filled the request from local pool.
* Enqueue response work item with new label.
*/
void bgp_lp_event_zebra_up(void)
{
- int labels_needed;
- int chunks_needed;
+ unsigned int labels_needed;
+ unsigned int chunks_needed;
void *labelid;
struct lp_lcb *lcb;
int lm_init_ok;
labels_needed = lp_fifo_count(&lp->requests) +
skiplist_count(lp->inuse);
+ if (labels_needed > lp->next_chunksize) {
+ while ((lp->next_chunksize < labels_needed) &&
+ (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))
+ lp->next_chunksize <<= 1;
+ }
+
/* round up */
- chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
- labels_needed = chunks_needed * LP_CHUNK_SIZE;
+ chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+ labels_needed = chunks_needed * lp->next_chunksize;
lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;
}
json = json_object_new_array();
} else {
- vty_out(vty, "First Last\n");
- vty_out(vty, "--------------\n");
+ vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
+ "nfree");
+ vty_out(vty, "-------------------------------------------\n");
}
for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+ uint32_t size;
+
+ size = chunk->last - chunk->first + 1;
+
if (uj) {
json_elem = json_object_new_object();
json_object_array_add(json, json_elem);
json_object_int_add(json_elem, "first", chunk->first);
json_object_int_add(json_elem, "last", chunk->last);
+ json_object_int_add(json_elem, "size", size);
+ json_object_int_add(json_elem, "numberFree",
+ chunk->nfree);
} else
- vty_out(vty, "%-10u %-10u\n", chunk->first,
- chunk->last);
+ vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
+ chunk->last, size, chunk->nfree);
}
if (uj)
vty_json(vty, json);
return CMD_SUCCESS;
}
+#if BGP_LABELPOOL_ENABLE_TESTS
+/*------------------------------------------------------------------------
+ * Testing code start
+ *------------------------------------------------------------------------*/
+
+DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");
+
+#define LPT_STAT_INSERT_FAIL 0
+#define LPT_STAT_DELETE_FAIL 1
+#define LPT_STAT_ALLOCATED 2
+#define LPT_STAT_DEALLOCATED 3
+#define LPT_STAT_MAX 4
+
+const char *lpt_counter_names[] = {
+ "sl insert failures",
+ "sl delete failures",
+ "labels allocated",
+ "labels deallocated",
+};
+
+static uint8_t lpt_generation;
+static bool lpt_inprogress;
+static struct skiplist *lp_tests;
+static unsigned int lpt_test_cb_tcb_lookup_fails;
+static unsigned int lpt_release_tcb_lookup_fails;
+static unsigned int lpt_test_event_tcb_lookup_fails;
+static unsigned int lpt_stop_tcb_lookup_fails;
+
+struct lp_test {
+ uint8_t generation;
+ unsigned int request_maximum;
+ unsigned int request_blocksize;
+ uintptr_t request_count; /* match type of labelid */
+ int label_type;
+ struct skiplist *labels;
+ struct timeval starttime;
+ struct skiplist *timestamps_alloc;
+ struct skiplist *timestamps_dealloc;
+ struct thread *event_thread;
+ unsigned int counter[LPT_STAT_MAX];
+};
+
+/* test parameters */
+#define LPT_MAX_COUNT 500000 /* get this many labels in all */
+#define LPT_BLKSIZE 10000 /* this many at a time, then yield */
+#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
+
+
+static int test_cb(mpls_label_t label, void *labelid, bool allocated)
+{
+ uintptr_t generation;
+ struct lp_test *tcb;
+
+ generation = ((uintptr_t)labelid >> 24) & 0xff;
+
+ if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {
+
+ /* couldn't find current test in progress */
+ ++lpt_test_cb_tcb_lookup_fails;
+ return -1; /* reject allocation */
+ }
+
+ if (allocated) {
+ ++tcb->counter[LPT_STAT_ALLOCATED];
+ if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
+ uintptr_t time_ms;
+
+ time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
+ skiplist_insert(tcb->timestamps_alloc,
+ (void *)(uintptr_t)tcb
+ ->counter[LPT_STAT_ALLOCATED],
+ (void *)time_ms);
+ }
+ if (skiplist_insert(tcb->labels, labelid,
+ (void *)(uintptr_t)label)) {
+ ++tcb->counter[LPT_STAT_INSERT_FAIL];
+ return -1;
+ }
+ } else {
+ ++tcb->counter[LPT_STAT_DEALLOCATED];
+ if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
+ uintptr_t time_ms;
+
+ time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
+ skiplist_insert(tcb->timestamps_dealloc,
+ (void *)(uintptr_t)tcb
+ ->counter[LPT_STAT_ALLOCATED],
+ (void *)time_ms);
+ }
+ if (skiplist_delete(tcb->labels, labelid, 0)) {
+ ++tcb->counter[LPT_STAT_DELETE_FAIL];
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void labelpool_test_event_handler(struct thread *thread)
+{
+ struct lp_test *tcb;
+
+ if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
+ (void **)&tcb)) {
+
+ /* couldn't find current test in progress */
+ ++lpt_test_event_tcb_lookup_fails;
+ return;
+ }
+
+ /*
+ * request a bunch of labels
+ */
+ for (unsigned int i = 0; (i < tcb->request_blocksize) &&
+ (tcb->request_count < tcb->request_maximum);
+ ++i) {
+
+ uintptr_t id;
+
+ ++tcb->request_count;
+
+ /*
+ * construct 32-bit id from request_count and generation
+ */
+ id = ((uintptr_t)tcb->generation << 24) |
+ (tcb->request_count & 0x00ffffff);
+ bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
+ }
+
+ if (tcb->request_count < tcb->request_maximum)
+ thread_add_event(bm->master, labelpool_test_event_handler, NULL,
+ 0, &tcb->event_thread);
+}
+
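The handler above packs the test generation into the top byte of the 32-bit labelid and the per-test request counter into the low 24 bits; test_cb() recovers the generation with the reverse shift. A worked example of the packing (values chosen only for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t generation = 2;   /* lpt_generation of this run */
        uintptr_t count = 10000;    /* request_count of this label */

        uintptr_t id = (generation << 24) | (count & 0x00ffffff);

        assert(id == 0x02002710);                  /* 2 << 24 | 10000 */
        assert(((id >> 24) & 0xff) == generation); /* what test_cb() extracts */
        assert((id & 0x00ffffff) == count);
        return 0;
    }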
+static void lptest_stop(void)
+{
+ struct lp_test *tcb;
+
+ if (!lpt_inprogress)
+ return;
+
+ if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
+ (void **)&tcb)) {
+
+ /* couldn't find current test in progress */
+ ++lpt_stop_tcb_lookup_fails;
+ return;
+ }
+
+ if (tcb->event_thread)
+ thread_cancel(&tcb->event_thread);
+
+ lpt_inprogress = false;
+}
+
+static int lptest_start(struct vty *vty)
+{
+ struct lp_test *tcb;
+
+ if (lpt_inprogress) {
+ vty_out(vty, "test already in progress\n");
+ return -1;
+ }
+
+ if (skiplist_count(lp_tests) >=
+ (1 << (8 * sizeof(lpt_generation))) - 1) {
+ /*
+ * Too many test runs
+ */
+ vty_out(vty, "too many tests: clear first\n");
+ return -1;
+ }
+
+ /*
+ * We pack the generation and request number into the labelid;
+ * make sure they fit.
+ */
+ unsigned int n1 = LPT_MAX_COUNT;
+ unsigned int sh = 0;
+ unsigned int label_bits;
+
+ label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));
+
+ /* n1 should be same type as tcb->request_maximum */
+ assert(sizeof(n1) == sizeof(tcb->request_maximum));
+
+ while (n1 >>= 1)
+ ++sh;
+ sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */
+
+ if (sh > label_bits) {
+ vty_out(vty,
+ "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
+ LPT_MAX_COUNT, sh, label_bits);
+ return -1;
+ }
+
+ lpt_inprogress = true;
+ ++lpt_generation;
+
+ tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));
+
+ tcb->generation = lpt_generation;
+ tcb->label_type = LP_TYPE_VRF;
+ tcb->request_maximum = LPT_MAX_COUNT;
+ tcb->request_blocksize = LPT_BLKSIZE;
+ tcb->labels = skiplist_new(0, NULL, NULL);
+ tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
+ tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
+ thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
+ &tcb->event_thread);
+ monotime(&tcb->starttime);
+
+ skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
+ return 0;
+}
+
+DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
+ "debug bgp lptest start",
+ DEBUG_STR BGP_STR
+ "label pool test\n"
+ "start\n")
+{
+ lptest_start(vty);
+ return CMD_SUCCESS;
+}
+
+static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
+{
+ unsigned int i;
+
+ vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
+ lpt_test_cb_tcb_lookup_fails);
+ vty_out(vty, "Global Lookup Failures in release: %5u\n",
+ lpt_release_tcb_lookup_fails);
+ vty_out(vty, "Global Lookup Failures in event: %5u\n",
+ lpt_test_event_tcb_lookup_fails);
+ vty_out(vty, "Global Lookup Failures in stop: %5u\n",
+ lpt_stop_tcb_lookup_fails);
+ vty_out(vty, "\n");
+
+ if (!tcb) {
+ if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
+ (void **)&tcb)) {
+ vty_out(vty, "Error: can't find test %u\n",
+ lpt_generation);
+ return;
+ }
+ }
+
+ vty_out(vty, "Test Generation %u:\n", tcb->generation);
+
+ vty_out(vty, "Counter Value\n");
+ for (i = 0; i < LPT_STAT_MAX; ++i) {
+ vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
+ tcb->counter[i]);
+ }
+ vty_out(vty, "\n");
+
+ if (tcb->timestamps_alloc) {
+ void *Key;
+ void *Value;
+ void *cursor;
+
+ float elapsed;
+
+ vty_out(vty, "%10s %10s\n", "Count", "Seconds");
+
+ cursor = NULL;
+ while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
+ &cursor)) {
+
+ elapsed = ((float)(uintptr_t)Value) / 1000;
+
+ vty_out(vty, "%10llu %10.3f\n",
+ (unsigned long long)(uintptr_t)Key, elapsed);
+ }
+ vty_out(vty, "\n");
+ }
+}
+
+DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
+ "debug bgp lptest show",
+ DEBUG_STR BGP_STR
+ "label pool test\n"
+ "show\n")
+{
+
+ if (lp_tests) {
+ void *Key;
+ void *Value;
+ void *cursor;
+
+ cursor = NULL;
+ while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
+ lptest_print_stats(vty, (struct lp_test *)Value);
+ }
+ } else {
+ vty_out(vty, "no test results\n");
+ }
+ return CMD_SUCCESS;
+}
+
+DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
+ "debug bgp lptest stop",
+ DEBUG_STR BGP_STR
+ "label pool test\n"
+ "stop\n")
+{
+
+ if (lpt_inprogress) {
+ lptest_stop();
+ lptest_print_stats(vty, NULL);
+ } else {
+ vty_out(vty, "no test in progress\n");
+ }
+ return CMD_SUCCESS;
+}
+
+DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
+ "debug bgp lptest clear",
+ DEBUG_STR BGP_STR
+ "label pool test\n"
+ "clear\n")
+{
+
+ if (lpt_inprogress) {
+ lptest_stop();
+ }
+ if (lp_tests) {
+ while (!skiplist_first(lp_tests, NULL, NULL))
+ /* del function of skiplist cleans up tcbs */
+ skiplist_delete_first(lp_tests);
+ }
+ return CMD_SUCCESS;
+}
+
+/*
+ * With the "release" command, we can release labels at intervals through
+ * the ID space. Thus we can exercise the bitfield-wrapping behavior
+ * of the allocator in a subsequent test.
+ */
+/* clang-format off */
+DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
+ "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
+ DEBUG_STR
+ BGP_STR
+ "label pool test\n"
+ "release labels\n"
+ "\"test\"\n"
+ "test number\n"
+ "\"every\"\n"
+ "label fraction denominator\n")
+{
+ /* clang-format on */
+
+ unsigned long testnum;
+ char *end;
+ struct lp_test *tcb;
+
+ testnum = strtoul(generation, &end, 0);
+ if (*end) {
+ vty_out(vty, "Invalid test number: \"%s\"\n", generation);
+ return CMD_SUCCESS;
+ }
+ if (lpt_inprogress && (testnum == lpt_generation)) {
+ vty_out(vty,
+ "Error: Test %lu is still in progress (stop first)\n",
+ testnum);
+ return CMD_SUCCESS;
+ }
+
+ if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
+ (void **)&tcb)) {
+
+ /* couldn't find current test in progress */
+ vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
+ testnum);
+ ++lpt_release_tcb_lookup_fails;
+ return CMD_SUCCESS;
+ }
+
+ void *Key, *cKey;
+ void *Value, *cValue;
+ void *cursor;
+ unsigned int iteration;
+ int rc;
+
+ cursor = NULL;
+ iteration = 0;
+ rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
+
+ while (!rc) {
+ cKey = Key;
+ cValue = Value;
+
+ /* find next item before we delete this one */
+ rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
+
+ if (!(iteration % every_nth)) {
+ bgp_lp_release(tcb->label_type, cKey,
+ (mpls_label_t)(uintptr_t)cValue);
+ skiplist_delete(tcb->labels, cKey, NULL);
+ ++tcb->counter[LPT_STAT_DEALLOCATED];
+ }
+ ++iteration;
+ }
+
+ return CMD_SUCCESS;
+}
+
+static void lptest_delete(void *val)
+{
+ struct lp_test *tcb = (struct lp_test *)val;
+ void *Key;
+ void *Value;
+ void *cursor;
+
+ if (tcb->labels) {
+ cursor = NULL;
+ while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
+ bgp_lp_release(tcb->label_type, Key,
+ (mpls_label_t)(uintptr_t)Value);
+ skiplist_free(tcb->labels);
+ tcb->labels = NULL;
+ }
+ if (tcb->timestamps_alloc) {
+ cursor = NULL;
+ skiplist_free(tcb->timestamps_alloc);
+ tcb->timestamps_alloc = NULL;
+ }
+
+ if (tcb->timestamps_dealloc) {
+ cursor = NULL;
+ skiplist_free(tcb->timestamps_dealloc);
+ tcb->timestamps_dealloc = NULL;
+ }
+
+ if (tcb->event_thread)
+ thread_cancel(&tcb->event_thread);
+
+ memset(tcb, 0, sizeof(*tcb));
+
+ XFREE(MTYPE_LABELPOOL_TEST, tcb);
+}
+
+static void lptest_init(void)
+{
+ lp_tests = skiplist_new(0, NULL, lptest_delete);
+}
+
+static void lptest_finish(void)
+{
+ if (lp_tests) {
+ skiplist_free(lp_tests);
+ lp_tests = NULL;
+ }
+}
+
+/*------------------------------------------------------------------------
+ * Testing code end
+ *------------------------------------------------------------------------*/
+#endif /* BGP_LABELPOOL_ENABLE_TESTS */
+
void bgp_lp_vty_init(void)
{
install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);
+
+#if BGP_LABELPOOL_ENABLE_TESTS
+ install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
+ install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
+ install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
+ install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
+ install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
+#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
struct work_queue *callback_q;
uint32_t pending_count; /* requested from zebra */
uint32_t reconnect_count; /* zebra reconnections */
+ uint32_t next_chunksize; /* request this many labels */
};
extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
", bgp@%s:%d", address, bm->port);
}
+ bgp_if_init();
+
frr_config_fork();
/* must be called after fork() */
bgp_gr_apply_running_config();
new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_IMPORTED, 0,
to_bgp->peer_self, new_attr, bn);
+ if (source_bpi->peer) {
+ extra = bgp_path_info_extra_get(new);
+ extra->peer_orig = peer_lock(source_bpi->peer);
+ }
+
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, new, BGP_PATH_ANNC_NH_SELF);
&& bnc->nexthop_num > 0));
}
-static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
+static int bgp_isvalid_nexthop_for_ebgp(struct bgp_nexthop_cache *bnc,
+ struct bgp_path_info *path)
+{
+ struct interface *ifp = NULL;
+ struct nexthop *nexthop;
+ struct bgp_interface *iifp;
+ struct peer *peer;
+
+ if (!path->extra || !path->extra->peer_orig)
+ return false;
+
+ peer = path->extra->peer_orig;
+
+ /* only connected ebgp peers are valid */
+ if (peer->sort != BGP_PEER_EBGP || peer->ttl != BGP_DEFAULT_TTL ||
+ CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK) ||
+ CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
+ return false;
+
+ for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
+ if (nexthop->type == NEXTHOP_TYPE_IFINDEX ||
+ nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX ||
+ nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) {
+ ifp = if_lookup_by_index(
+ bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
+ bnc->bgp->vrf_id);
+ }
+ if (!ifp)
+ continue;
+ iifp = ifp->info;
+ if (CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING))
+ return true;
+ }
+ return false;
+}
+
+static int bgp_isvalid_nexthop_for_mplsovergre(struct bgp_nexthop_cache *bnc,
+ struct bgp_path_info *path)
+{
+ struct interface *ifp = NULL;
+ struct nexthop *nexthop;
+
+ for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
+ if (nexthop->type != NEXTHOP_TYPE_BLACKHOLE) {
+ ifp = if_lookup_by_index(
+ bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
+ bnc->bgp->vrf_id);
+ if (ifp && (ifp->ll_type == ZEBRA_LLT_IPGRE ||
+ ifp->ll_type == ZEBRA_LLT_IP6GRE))
+ break;
+ }
+ }
+ if (!ifp)
+ return false;
+
+ if (CHECK_FLAG(path->attr->rmap_change_flags,
+ BATTR_RMAP_L3VPN_ACCEPT_GRE))
+ return true;
+
+ return false;
+}
+
+static int bgp_isvalid_nexthop_for_mpls(struct bgp_nexthop_cache *bnc,
+ struct bgp_path_info *path)
{
/*
- * In the case of MPLS-VPN, the label is learned from LDP or other
+ * - In the case of MPLS-VPN, the label is learned from LDP or other
* protocols, and nexthop tracking is enabled for the label.
* The value is recorded as BGP_NEXTHOP_LABELED_VALID.
- * In the case of SRv6-VPN, we need to track the reachability to the
+ * - In the case of SRv6-VPN, we need to track the reachability to the
* SID (in other words, IPv6 address). As in MPLS, we need to record
* the value as BGP_NEXTHOP_SID_VALID. However, this function is
* currently not implemented, and this function assumes that all
* Transit routes for SRv6-VPN are valid.
+ * - Otherwise check for mpls-gre acceptance
*/
- return (bgp_zebra_num_connects() == 0
- || (bnc && bnc->nexthop_num > 0
- && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
- || bnc->bgp->srv6_enabled)));
+ return (bgp_zebra_num_connects() == 0 ||
+ (bnc && (bnc->nexthop_num > 0 &&
+ (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) ||
+ bnc->bgp->srv6_enabled ||
+ bgp_isvalid_nexthop_for_ebgp(bnc, path) ||
+ bgp_isvalid_nexthop_for_mplsovergre(bnc, path)))));
}
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
*/
if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW)
return 1;
- else if (safi == SAFI_UNICAST && pi
- && pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra
- && pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop) {
- return bgp_isvalid_labeled_nexthop(bnc);
- } else
+ else if (safi == SAFI_UNICAST && pi &&
+ pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra &&
+ pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop)
+ return bgp_isvalid_nexthop_for_mpls(bnc, pi);
+ else
return (bgp_isvalid_nexthop(bnc));
}
&& (path->attr->evpn_overlay.type
!= OVERLAY_INDEX_GATEWAY_IP)) {
bnc_is_valid_nexthop =
- bgp_isvalid_labeled_nexthop(bnc) ? true : false;
+ bgp_isvalid_nexthop_for_mpls(bnc, path) ? true
+ : false;
} else {
if (bgp_update_martian_nexthop(
bnc->bgp, afi, safi, path->type,
* @param data Data portion
* @param datalen length of data portion
*/
-void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
- uint8_t sub_code, uint8_t *data, size_t datalen)
+static void bgp_notify_send_internal(struct peer *peer, uint8_t code,
+ uint8_t sub_code, uint8_t *data,
+ size_t datalen, bool use_curr)
{
struct stream *s;
bool hard_reset = bgp_notify_send_hard_reset(peer, code, sub_code);
* If possible, store last packet for debugging purposes. This check is
* in place because we are sometimes called with a doppelganger peer,
* who tends to have a plethora of fields nulled out.
+ *
+ * Some callers should not attempt this - the io pthread for example
+ * should not touch internals of the peer struct.
*/
- if (peer->curr) {
+ if (use_curr && peer->curr) {
size_t packetsize = stream_get_endp(peer->curr);
assert(packetsize <= peer->max_packet_size);
memcpy(peer->last_reset_cause, peer->curr->data, packetsize);
*/
void bgp_notify_send(struct peer *peer, uint8_t code, uint8_t sub_code)
{
- bgp_notify_send_with_data(peer, code, sub_code, NULL, 0);
+ bgp_notify_send_internal(peer, code, sub_code, NULL, 0, true);
+}
+
+/*
+ * Enqueue notification; called from the main pthread, so peer object access is ok.
+ */
+void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
+ uint8_t sub_code, uint8_t *data, size_t datalen)
+{
+ bgp_notify_send_internal(peer, code, sub_code, data, datalen, true);
+}
+
+/*
+ * For use by the io pthread, queueing a notification but avoiding access to
+ * the peer object.
+ */
+void bgp_notify_io_invalid(struct peer *peer, uint8_t code, uint8_t sub_code,
+ uint8_t *data, size_t datalen)
+{
+ /* Avoid touching the peer object */
+ bgp_notify_send_internal(peer, code, sub_code, data, datalen, false);
}
/*
extern void bgp_notify_send(struct peer *, uint8_t, uint8_t);
extern void bgp_notify_send_with_data(struct peer *, uint8_t, uint8_t,
uint8_t *, size_t);
+void bgp_notify_io_invalid(struct peer *peer, uint8_t code, uint8_t sub_code,
+ uint8_t *data, size_t datalen);
extern void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,
uint8_t orf_type, uint8_t when_to_refresh,
int remove, uint8_t subtype);
if (e->bgp_orig)
bgp_unlock(e->bgp_orig);
+ if (e->peer_orig)
+ peer_unlock(e->peer_orig);
+
if (e->aggr_suppressors)
list_delete(&e->aggr_suppressors);
static void bgp_peer_as_override(struct bgp *bgp, afi_t afi, safi_t safi,
struct peer *peer, struct attr *attr)
{
+ struct aspath *aspath;
+
if (peer->sort == BGP_PEER_EBGP &&
- peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE))
- attr->aspath = aspath_replace_specific_asn(attr->aspath,
- peer->as, bgp->as);
+ peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) {
+ if (attr->aspath->refcnt)
+ aspath = aspath_dup(attr->aspath);
+ else
+ aspath = attr->aspath;
+
+ attr->aspath = aspath_intern(
+ aspath_replace_specific_asn(aspath, peer->as, bgp->as));
+
+ aspath_free(aspath);
+ }
}
void bgp_attr_add_llgr_community(struct attr *attr)
/* Label index cannot be changed. */
if (bgp_static->label_index != label_index) {
vty_out(vty, "%% cannot change label-index\n");
+ bgp_dest_unlock_node(dest);
return CMD_WARNING_CONFIG_FAILED;
}
aggregate, atomic_aggregate, p);
if (!attr) {
+ bgp_dest_unlock_node(dest);
bgp_aggregate_delete(bgp, p, afi, safi, aggregate);
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
zlog_debug("%s: %pFX null attribute", __func__,
static void bgp_aggregate_med_update(struct bgp_aggregate *aggregate,
struct bgp *bgp, const struct prefix *p,
afi_t afi, safi_t safi,
- struct bgp_path_info *pi, bool is_adding)
+ struct bgp_path_info *pi)
{
/* MED matching disabled. */
if (!aggregate->match_med)
return;
- /* Aggregation with different MED, nothing to do. */
- if (aggregate->med_mismatched)
- return;
-
- /*
- * Test the current entry:
- *
- * is_adding == true: if the new entry doesn't match then we must
- * install all suppressed routes.
- *
- * is_adding == false: if the entry being removed was the last
- * unmatching entry then we can suppress all routes.
+ /* Aggregation with mismatched MEDs: recheck whether all MEDs are
+ * equal now.
*/
- if (!is_adding) {
- if (bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi)
- && aggregate->summary_only)
- bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi,
- safi, true);
- } else
+ if (aggregate->med_mismatched &&
+ bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi) &&
+ aggregate->summary_only)
+ bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi, safi,
+ true);
+ else
bgp_aggregate_med_match(aggregate, bgp, pi);
/* No mismatches, just quit. */
*/
if (aggregate->match_med)
bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi,
- pinew, true);
+ pinew);
if (aggregate->summary_only && AGGREGATE_MED_VALID(aggregate))
aggr_suppress_path(aggregate, pinew);
* "unsuppressing" twice.
*/
if (aggregate->match_med)
- bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi,
- true);
+ bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi);
if (aggregate->count > 0)
aggregate->count--;
switch (nhtype) {
case NEXTHOP_TYPE_IFINDEX:
+ switch (p->family) {
+ case AF_INET:
+ attr.nexthop.s_addr = INADDR_ANY;
+ attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ break;
+ case AF_INET6:
+ memset(&attr.mp_nexthop_global, 0,
+ sizeof(attr.mp_nexthop_global));
+ attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
+ break;
+ }
break;
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
if (type == bgp_show_type_route_map) {
struct route_map *rmap = output_arg;
struct bgp_path_info path;
- struct attr dummy_attr;
+ struct bgp_path_info_extra extra;
+ struct attr dummy_attr = {};
route_map_result_t ret;
dummy_attr = *pi->attr;
- path.peer = pi->peer;
- path.attr = &dummy_attr;
+ prep_for_rmap_apply(&path, &extra, dest, pi,
+ pi->peer, &dummy_attr);
ret = route_map_apply(rmap, dest_p, &path);
bgp_attr_flush(&dummy_attr);
*/
struct bgp *bgp_orig;
+ /*
+ * Original peer of the path, used to tell whether the
+ * session is a directly connected EBGP session or not
+ */
+ struct peer *peer_orig;
+
/*
* Nexthop in context of original bgp instance. Needed
* for label resolution of core mpls routes exported to a vrf.
route_set_ip_nexthop_free
};
+/* `set l3vpn next-hop encapsulation gre' */
+
+/* Compiled value for `set l3vpn next-hop encapsulation'. */
+struct rmap_l3vpn_nexthop_encapsulation_set {
+ uint8_t protocol;
+};
+
+static enum route_map_cmd_result_t
+route_set_l3vpn_nexthop_encapsulation(void *rule, const struct prefix *prefix,
+ void *object)
+{
+ struct rmap_l3vpn_nexthop_encapsulation_set *rins = rule;
+ struct bgp_path_info *path;
+
+ path = object;
+
+ if (rins->protocol != IPPROTO_GRE)
+ return RMAP_OKAY;
+
+ SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_L3VPN_ACCEPT_GRE);
+ return RMAP_OKAY;
+}
+
+/* Route map `l3vpn nexthop encapsulation' compile function. */
+static void *route_set_l3vpn_nexthop_encapsulation_compile(const char *arg)
+{
+ struct rmap_l3vpn_nexthop_encapsulation_set *rins;
+
+ rins = XCALLOC(MTYPE_ROUTE_MAP_COMPILED,
+ sizeof(struct rmap_l3vpn_nexthop_encapsulation_set));
+
+ /* XXX ALL GRE modes are accepted for now: gre or ip6gre */
+ rins->protocol = IPPROTO_GRE;
+
+ return rins;
+}
+
+/* Free route map's compiled `l3vpn next-hop encapsulation' value. */
+static void route_set_l3vpn_nexthop_encapsulation_free(void *rule)
+{
+ XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+/* Route map commands for l3vpn next-hop encapsulation set. */
+static const struct route_map_rule_cmd
+ route_set_l3vpn_nexthop_encapsulation_cmd = {
+ "l3vpn next-hop encapsulation",
+ route_set_l3vpn_nexthop_encapsulation,
+ route_set_l3vpn_nexthop_encapsulation_compile,
+ route_set_l3vpn_nexthop_encapsulation_free};
+
/* `set local-preference LOCAL_PREF' */
/* Set local preference. */
aspath_new, replace_asn, own_asn);
}
+ aspath_free(aspath_new);
+
return RMAP_OKAY;
}
void bgp_route_map_update_timer(struct thread *thread)
{
- bm->t_rmap_update = NULL;
-
route_map_walk_update_list(bgp_route_map_process_update_cb);
}
return nb_cli_apply_changes(vty, NULL);
}
+DEFPY_YANG(set_l3vpn_nexthop_encapsulation, set_l3vpn_nexthop_encapsulation_cmd,
+ "[no] set l3vpn next-hop encapsulation gre",
+ NO_STR SET_STR
+ "L3VPN operations\n"
+ "Next hop Information\n"
+ "Encapsulation options (for BGP only)\n"
+ "Accept L3VPN traffic over GRE encapsulation\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']";
+ const char *xpath_value =
+ "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation";
+ enum nb_operation operation;
+
+ if (no)
+ operation = NB_OP_DESTROY;
+ else
+ operation = NB_OP_CREATE;
+
+ nb_cli_enqueue_change(vty, xpath, operation, NULL);
+ if (operation == NB_OP_DESTROY)
+ return nb_cli_apply_changes(vty, NULL);
+
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "gre");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
DEFUN_YANG (set_local_pref,
set_local_pref_cmd,
"set local-preference WORD",
route_map_install_set(&route_set_ecommunity_none_cmd);
route_map_install_set(&route_set_tag_cmd);
route_map_install_set(&route_set_label_index_cmd);
+ route_map_install_set(&route_set_l3vpn_nexthop_encapsulation_cmd);
install_element(RMAP_NODE, &match_peer_cmd);
install_element(RMAP_NODE, &match_peer_local_cmd);
install_element(RMAP_NODE, &no_set_ipx_vpn_nexthop_cmd);
install_element(RMAP_NODE, &set_originator_id_cmd);
install_element(RMAP_NODE, &no_set_originator_id_cmd);
+ install_element(RMAP_NODE, &set_l3vpn_nexthop_encapsulation_cmd);
route_map_install_match(&route_match_ipv6_address_cmd);
route_map_install_match(&route_match_ipv6_next_hop_cmd);
.destroy = lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy,
}
},
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify,
+ .destroy = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy,
+ }
+ },
{
.xpath = NULL,
},
struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy(
struct nb_cb_destroy_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy(
+ struct nb_cb_destroy_args *args);
#ifdef __cplusplus
}
return NB_OK;
}
+
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/l3vpn-nexthop-encapsulation
+ */
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct routemap_hook_context *rhc;
+ const char *type;
+ int rv;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ /* Add configuration. */
+ rhc = nb_running_get_entry(args->dnode, NULL, true);
+ type = yang_dnode_get_string(args->dnode, NULL);
+
+ /* Set destroy information. */
+ rhc->rhc_shook = generic_set_delete;
+ rhc->rhc_rule = "l3vpn next-hop encapsulation";
+ rhc->rhc_event = RMAP_EVENT_SET_DELETED;
+
+ rv = generic_set_add(rhc->rhc_rmi,
+ "l3vpn next-hop encapsulation", type,
+ args->errmsg, args->errmsg_len);
+ if (rv != CMD_SUCCESS) {
+ rhc->rhc_shook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+ }
+
+ return NB_OK;
+}
+
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return lib_route_map_entry_set_destroy(args);
+ }
+
+ return NB_OK;
+}
safi_t safi;
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
- if (!bgp->rib[afi][safi])
+ struct bgp_table *table = bgp->rib[afi][safi];
+
+ if (!table)
continue;
struct bgp_dest *match;
struct bgp_dest *node;
- match = bgp_table_subtree_lookup(bgp->rib[afi][safi],
- prefix);
+ match = bgp_table_subtree_lookup(table, prefix);
node = match;
while (node) {
node = bgp_route_next_until(node, match);
}
+
+ if (match)
+ bgp_dest_unlock_node(match);
}
}
return bgp_dest_from_rnode(rn);
}
-/*
- * bgp_node_match_ipv4
- */
-static inline struct bgp_dest *
-bgp_node_match_ipv4(const struct bgp_table *table, struct in_addr *addr)
-{
- struct route_node *rn = route_node_match_ipv4(table->route_table, addr);
-
- return bgp_dest_from_rnode(rn);
-}
-
-/*
- * bgp_node_match_ipv6
- */
-static inline struct bgp_dest *
-bgp_node_match_ipv6(const struct bgp_table *table, struct in6_addr *addr)
-{
- struct route_node *rn = route_node_match_ipv6(table->route_table, addr);
-
- return bgp_dest_from_rnode(rn);
-}
-
static inline unsigned long bgp_table_count(const struct bgp_table *const table)
{
return route_table_count(table->route_table);
return bgp_dest_from_rnode(route_table_get_next(table->route_table, p));
}
-/*
- * bgp_table_iter_init
- */
-static inline void bgp_table_iter_init(bgp_table_iter_t *iter,
- struct bgp_table *table)
-{
- bgp_table_lock(table);
- iter->table = table;
- route_table_iter_init(&iter->rt_iter, table->route_table);
-}
-
-/*
- * bgp_table_iter_next
- */
-static inline struct bgp_dest *bgp_table_iter_next(bgp_table_iter_t *iter)
-{
- return bgp_dest_from_rnode(route_table_iter_next(&iter->rt_iter));
-}
-
-/*
- * bgp_table_iter_cleanup
- */
-static inline void bgp_table_iter_cleanup(bgp_table_iter_t *iter)
-{
- route_table_iter_cleanup(&iter->rt_iter);
- bgp_table_unlock(iter->table);
- iter->table = NULL;
-}
-
-/*
- * bgp_table_iter_pause
- */
-static inline void bgp_table_iter_pause(bgp_table_iter_t *iter)
-{
- route_table_iter_pause(&iter->rt_iter);
-}
-
-/*
- * bgp_table_iter_is_done
- */
-static inline int bgp_table_iter_is_done(bgp_table_iter_t *iter)
-{
- return route_table_iter_is_done(&iter->rt_iter);
-}
-
-/*
- * bgp_table_iter_started
- */
-static inline int bgp_table_iter_started(bgp_table_iter_t *iter)
-{
- return route_table_iter_started(&iter->rt_iter);
-}
-
/* This would benefit from a real atomic operation...
* until then. */
static inline uint64_t bgp_table_next_version(struct bgp_table *table)
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);
XFREE(MTYPE_BGP_PEER_HOST, src->host);
+
+ ecommunity_free(&src->soo[afi][safi]);
}
static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
- "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u",
+ "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju",
peer, peer->sort,
- (intmax_t)(peer->flags & PEER_UPDGRP_FLAGS),
- flags & PEER_UPDGRP_AF_FLAGS);
+ (intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS),
+ (intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS));
zlog_debug(
"%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
peer, (uint32_t)peer->addpath_type[afi][safi],
- peer->cap & PEER_UPDGRP_CAP_FLAGS,
- peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS,
+ CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS),
+ CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_UPDGRP_AF_CAP_FLAGS),
peer->v_routeadv, peer->change_local_as);
zlog_debug(
"%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
peer->shared_network &&
peer_afi_active_nego(peer, AFI_IP6));
zlog_debug(
- "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %u",
+ "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju",
peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_RCV),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_OLD_RCV),
- CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_OUT));
+ (intmax_t)CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_OUT));
zlog_debug("%pBP Update Group Hash key: %u", peer, key);
}
return key;
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_network.h"
#include "bgpd/bgp_open.h"
}
static bool peergroup_af_flag_check(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag)
+ uint64_t flag)
{
if (!peer_group_active(peer)) {
if (CHECK_FLAG(peer->af_flags_invert[afi][safi], flag))
bgp_post_config_delay, &t_bgp_cfg);
}
+static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
+{
+ int write = 0;
+ struct interface *ifp;
+ struct bgp_interface *iifp;
+
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ iifp = ifp->info;
+ if (!iifp)
+ continue;
+
+ if_vty_config_start(vty, ifp);
+
+ if (CHECK_FLAG(iifp->flags,
+ BGP_INTERFACE_MPLS_BGP_FORWARDING)) {
+ vty_out(vty, " mpls bgp forwarding\n");
+ write++;
+ }
+
+ if_vty_config_end(vty);
+ }
+
+ return write;
+}
+
+/* Configuration write function for bgpd. */
+static int config_write_interface(struct vty *vty)
+{
+ int write = 0;
+ struct vrf *vrf = NULL;
+
+ /* Display all VRF-aware BGP interface configuration */
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ write += config_write_interface_one(vty, vrf);
+ }
+
+ return write;
+}
+
+DEFPY(mpls_bgp_forwarding, mpls_bgp_forwarding_cmd,
+ "[no$no] mpls bgp forwarding",
+ NO_STR MPLS_STR BGP_STR
+ "Enable MPLS forwarding for eBGP directly connected peers\n")
+{
+ bool check;
+ struct bgp_interface *iifp;
+
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ iifp = ifp->info;
+ if (!iifp) {
+ vty_out(vty, "Interface %s not available\n", ifp->name);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ check = CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING);
+ if (check != !no) {
+ if (no)
+ UNSET_FLAG(iifp->flags,
+ BGP_INTERFACE_MPLS_BGP_FORWARDING);
+ else
+ SET_FLAG(iifp->flags,
+ BGP_INTERFACE_MPLS_BGP_FORWARDING);
+ /* trigger a nht update on eBGP sessions */
+ if (if_is_operative(ifp))
+ bgp_nht_ifp_up(ifp);
+ }
+ return CMD_SUCCESS;
+}
+
+/* Initialization of BGP interface. */
+static void bgp_vty_if_init(void)
+{
+ /* Install interface node. */
+ if_cmd_init(config_write_interface);
+
+ /* "mpls bgp forwarding" commands. */
+ install_element(INTERFACE_NODE, &mpls_bgp_forwarding_cmd);
+}
+
void bgp_vty_init(void)
{
cmd_variable_handler_register(bgp_var_neighbor);
install_element(BGP_SRV6_NODE, &no_bgp_srv6_locator_cmd);
install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd);
+
+ bgp_vty_if_init();
}
#include "memory.h"
DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp),
(bgp, ifp));
+DEFINE_MTYPE_STATIC(BGPD, BGP_IF_INFO, "BGP interface context");
+
/* Can we install into zebra? */
static inline bool bgp_install_info_to_zebra(struct bgp *bgp)
{
bgp_zebra_process_srv6_locator_chunk,
};
+static int bgp_if_new_hook(struct interface *ifp)
+{
+ struct bgp_interface *iifp;
+
+ if (ifp->info)
+ return 0;
+ iifp = XCALLOC(MTYPE_BGP_IF_INFO, sizeof(struct bgp_interface));
+ ifp->info = iifp;
+
+ return 0;
+}
+
+static int bgp_if_delete_hook(struct interface *ifp)
+{
+ XFREE(MTYPE_BGP_IF_INFO, ifp->info);
+ return 0;
+}
+
+void bgp_if_init(void)
+{
+ /* Initialize Zebra interface data structure. */
+ hook_register_prio(if_add, 0, bgp_if_new_hook);
+ hook_register_prio(if_del, 0, bgp_if_delete_hook);
+}
+
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
{
zclient_num_connects = 0;
extern void bgp_zebra_init(struct thread_master *master,
unsigned short instance);
+extern void bgp_if_init(void);
extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
extern uint32_t bgp_zebra_tm_get_id(void);
extern bool bgp_zebra_tm_chunk_obtained(void);
}
void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag)
+ uint64_t flag)
{
bool group_val;
peer_dst->weight[afi][safi] = peer_src->weight[afi][safi];
peer_dst->addpath_type[afi][safi] =
peer_src->addpath_type[afi][safi];
- if (peer_src->soo[afi][safi]) {
- ecommunity_free(&peer_dst->soo[afi][safi]);
- peer_dst->soo[afi][safi] =
- ecommunity_dup(peer_src->soo[afi][safi]);
- }
}
for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) {
{
int in = FILTER_IN;
int out = FILTER_OUT;
- uint32_t flags_tmp;
- uint32_t pflags_ovrd;
+ uint64_t flags_tmp;
+ uint64_t pflags_ovrd;
uint8_t *pfilter_ovrd;
struct peer *conf;
}
static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag, bool set)
+ uint64_t flag, bool set)
{
int found;
int size;
return 0;
}
-int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag)
+int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag)
{
return peer_af_flag_modify(peer, afi, safi, flag, 1);
}
-int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag)
+int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag)
{
return peer_af_flag_modify(peer, afi, safi, flag, 0);
}
};
DECLARE_QOBJ_TYPE(bgp);
+struct bgp_interface {
+#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0)
+ uint32_t flags;
+};
+
DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp));
DECLARE_HOOK(bgp_inst_config_write,
(struct bgp *bgp, struct vty *vty),
* specific attributes are being treated the exact same way as global
* peer attributes.
*/
- uint32_t af_flags_override[AFI_MAX][SAFI_MAX];
- uint32_t af_flags_invert[AFI_MAX][SAFI_MAX];
- uint32_t af_flags[AFI_MAX][SAFI_MAX];
-#define PEER_FLAG_SEND_COMMUNITY (1U << 0) /* send-community */
-#define PEER_FLAG_SEND_EXT_COMMUNITY (1U << 1) /* send-community ext. */
-#define PEER_FLAG_NEXTHOP_SELF (1U << 2) /* next-hop-self */
-#define PEER_FLAG_REFLECTOR_CLIENT (1U << 3) /* reflector-client */
-#define PEER_FLAG_RSERVER_CLIENT (1U << 4) /* route-server-client */
-#define PEER_FLAG_SOFT_RECONFIG (1U << 5) /* soft-reconfiguration */
-#define PEER_FLAG_AS_PATH_UNCHANGED (1U << 6) /* transparent-as */
-#define PEER_FLAG_NEXTHOP_UNCHANGED (1U << 7) /* transparent-next-hop */
-#define PEER_FLAG_MED_UNCHANGED (1U << 8) /* transparent-next-hop */
-#define PEER_FLAG_DEFAULT_ORIGINATE (1U << 9) /* default-originate */
-#define PEER_FLAG_REMOVE_PRIVATE_AS (1U << 10) /* remove-private-as */
-#define PEER_FLAG_ALLOWAS_IN (1U << 11) /* set allowas-in */
-#define PEER_FLAG_ORF_PREFIX_SM (1U << 12) /* orf capability send-mode */
-#define PEER_FLAG_ORF_PREFIX_RM (1U << 13) /* orf capability receive-mode */
-#define PEER_FLAG_MAX_PREFIX (1U << 14) /* maximum prefix */
-#define PEER_FLAG_MAX_PREFIX_WARNING (1U << 15) /* maximum prefix warning-only */
-#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED (1U << 16) /* leave link-local nexthop unchanged */
-#define PEER_FLAG_FORCE_NEXTHOP_SELF (1U << 17) /* next-hop-self force */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL (1U << 18) /* remove-private-as all */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1U << 19) /* remove-private-as replace-as */
-#define PEER_FLAG_AS_OVERRIDE (1U << 20) /* as-override */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1U << 21) /* remove-private-as all replace-as */
-#define PEER_FLAG_WEIGHT (1U << 24) /* weight */
-#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1U << 25) /* allowas-in origin */
-#define PEER_FLAG_SEND_LARGE_COMMUNITY (1U << 26) /* Send large Communities */
-#define PEER_FLAG_MAX_PREFIX_OUT (1U << 27) /* outgoing maximum prefix */
-#define PEER_FLAG_MAX_PREFIX_FORCE (1U << 28) /* maximum-prefix <num> force */
-#define PEER_FLAG_DISABLE_ADDPATH_RX (1U << 29) /* disable-addpath-rx */
-#define PEER_FLAG_SOO (1U << 30) /* soo */
+ uint64_t af_flags_override[AFI_MAX][SAFI_MAX];
+ uint64_t af_flags_invert[AFI_MAX][SAFI_MAX];
+ uint64_t af_flags[AFI_MAX][SAFI_MAX];
+#define PEER_FLAG_SEND_COMMUNITY (1ULL << 0)
+#define PEER_FLAG_SEND_EXT_COMMUNITY (1ULL << 1)
+#define PEER_FLAG_NEXTHOP_SELF (1ULL << 2)
+#define PEER_FLAG_REFLECTOR_CLIENT (1ULL << 3)
+#define PEER_FLAG_RSERVER_CLIENT (1ULL << 4)
+#define PEER_FLAG_SOFT_RECONFIG (1ULL << 5)
+#define PEER_FLAG_AS_PATH_UNCHANGED (1ULL << 6)
+#define PEER_FLAG_NEXTHOP_UNCHANGED (1ULL << 7)
+#define PEER_FLAG_MED_UNCHANGED (1ULL << 8)
+#define PEER_FLAG_DEFAULT_ORIGINATE (1ULL << 9)
+#define PEER_FLAG_REMOVE_PRIVATE_AS (1ULL << 10)
+#define PEER_FLAG_ALLOWAS_IN (1ULL << 11)
+#define PEER_FLAG_ORF_PREFIX_SM (1ULL << 12)
+#define PEER_FLAG_ORF_PREFIX_RM (1ULL << 13)
+#define PEER_FLAG_MAX_PREFIX (1ULL << 14)
+#define PEER_FLAG_MAX_PREFIX_WARNING (1ULL << 15)
+#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED (1ULL << 16)
+#define PEER_FLAG_FORCE_NEXTHOP_SELF (1ULL << 17)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL (1ULL << 18)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1ULL << 19)
+#define PEER_FLAG_AS_OVERRIDE (1ULL << 20)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1ULL << 21)
+#define PEER_FLAG_WEIGHT (1ULL << 24)
+#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1ULL << 25)
+#define PEER_FLAG_SEND_LARGE_COMMUNITY (1ULL << 26)
+#define PEER_FLAG_MAX_PREFIX_OUT (1ULL << 27)
+#define PEER_FLAG_MAX_PREFIX_FORCE (1ULL << 28)
+#define PEER_FLAG_DISABLE_ADDPATH_RX (1ULL << 29)
+#define PEER_FLAG_SOO (1ULL << 30)
enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX];
extern int peer_flag_unset(struct peer *peer, uint64_t flag);
extern void peer_flag_inherit(struct peer *peer, uint64_t flag);
-extern int peer_af_flag_set(struct peer *, afi_t, safi_t, uint32_t);
-extern int peer_af_flag_unset(struct peer *, afi_t, safi_t, uint32_t);
+extern int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi,
+ uint64_t flag);
+extern int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi,
+ uint64_t flag);
extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t);
extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag);
+ uint64_t flag);
extern void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
enum peer_change_type type);
bgpd/bgp_bmp.c \
bgpd/bgp_debug.c \
bgpd/bgp_evpn_vty.c \
+ bgpd/bgp_labelpool.c \
bgpd/bgp_route.c \
bgpd/bgp_routemap.c \
bgpd/bgp_rpki.c \
labeled unicast. *bgpd* also supports inter-VRF route leaking.
+L3VPN over GRE interfaces
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In MPLS-VPN or SRv6-VPN, an L3VPN next-hop entry requires the selected path
+to carry, respectively, an MPLS label or a valid IPv6 SID; otherwise the
+L3VPN entry is not installed. That check can be bypassed when the path
+resolved for the next hop uses a GRE interface and a route-map with the
+following syntax is configured on the inbound side of the ipv4-vpn or
+ipv6-vpn address family:
+
+.. clicmd:: set l3vpn next-hop encapsulation gre
+
+The incoming BGP L3VPN entry is then accepted, provided that the next hop of
+the L3VPN entry resolves over a path whose outgoing interface is the GRE
+tunnel. The remote endpoint should be configured just behind the GRE tunnel;
+the remote device configuration may vary depending on whether it acts as an
+edge endpoint or not. In any case, the expectation is that incoming MPLS
+traffic received at this endpoint is treated as a valid path for L3VPN.
+
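+For example, such a route-map may be applied inbound on the VPNv4 address
+family as follows (the AS number, neighbor address and route-map name below
+are placeholders)::
+
+   router bgp 65500
+    address-family ipv4 vpn
+     neighbor 192.0.2.2 activate
+     neighbor 192.0.2.2 route-map rmap in
+    exit-address-family
+   !
+   route-map rmap permit 1
+    set l3vpn next-hop encapsulation gre
+   !
+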
.. _bgp-vrf-route-leaking:
VRF Route Leaking
being sent back to the same CPE (e.g.: multi-site). This is especially needed
when using ``as-override`` or ``allowas-in`` to prevent routing loops.
+.. clicmd:: mpls bgp forwarding
+
+It is possible to permit BGP to install VPN prefixes without transport labels
+by issuing this command under the interface configuration context. With this
+configuration, VPN prefixes originated from an eBGP session and having a
+directly connected next hop are installed.
+
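+For example (the interface name below is a placeholder)::
+
+   interface eth0
+    mpls bgp forwarding
+   !
+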
.. _bgp-l3vpn-srv6:
L3VPN SRv6
Set the color of a SR-TE Policy to be applied to a learned route. The SR-TE
Policy is uniquely determined by the color and the BGP nexthop.
+.. clicmd:: set l3vpn next-hop encapsulation gre
+
+ Accept L3VPN traffic over GRE encapsulation.
.. _route-map-call-command:
AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id,
vrf->vrf_id != VRF_DEFAULT ? vrf->name : NULL);
if (eigrp_sock < 0) {
- zlog_err("eigrp_read_sock_init: socket: %s",
- safe_strerror(errno));
+ zlog_err("%s: socket: %s",
+ __func__, safe_strerror(errno));
exit(1);
}
if ((unsigned int)ret < sizeof(*iph)) /* ret must be > 0 now */
{
zlog_warn(
- "eigrp_recv_packet: discarding runt packet of length %d (ip header size is %u)",
- ret, (unsigned int)sizeof(*iph));
+ "%s: discarding runt packet of length %d (ip header size is %u)",
+ __func__, ret, (unsigned int)sizeof(*iph));
return NULL;
}
if (ret != ip_len) {
zlog_warn(
- "eigrp_recv_packet read length mismatch: ip_len is %d, but recvmsg returned %d",
- ip_len, ret);
+ "%s read length mismatch: ip_len is %d, but recvmsg returned %d",
+ __func__, ip_len, ret);
return NULL;
}
if (eigrp->fd < 0) {
flog_err_sys(
EC_LIB_SOCKET,
- "eigrp_new: fatal error: eigrp_sock_init was unable to open a socket");
+ "%s: fatal error: eigrp_sock_init was unable to open a socket",
+ __func__);
exit(1);
}
list_delete(&circuit->ipv6_link);
list_delete(&circuit->ipv6_non_link);
+ if (circuit->ext) {
+ isis_del_ext_subtlvs(circuit->ext);
+ circuit->ext = NULL;
+ }
+
XFREE(MTYPE_TMP, circuit->bfd_config.profile);
XFREE(MTYPE_ISIS_CIRCUIT, circuit->tag);
#include "isisd/isis_dr.h"
#include "isisd/isis_zebra.h"
-DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE, "ISIS MPLS_TE parameters");
DEFINE_MTYPE_STATIC(ISISD, ISIS_PLIST_NAME, "ISIS prefix-list name");
/*
int isis_instance_destroy(struct nb_cb_destroy_args *args)
{
struct isis_area *area;
+ struct isis *isis;
if (args->event != NB_EV_APPLY)
return NB_OK;
area = nb_running_unset_entry(args->dnode);
-
+ isis = area->isis;
isis_area_destroy(area);
+
+ if (listcount(isis->area_list) == 0)
+ isis_finish(isis);
+
return NB_OK;
}
*/
int isis_instance_mpls_te_create(struct nb_cb_create_args *args)
{
- struct listnode *node;
struct isis_area *area;
- struct isis_circuit *circuit;
if (args->event != NB_EV_APPLY)
return NB_OK;
area = nb_running_get_entry(args->dnode, NULL, true);
- if (area->mta == NULL) {
-
- struct mpls_te_area *new;
-
- zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering",
- area->area_tag);
-
- new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area));
-
- /* Initialize MPLS_TE structure */
- new->status = enable;
- new->level = 0;
- new->inter_as = off;
- new->interas_areaid.s_addr = 0;
- new->router_id.s_addr = 0;
- new->ted = ls_ted_new(1, "ISIS", 0);
- if (!new->ted)
- zlog_warn("Unable to create Link State Data Base");
-
- area->mta = new;
- } else {
- area->mta->status = enable;
- }
-
- /* Initialize Link State Database */
- if (area->mta->ted)
- isis_te_init_ted(area);
-
- /* Update Extended TLVs according to Interface link parameters */
- for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit))
- isis_link_params_update(circuit, circuit->interface);
+ isis_mpls_te_create(area);
/* Reoriginate STD_TE & GMPLS circuits */
lsp_regenerate_schedule(area, area->is_type, 0);
int isis_instance_mpls_te_destroy(struct nb_cb_destroy_args *args)
{
- struct listnode *node;
struct isis_area *area;
- struct isis_circuit *circuit;
if (args->event != NB_EV_APPLY)
return NB_OK;
area = nb_running_get_entry(args->dnode, NULL, true);
- if (IS_MPLS_TE(area->mta))
- area->mta->status = disable;
- else
- return NB_OK;
-
- /* Remove Link State Database */
- ls_ted_del_all(&area->mta->ted);
-
- /* Flush LSP if circuit engage */
- for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
- if (!IS_EXT_TE(circuit->ext))
- continue;
-
- /* disable MPLS_TE Circuit keeping SR one's */
- if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID))
- circuit->ext->status = EXT_ADJ_SID;
- else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID))
- circuit->ext->status = EXT_LAN_ADJ_SID;
- else
- circuit->ext->status = 0;
- }
+ if (!IS_MPLS_TE(area->mta))
+ return NB_OK;
+
+ isis_mpls_te_disable(area);
/* Reoriginate STD_TE & GMPLS circuits */
lsp_regenerate_schedule(area, area->is_type, 0);
if (area->srdb.enabled)
isis_sr_stop(area);
+ /* Free Adjacency SID list */
+ list_delete(&srdb->adj_sids);
+
/* Clear Prefix-SID configuration. */
while (srdb_prefix_cfg_count(&srdb->config.prefix_sids) > 0) {
struct sr_prefix_cfg *pcfg;
#include "isisd/isis_te.h"
#include "isisd/isis_zebra.h"
+DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE, "ISIS MPLS_TE parameters");
+
/*------------------------------------------------------------------------*
* Following are control functions for MPLS-TE parameters management.
*------------------------------------------------------------------------*/
+/**
+ * Create MPLS Traffic Engineering structure which belongs to the given area.
+ *
+ * @param area IS-IS Area
+ */
+void isis_mpls_te_create(struct isis_area *area)
+{
+ struct listnode *node;
+ struct isis_circuit *circuit;
+
+ if (!area)
+ return;
+
+ if (area->mta == NULL) {
+
+ struct mpls_te_area *new;
+
+ zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering",
+ area->area_tag);
+
+ new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area));
+
+ /* Initialize MPLS_TE structure */
+ new->status = enable;
+ new->level = 0;
+ new->inter_as = off;
+ new->interas_areaid.s_addr = 0;
+ new->router_id.s_addr = 0;
+ new->ted = ls_ted_new(1, "ISIS", 0);
+ if (!new->ted)
+ zlog_warn("Unable to create Link State Data Base");
+
+ area->mta = new;
+ } else {
+ area->mta->status = enable;
+ }
+
+ /* Initialize Link State Database */
+ if (area->mta->ted)
+ isis_te_init_ted(area);
+
+ /* Update Extended TLVs according to Interface link parameters */
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit))
+ isis_link_params_update(circuit, circuit->interface);
+}
+
+/**
+ * Disable MPLS Traffic Engineering structure which belongs to the given area.
+ *
+ * @param area IS-IS Area
+ */
+void isis_mpls_te_disable(struct isis_area *area)
+{
+ struct listnode *node;
+ struct isis_circuit *circuit;
+
+ if (!area->mta)
+ return;
+
+ area->mta->status = disable;
+
+ /* Remove Link State Database */
+ ls_ted_del_all(&area->mta->ted);
+
+	/* Disable Extended SubTLVs on all circuits */
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
+ if (!IS_EXT_TE(circuit->ext))
+ continue;
+
+ /* disable MPLS_TE Circuit keeping SR one's */
+ if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID))
+ circuit->ext->status = EXT_ADJ_SID;
+ else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID))
+ circuit->ext->status = EXT_LAN_ADJ_SID;
+ else
+ circuit->ext->status = 0;
+ }
+}
+
+void isis_mpls_te_term(struct isis_area *area)
+{
+ struct listnode *node;
+ struct isis_circuit *circuit;
+
+ if (!area->mta)
+ return;
+
+ zlog_info("TE(%s): Terminate MPLS TE", __func__);
+ /* Remove Link State Database */
+ ls_ted_del_all(&area->mta->ted);
+
+ /* Remove Extended SubTLVs */
+	zlog_info("  |- Remove Extended SubTLVs for all circuits");
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
+ zlog_info(" |- Call isis_del_ext_subtlvs()");
+ isis_del_ext_subtlvs(circuit->ext);
+ circuit->ext = NULL;
+ }
+
+ zlog_info(" |- Free MTA structure at %p", area->mta);
+ XFREE(MTYPE_ISIS_MPLS_TE, area->mta);
+}
+
/* Main initialization / update function of the MPLS TE Circuit context */
/* Call when interface TE Link parameters are modified */
void isis_link_params_update(struct isis_circuit *circuit,
/* Prototypes. */
void isis_mpls_te_init(void);
+void isis_mpls_te_create(struct isis_area *area);
+void isis_mpls_te_disable(struct isis_area *area);
+void isis_mpls_te_term(struct isis_area *area);
void isis_link_params_update(struct isis_circuit *, struct interface *);
int isis_mpls_te_update(struct interface *);
void isis_te_lsp_event(struct isis_lsp *lsp, enum lsp_event event);
return ext;
}
+void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext)
+{
+ struct isis_item *item, *next_item;
+
+ if (!ext)
+ return;
+
+ /* First, free Adj SID and LAN Adj SID list if needed */
+ for (item = ext->adj_sid.head; item; item = next_item) {
+ next_item = item->next;
+ XFREE(MTYPE_ISIS_SUBTLV, item);
+ }
+ for (item = ext->lan_sid.head; item; item = next_item) {
+ next_item = item->next;
+ XFREE(MTYPE_ISIS_SUBTLV, item);
+ }
+ XFREE(MTYPE_ISIS_SUBTLV, ext);
+}
+
/*
* mtid parameter is used to determine if Adjacency is related to IPv4 or IPv6
* Multi-Topology. Special 4096 value i.e. first R flag set is used to indicate
static void free_item_ext_subtlvs(struct isis_ext_subtlvs *exts)
{
- struct isis_item *item, *next_item;
-
- /* First, free Adj SID and LAN Adj SID list if needed */
- for (item = exts->adj_sid.head; item; item = next_item) {
- next_item = item->next;
- XFREE(MTYPE_ISIS_SUBTLV, item);
- }
- for (item = exts->lan_sid.head; item; item = next_item) {
- next_item = item->next;
- XFREE(MTYPE_ISIS_SUBTLV, item);
- }
- XFREE(MTYPE_ISIS_SUBTLV, exts);
+ isis_del_ext_subtlvs(exts);
}
static int pack_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
log, indent,
"TLV size does not match expected size for Adjacency SID!\n");
stream_forward_getp(s, subtlv_len - 2);
+ XFREE(MTYPE_ISIS_SUBTLV, adj);
break;
}
log, indent,
"TLV size does not match expected size for Adjacency SID!\n");
stream_forward_getp(s, subtlv_len - 2);
+ XFREE(MTYPE_ISIS_SUBTLV, adj);
break;
}
stream_forward_getp(
s, subtlv_len - 2
- ISIS_SYS_ID_LEN);
+ XFREE(MTYPE_ISIS_SUBTLV, lan);
break;
}
stream_forward_getp(
s, subtlv_len - 2
- ISIS_SYS_ID_LEN);
+ XFREE(MTYPE_ISIS_SUBTLV, lan);
break;
}
static void free_item_extended_reach(struct isis_item *i)
{
struct isis_extended_reach *item = (struct isis_extended_reach *)i;
+
if (item->subtlvs != NULL)
free_item_ext_subtlvs(item->subtlvs);
XFREE(MTYPE_ISIS_TLV, item);
struct prefix_ipv6 *src,
uint32_t metric);
struct isis_ext_subtlvs *isis_alloc_ext_subtlvs(void);
+void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext);
void isis_tlvs_add_adj_sid(struct isis_ext_subtlvs *exts,
struct isis_adj_sid *adj);
void isis_tlvs_del_adj_sid(struct isis_ext_subtlvs *exts,
void isis_finish(struct isis *isis)
{
+ struct isis_area *area;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(isis->area_list, node, nnode, area))
+ isis_area_destroy(area);
+
struct vrf *vrf = NULL;
listnode_delete(im->isis, isis);
isis_csm_state_change(ISIS_DISABLE, circuit, area);
}
+static void delete_area_addr(void *arg)
+{
+ struct area_addr *addr = (struct area_addr *)arg;
+
+ XFREE(MTYPE_ISIS_AREA_ADDR, addr);
+}
+
struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
{
struct isis_area *area;
area->circuit_list = list_new();
area->adjacency_list = list_new();
area->area_addrs = list_new();
+ area->area_addrs->del = delete_area_addr;
+
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
flags_initialize(&area->flags);
{
struct listnode *node, *nnode;
struct isis_circuit *circuit;
- struct area_addr *addr;
QOBJ_UNREG(area);
if (fabricd)
fabricd_finish(area->fabricd);
- /* Disable MPLS if necessary before flooding LSP */
- if (IS_MPLS_TE(area->mta))
- area->mta->status = disable;
-
if (area->circuit_list) {
for (ALL_LIST_ELEMENTS(area->circuit_list, node, nnode,
circuit))
list_delete(&area->circuit_list);
}
+ if (area->flags.free_idcs)
+ list_delete(&area->flags.free_idcs);
+
list_delete(&area->adjacency_list);
lsp_db_fini(&area->lspdb[0]);
isis_sr_area_term(area);
+ isis_mpls_te_term(area);
+
spftree_area_del(area);
if (area->spf_timer[0])
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
isis_redist_area_finish(area);
- for (ALL_LIST_ELEMENTS(area->area_addrs, node, nnode, addr)) {
- list_delete_node(area->area_addrs, node);
- XFREE(MTYPE_ISIS_AREA_ADDR, addr);
- }
- area->area_addrs = NULL;
+ list_delete(&area->area_addrs);
for (int i = SPF_PREFIX_PRIO_CRITICAL; i <= SPF_PREFIX_PRIO_MEDIUM;
i++) {
area_mt_finish(area);
- if (listcount(area->isis->area_list) == 0) {
- isis_finish(area->isis);
- }
-
XFREE(MTYPE_ISIS_AREA, area);
}
(b) += (w * WORD_SIZE); \
} while (0)
+/*
+ * Find a clear bit in v and return its index.
+ * Start looking in the word containing bit position start_index.
+ * If necessary, wrap around after bit position max_index.
+ */
+static inline unsigned int
+bf_find_next_clear_bit_wrap(bitfield_t *v, word_t start_index, word_t max_index)
+{
+ int start_bit;
+ unsigned long i, offset, scanbits, wordcount_max, index_max;
+
+ if (start_index > max_index)
+ start_index = 0;
+
+ start_bit = start_index & (WORD_SIZE - 1);
+ wordcount_max = bf_index(max_index) + 1;
+
+ scanbits = WORD_SIZE;
+ for (i = bf_index(start_index); i < v->m; ++i) {
+ if (v->data[i] == WORD_MAX) {
+ /* if the whole word is full move to the next */
+ start_bit = 0;
+ continue;
+ }
+ /* scan one word for clear bits */
+ if ((i == v->m - 1) && (v->m >= wordcount_max))
+ /* max index could be only part of word */
+ scanbits = (max_index % WORD_SIZE) + 1;
+ for (offset = start_bit; offset < scanbits; ++offset) {
+ if (!((v->data[i] >> offset) & 1))
+ return ((i * WORD_SIZE) + offset);
+ }
+ /* move to the next word */
+ start_bit = 0;
+ }
+
+ if (v->m < wordcount_max) {
+ /*
+ * We can expand bitfield, so no need to wrap.
+ * Return the index of the first bit of the next word.
+ * Assumption is that caller will call bf_set_bit which
+ * will allocate additional space.
+ */
+ v->m += 1;
+ v->data = (word_t *)realloc(v->data, v->m * sizeof(word_t));
+ v->data[v->m - 1] = 0;
+ return v->m * WORD_SIZE;
+ }
+
+ /*
+ * start looking for a clear bit at the start of the bitfield and
+ * stop when we reach start_index
+ */
+ scanbits = WORD_SIZE;
+ index_max = bf_index(start_index - 1);
+ for (i = 0; i <= index_max; ++i) {
+ if (i == index_max)
+ scanbits = ((start_index - 1) % WORD_SIZE) + 1;
+ for (offset = start_bit; offset < scanbits; ++offset) {
+ if (!((v->data[i] >> offset) & 1))
+ return ((i * WORD_SIZE) + offset);
+ }
+ /* move to the next word */
+ start_bit = 0;
+ }
+
+ return WORD_MAX;
+}
+
static inline unsigned int bf_find_next_set_bit(bitfield_t v,
word_t start_index)
{
return (ret);
}
-static int route_map_clear_updated(struct route_map *map)
+static void route_map_clear_updated(struct route_map *map)
{
- int ret = -1;
-
if (map) {
map->to_be_processed = false;
if (map->deleted)
route_map_free_map(map);
}
-
- return (ret);
}
/* Lookup route map. If there isn't route map create one and return
(strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv4"))
#define IS_SET_BGP_EVPN_GATEWAY_IP_IPV6(A) \
(strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv6"))
+#define IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(A) \
+ (strmatch(A, "frr-bgp-route-map:set-l3vpn-nexthop-encapsulation"))
enum ecommunity_lb_type {
EXPLICIT_BANDWIDTH,
yang_dnode_get_string(
dnode,
"./rmap-set-action/frr-bgp-route-map:evpn-gateway-ip-ipv6"));
+ } else if (IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(action)) {
+ vty_out(vty, " set l3vpn next-hop encapsulation %s\n",
+ yang_dnode_get_string(
+ dnode,
+ "./rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation"));
}
}
zlog_instance = instance;
if (instance) {
- snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
- "/var/tmp/frr/%s-%d.%ld",
- progname, instance, (long)getpid());
+ snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s-%d.%ld",
+ TMPBASEDIR, progname, instance, (long)getpid());
zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
"%s[%d]: ", protoname, instance);
} else {
- snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
- "/var/tmp/frr/%s.%ld",
- progname, (long)getpid());
+ snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s.%ld",
+ TMPBASEDIR, progname, (long)getpid());
zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
"%s: ", protoname);
}
-static void ospf_maxage_lsa_remover(struct thread *thread)
+void ospf_maxage_lsa_remover(struct thread *thread)
{
struct ospf *ospf = THREAD_ARG(thread);
struct ospf_lsa *lsa, *old;
if (lsa->refresh_list < 0) {
int delay;
int min_delay =
- OSPF_LS_REFRESH_TIME - (2 * OSPF_LS_REFRESH_JITTER);
- int max_delay = OSPF_LS_REFRESH_TIME - OSPF_LS_REFRESH_JITTER;
+ ospf->lsa_refresh_timer - (2 * OSPF_LS_REFRESH_JITTER);
+ int max_delay =
+ ospf->lsa_refresh_timer - OSPF_LS_REFRESH_JITTER;
/* We want to refresh the LSA within OSPF_LS_REFRESH_TIME which
* is
struct ospf_lsa *lsa);
extern void ospf_flush_lsa_from_area(struct ospf *ospf, struct in_addr area_id,
int type);
+extern void ospf_maxage_lsa_remover(struct thread *thread);
#endif /* _ZEBRA_OSPF_LSA_H */
#include "ospfd/ospf_sr.h"
#include "ospfd/ospf_ti_lfa.h"
#include "ospfd/ospf_errors.h"
+
+#ifdef SUPPORT_OSPF_API
#include "ospfd/ospf_apiserver.h"
+#endif
/* Variables to ensure a SPF scheduled log message is printed only once */
/* Update all routers routing table */
ospf->oall_rtrs = ospf->all_rtrs;
ospf->all_rtrs = all_rtrs;
+#ifdef SUPPORT_OSPF_API
ospf_apiserver_notify_reachable(ospf->oall_rtrs, ospf->all_rtrs);
+#endif
/* Free old ABR/ASBR routing table */
if (ospf->old_rtrs)
return CMD_SUCCESS;
}
+DEFPY_HIDDEN(ospf_lsa_refresh_timer, ospf_lsa_refresh_timer_cmd,
+ "[no$no] ospf lsa-refresh [(120-1800)]$value",
+ NO_STR OSPF_STR
+ "OSPF lsa refresh timer\n"
+ "timer value in seconds\n")
+{
+	VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf);
+
+ if (no)
+ ospf->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
+ else
+ ospf->lsa_refresh_timer = value;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(ospf_maxage_delay_timer, ospf_maxage_delay_timer_cmd,
+ "[no$no] ospf maxage-delay [(0-60)]$value",
+ NO_STR OSPF_STR
+ "OSPF lsa maxage delay timer\n"
+ "timer value in seconds\n")
+{
+	VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf);
+
+ if (no)
+ ospf->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT;
+ else
+ ospf->maxage_delay = value;
+
+ THREAD_OFF(ospf->t_maxage);
+ OSPF_TIMER_ON(ospf->t_maxage, ospf_maxage_lsa_remover,
+ ospf->maxage_delay);
+
+ return CMD_SUCCESS;
+}
+
void ospf_vty_clear_init(void)
{
install_element(ENABLE_NODE, &clear_ip_ospf_interface_cmd);
vrf_cmd_init(NULL);
+ install_element(OSPF_NODE, &ospf_lsa_refresh_timer_cmd);
+ install_element(OSPF_NODE, &ospf_maxage_delay_timer_cmd);
+
/* Init interface related vty commands. */
ospf_vty_if_init();
new->lsa_refresh_queue.index = 0;
new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
+ new->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
new->t_lsa_refresher = NULL;
thread_add_timer(master, ospf_lsa_refresh_walker, new,
new->lsa_refresh_interval, &new->t_lsa_refresher);
time_t lsa_refresher_started;
#define OSPF_LSA_REFRESH_INTERVAL_DEFAULT 10
uint16_t lsa_refresh_interval;
+ uint16_t lsa_refresh_timer;
/* Distance parameter. */
uint8_t distance_all;
desired = GM_SG_NOINFO;
if (desired != sg->state && !gm_ifp->stopping) {
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
gm_states[desired]);
struct gm_packet_state *pkt;
if (len < sizeof(*hdr)) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(log_pkt_src(
"malformed MLDv2 report (truncated header)"));
gm_ifp->stats.rx_drop_malformed++;
size_t max_entries;
if (len < sizeof(*hdr)) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(log_pkt_src(
"malformed MLDv1 report (truncated)"));
gm_ifp->stats.rx_drop_malformed++;
struct gm_packet_sg *old_grp;
if (len < sizeof(*hdr)) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(log_pkt_src(
"malformed MLDv1 leave (truncated)"));
gm_ifp->stats.rx_drop_malformed++;
remain_ms = monotime_until(&pend->expiry, &remain);
if (remain_ms > 0) {
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(
log_ifp("next general expiry in %" PRId64 "ms"),
remain_ms / 1000);
if (timercmp(&pkt->received, &pend->query, >=))
break;
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(log_ifp("expire packet %p"), pkt);
gm_packet_drop(pkt, true);
}
gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
}
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(log_ifp("next general expiry waiting for query"));
}
struct gm_if *gm_ifp = pend->iface;
struct gm_sg *sg, *sg_start, sg_ref = {};
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
}
if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(
log_pkt_src("replacing elected querier %pPA"),
&gm_ifp->querier);
query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(
log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
gm_ifp = pim_ifp->mld;
gm_ifp->stopping = true;
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(log_ifp("MLD stop"));
THREAD_OFF(gm_ifp->t_query);
querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
if (js_if) {
+ json_object_string_add(js_if, "name", ifp->name);
json_object_string_add(js_if, "state", "up");
json_object_string_addf(js_if, "version", "%d",
gm_ifp->cur_version);
json_object_string_addf(js_if, "otherQuerierTimer",
"%pTH",
gm_ifp->t_other_querier);
+ json_object_int_add(js_if, "timerRobustnessValue",
+ gm_ifp->cur_qrv);
+ json_object_int_add(js_if, "timerQueryIntervalMsec",
+ gm_ifp->cur_query_intv);
+ json_object_int_add(js_if, "timerQueryResponseTimerMsec",
+ gm_ifp->cur_max_resp);
+ json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
+ gm_ifp->cur_query_intv_trig);
} else {
vty_out(vty, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n",
ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
DEBUG_STR
DEBUG_IGMP_STR)
{
- PIM_DO_DEBUG_IGMP_EVENTS;
- PIM_DO_DEBUG_IGMP_PACKETS;
- PIM_DO_DEBUG_IGMP_TRACE;
+ PIM_DO_DEBUG_GM_EVENTS;
+ PIM_DO_DEBUG_GM_PACKETS;
+ PIM_DO_DEBUG_GM_TRACE;
return CMD_SUCCESS;
}
DEBUG_STR
DEBUG_IGMP_STR)
{
- PIM_DONT_DEBUG_IGMP_EVENTS;
- PIM_DONT_DEBUG_IGMP_PACKETS;
- PIM_DONT_DEBUG_IGMP_TRACE;
+ PIM_DONT_DEBUG_GM_EVENTS;
+ PIM_DONT_DEBUG_GM_PACKETS;
+ PIM_DONT_DEBUG_GM_TRACE;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_EVENTS_STR)
{
- PIM_DO_DEBUG_IGMP_EVENTS;
+ PIM_DO_DEBUG_GM_EVENTS;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_EVENTS_STR)
{
- PIM_DONT_DEBUG_IGMP_EVENTS;
+ PIM_DONT_DEBUG_GM_EVENTS;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_PACKETS_STR)
{
- PIM_DO_DEBUG_IGMP_PACKETS;
+ PIM_DO_DEBUG_GM_PACKETS;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_PACKETS_STR)
{
- PIM_DONT_DEBUG_IGMP_PACKETS;
+ PIM_DONT_DEBUG_GM_PACKETS;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_TRACE_STR)
{
- PIM_DO_DEBUG_IGMP_TRACE;
+ PIM_DO_DEBUG_GM_TRACE;
return CMD_SUCCESS;
}
DEBUG_IGMP_STR
DEBUG_IGMP_TRACE_STR)
{
- PIM_DONT_DEBUG_IGMP_TRACE;
+ PIM_DONT_DEBUG_GM_TRACE;
return CMD_SUCCESS;
}
DEBUG_IGMP_TRACE_STR
"detailed\n")
{
- PIM_DO_DEBUG_IGMP_TRACE_DETAIL;
+ PIM_DO_DEBUG_GM_TRACE_DETAIL;
return CMD_SUCCESS;
}
DEBUG_IGMP_TRACE_STR
"detailed\n")
{
- PIM_DONT_DEBUG_IGMP_TRACE_DETAIL;
+ PIM_DONT_DEBUG_GM_TRACE_DETAIL;
return CMD_SUCCESS;
}
(void)igmp_join_new(ifp, group_addr, source_addr);
- if (PIM_DEBUG_IGMP_EVENTS) {
+ if (PIM_DEBUG_GM_EVENTS) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
}
if (!pim_if_connected_to_source(ifp, from)) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug("Recv IGMP query on interface: %s from a non-connected source: %s",
ifp->name, from_str);
return 0;
}
if (if_address_is_local(&from, AF_INET, ifp->vrf->vrf_id)) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug("Recv IGMP query on interface: %s from ourself %s",
ifp->name, from_str);
return 0;
return 0;
}
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
char group_str[INET_ADDRSTRLEN];
pim_inet4_dump("<group?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", ip_hdr->ip_src, from_str, sizeof(from_str));
pim_inet4_dump("<dst?>", ip_hdr->ip_dst, to_str, sizeof(to_str));
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Recv IGMP packet from %s to %s on %s: size=%zu ttl=%d msg_type=%d msg_size=%d",
from_str, to_str, igmp->interface->name, len, ip_hdr->ip_ttl,
{
group_timer_off(group);
- if (PIM_DEBUG_IGMP_EVENTS) {
+ if (PIM_DEBUG_GM_EVENTS) {
char group_str[INET_ADDRSTRLEN];
pim_inet4_dump("<group?>", group->group_addr, group_str,
sizeof(group_str));
checksum = in_cksum(query_buf, msg_size);
*(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
char dst_str[INET_ADDRSTRLEN];
char group_str[INET_ADDRSTRLEN];
pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
return 0;
if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(
"Recv IGMPv2 REPORT from %s on %s: size=%d other than correct=%d",
from_str, ifp->name, igmp_msg_len,
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
pim_inet4_dump("<dst?>", group_addr, group_str,
sizeof(group_str));
zlog_debug("Recv IGMPv2 REPORT from %s on %s for %s", from_str,
* the SSM range.
*/
if (pim_is_grp_ssm(pim_ifp->pim, group_addr)) {
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Ignoring IGMPv2 group record %pI4 from %s on %s exclude mode in SSM range",
&group_addr.s_addr, from_str, ifp->name);
return 0;
if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
- if (PIM_DEBUG_IGMP_PACKETS)
+ if (PIM_DEBUG_GM_PACKETS)
zlog_debug(
"Recv IGMPv2 LEAVE from %s on %s: size=%d other than correct=%d",
from_str, ifp->name, igmp_msg_len,
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
pim_inet4_dump("<dst?>", group_addr, group_str,
sizeof(group_str));
zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str,
*/
if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP)
&& (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) {
- if (PIM_DEBUG_IGMP_EVENTS)
+ if (PIM_DEBUG_GM_EVENTS)
zlog_debug(
"IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address");
return -1;
source_timer_off(group, source);
struct pim_interface *pim_ifp = group->interface->info;
- if (PIM_DEBUG_IGMP_EVENTS) {
+ if (PIM_DEBUG_GM_EVENTS) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<group?>", group->group_addr, group_str,
checksum = in_cksum(query_buf, msg_size);
*(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
char dst_str[INET_ADDRSTRLEN];
char group_str[INET_ADDRSTRLEN];
pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
/* determine filtering status for group */
if (pim_is_group_filtered(pim_ifp, &grp)) {
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
&grp.s_addr, from_str, ifp->name,
grp_addr.s_addr = ntohl(grp.s_addr);
if (pim_is_group_224_0_0_0_24(grp_addr)) {
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Ignoring IGMPv3 group record %pI4 from %s on %s group range falls in 224.0.0.0/24",
&grp.s_addr, from_str, ifp->name);
switch (rec_type) {
case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE:
case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE:
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Ignoring IGMPv3 group record %pI4 from %s on %s exclude mode in SSM range",
&grp.s_addr, from_str, ifp->name);
return -1;
}
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Recv IGMP report v3 from %s on %s: size=%d groups=%d",
from_str, ifp->name, igmp_msg_len, num_groups);
group_record + IGMP_V3_GROUP_RECORD_GROUP_OFFSET,
sizeof(struct in_addr));
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
" Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
from_str, ifp->name, i, rec_type,
return -1;
}
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
char src_str[200];
if (!inet_ntop(AF_INET, src, src_str,
PIM_DO_DEBUG_PIM_EVENTS;
PIM_DO_DEBUG_PIM_PACKETS;
PIM_DO_DEBUG_PIM_TRACE;
- PIM_DO_DEBUG_IGMP_EVENTS;
- PIM_DO_DEBUG_IGMP_PACKETS;
- PIM_DO_DEBUG_IGMP_TRACE;
+ PIM_DO_DEBUG_GM_EVENTS;
+ PIM_DO_DEBUG_GM_PACKETS;
+ PIM_DO_DEBUG_GM_TRACE;
PIM_DO_DEBUG_ZEBRA;
#endif
connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
if (!connected_src) {
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"Recv IGMP packet on interface: %s from a non-connected source: %pI4",
ifp->name, &ip_hdr->ip_src);
ifaddr = connected_src->u.prefix4;
igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr);
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
__func__, pim->vrf->name, ifp->name, igmp,
}
if (igmp)
pim_igmp_packet(igmp, (char *)buf, buf_size);
- else if (PIM_DEBUG_IGMP_PACKETS) {
+ else if (PIM_DEBUG_GM_PACKETS) {
zlog_debug(
"No IGMP socket on interface: %s with connected source: %pFX",
ifp->name, connected_src);
return NULL;
}
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
- const struct prefix *src_prefix)
-{
- pim_addr addr;
-
- addr = pim_addr_from_prefix(src_prefix);
- return pim_neighbor_find(ifp, addr);
-}
-
/*
* Find the *one* interface out
* this interface. If more than
void pim_neighbor_free(struct pim_neighbor *neigh);
struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
pim_addr source_addr);
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
- const struct prefix *src_prefix);
struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp,
struct prefix *src);
struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp);
vty_out(vty, "debug msdp internal\n");
++writes;
}
- if (PIM_DEBUG_IGMP_EVENTS) {
+ if (PIM_DEBUG_GM_EVENTS) {
vty_out(vty, "debug igmp events\n");
++writes;
}
- if (PIM_DEBUG_IGMP_PACKETS) {
+ if (PIM_DEBUG_GM_PACKETS) {
vty_out(vty, "debug igmp packets\n");
++writes;
}
/* PIM_DEBUG_IGMP_TRACE catches _DETAIL too */
- if (router->debugs & PIM_MASK_IGMP_TRACE) {
+ if (router->debugs & PIM_MASK_GM_TRACE) {
vty_out(vty, "debug igmp trace\n");
++writes;
}
#define PIM_MASK_PIM_PACKETDUMP_RECV (1 << 4)
#define PIM_MASK_PIM_TRACE (1 << 5)
#define PIM_MASK_PIM_TRACE_DETAIL (1 << 6)
-#define PIM_MASK_IGMP_EVENTS (1 << 7)
-#define PIM_MASK_IGMP_PACKETS (1 << 8)
-#define PIM_MASK_IGMP_TRACE (1 << 9)
-#define PIM_MASK_IGMP_TRACE_DETAIL (1 << 10)
+#define PIM_MASK_GM_EVENTS (1 << 7)
+#define PIM_MASK_GM_PACKETS (1 << 8)
+#define PIM_MASK_GM_TRACE (1 << 9)
+#define PIM_MASK_GM_TRACE_DETAIL (1 << 10)
#define PIM_MASK_ZEBRA (1 << 11)
#define PIM_MASK_SSMPINGD (1 << 12)
#define PIM_MASK_MROUTE (1 << 13)
(router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_PIM_TRACE_DETAIL))
#define PIM_DEBUG_PIM_TRACE_DETAIL \
(router->debugs & PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS)
-#define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS)
+#define PIM_DEBUG_GM_EVENTS (router->debugs & PIM_MASK_GM_EVENTS)
+#define PIM_DEBUG_GM_PACKETS (router->debugs & PIM_MASK_GM_PACKETS)
#define PIM_DEBUG_IGMP_TRACE \
- (router->debugs & (PIM_MASK_IGMP_TRACE | PIM_MASK_IGMP_TRACE_DETAIL))
-#define PIM_DEBUG_IGMP_TRACE_DETAIL \
- (router->debugs & PIM_MASK_IGMP_TRACE_DETAIL)
+ (router->debugs & (PIM_MASK_GM_TRACE | PIM_MASK_GM_TRACE_DETAIL))
+#define PIM_DEBUG_IGMP_TRACE_DETAIL (router->debugs & PIM_MASK_GM_TRACE_DETAIL)
#define PIM_DEBUG_ZEBRA (router->debugs & PIM_MASK_ZEBRA)
#define PIM_DEBUG_MLAG (router->debugs & PIM_MASK_MLAG)
#define PIM_DEBUG_SSMPINGD (router->debugs & PIM_MASK_SSMPINGD)
#define PIM_DEBUG_BSM (router->debugs & PIM_MASK_BSM_PROC)
#define PIM_DEBUG_EVENTS \
- (router->debugs \
- & (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS \
- | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
+ (router->debugs & (PIM_MASK_PIM_EVENTS | PIM_MASK_GM_EVENTS | \
+ PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
#define PIM_DEBUG_PACKETS \
- (router->debugs \
- & (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS \
- | PIM_MASK_MSDP_PACKETS))
+ (router->debugs & \
+ (PIM_MASK_PIM_PACKETS | PIM_MASK_GM_PACKETS | PIM_MASK_MSDP_PACKETS))
#define PIM_DEBUG_TRACE \
- (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_IGMP_TRACE))
+ (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_GM_TRACE))
#define PIM_DO_DEBUG_PIM_EVENTS (router->debugs |= PIM_MASK_PIM_EVENTS)
#define PIM_DO_DEBUG_PIM_PACKETS (router->debugs |= PIM_MASK_PIM_PACKETS)
#define PIM_DO_DEBUG_PIM_TRACE (router->debugs |= PIM_MASK_PIM_TRACE)
#define PIM_DO_DEBUG_PIM_TRACE_DETAIL \
(router->debugs |= PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DO_DEBUG_IGMP_EVENTS (router->debugs |= PIM_MASK_IGMP_EVENTS)
-#define PIM_DO_DEBUG_IGMP_PACKETS (router->debugs |= PIM_MASK_IGMP_PACKETS)
-#define PIM_DO_DEBUG_IGMP_TRACE (router->debugs |= PIM_MASK_IGMP_TRACE)
-#define PIM_DO_DEBUG_IGMP_TRACE_DETAIL \
- (router->debugs |= PIM_MASK_IGMP_TRACE_DETAIL)
+#define PIM_DO_DEBUG_GM_EVENTS (router->debugs |= PIM_MASK_GM_EVENTS)
+#define PIM_DO_DEBUG_GM_PACKETS (router->debugs |= PIM_MASK_GM_PACKETS)
+#define PIM_DO_DEBUG_GM_TRACE (router->debugs |= PIM_MASK_GM_TRACE)
+#define PIM_DO_DEBUG_GM_TRACE_DETAIL \
+ (router->debugs |= PIM_MASK_GM_TRACE_DETAIL)
#define PIM_DO_DEBUG_ZEBRA (router->debugs |= PIM_MASK_ZEBRA)
#define PIM_DO_DEBUG_MLAG (router->debugs |= PIM_MASK_MLAG)
#define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
#define PIM_DONT_DEBUG_PIM_TRACE (router->debugs &= ~PIM_MASK_PIM_TRACE)
#define PIM_DONT_DEBUG_PIM_TRACE_DETAIL \
(router->debugs &= ~PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DONT_DEBUG_IGMP_EVENTS (router->debugs &= ~PIM_MASK_IGMP_EVENTS)
-#define PIM_DONT_DEBUG_IGMP_PACKETS (router->debugs &= ~PIM_MASK_IGMP_PACKETS)
-#define PIM_DONT_DEBUG_IGMP_TRACE (router->debugs &= ~PIM_MASK_IGMP_TRACE)
-#define PIM_DONT_DEBUG_IGMP_TRACE_DETAIL \
- (router->debugs &= ~PIM_MASK_IGMP_TRACE_DETAIL)
+#define PIM_DONT_DEBUG_GM_EVENTS (router->debugs &= ~PIM_MASK_GM_EVENTS)
+#define PIM_DONT_DEBUG_GM_PACKETS (router->debugs &= ~PIM_MASK_GM_PACKETS)
+#define PIM_DONT_DEBUG_GM_TRACE (router->debugs &= ~PIM_MASK_GM_TRACE)
+#define PIM_DONT_DEBUG_GM_TRACE_DETAIL \
+ (router->debugs &= ~PIM_MASK_GM_TRACE_DETAIL)
#define PIM_DONT_DEBUG_ZEBRA (router->debugs &= ~PIM_MASK_ZEBRA)
#define PIM_DONT_DEBUG_MLAG (router->debugs &= ~PIM_MASK_MLAG)
#define PIM_DONT_DEBUG_SSMPINGD (router->debugs &= ~PIM_MASK_SSMPINGD)
/* Check packet length. */
if (len < (RIP_HEADER_SIZE + RIP_RTE_SIZE)) {
- flog_err(
- EC_RIP_PACKET,
- "rip_auth_md5_set(): packet length %ld is less than minimum length.",
- len);
+ flog_err(EC_RIP_PACKET,
+ "%s: packet length %ld is less than minimum length.",
+ __func__, len);
return;
}
inet_ntop(AF_INET, &sin.sin_addr, dst, sizeof(dst));
}
#undef ADDRESS_SIZE
- zlog_debug("rip_send_packet %pI4 > %s (%s)",
- &ifc->address->u.prefix4, dst,
- ifc->ifp->name);
+ zlog_debug("%s %pI4 > %s (%s)", __func__,
+ &ifc->address->u.prefix4, dst, ifc->ifp->name);
}
if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)) {
/* If this packet come from unknown interface, ignore it. */
if (ifp == NULL) {
zlog_info(
- "rip_read: cannot find interface for packet from %pI4 port %d (VRF %s)",
- &from.sin_addr, ntohs(from.sin_port),
+ "%s: cannot find interface for packet from %pI4 port %d (VRF %s)",
+ __func__, &from.sin_addr, ntohs(from.sin_port),
rip->vrf_name);
return;
}
if (ifc == NULL) {
zlog_info(
- "rip_read: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)",
- &from.sin_addr, ntohs(from.sin_port),
+ "%s: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)",
+ __func__, &from.sin_addr, ntohs(from.sin_port),
ifp->name, rip->vrf_name);
return;
}
--- /dev/null
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 3 10
+ address-family ipv4 unicast
+ redistribute connected
+ neighbor 192.168.1.2 route-map r2 out
+ exit-address-family
+!
+ip prefix-list p1 seq 5 permit 172.16.255.1/32
+ip prefix-list p1 seq 10 permit 172.16.255.2/32
+ip prefix-list p2 seq 15 permit 172.16.255.3/32
+!
+route-map r2 permit 10
+ match ip address prefix-list p1
+ set metric 300
+route-map r2 permit 20
+ match ip address prefix-list p2
+ set metric 400
+!
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.1/32
+ ip address 172.16.255.2/32
+ ip address 172.16.255.3/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+ip forwarding
+!
--- /dev/null
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 3 10
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 3 10
+ address-family ipv4 unicast
+ aggregate-address 172.16.255.0/24 summary-only matching-MED-only
+ exit-address-family
+!
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+interface r2-eth1
+ ip address 192.168.2.2/24
+!
+ip forwarding
+!
--- /dev/null
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 3 10
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 192.168.2.1/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if aggregate-address command works fine when suppressing summary-only
+and using matching-MED-only together.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+from lib.common_config import (
+ step,
+)
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+
+def build_topo(tgen):
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+    for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_aggregate_address_matching_med():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r3 = tgen.gears["r3"]
+
+ def _bgp_converge():
+ output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json"))
+ expected = {
+ "routes": {
+ "172.16.255.0/24": None,
+ "172.16.255.1/32": [{"path": "65002 65001"}],
+ "172.16.255.2/32": [{"path": "65002 65001"}],
+ "172.16.255.3/32": [{"path": "65002 65001"}],
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed to see unsuppressed routes from R2"
+
+ step("Change MED for 172.16.255.3/32 from 400 to 300")
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ route-map r2 permit 20
+ set metric 300
+ """
+ )
+
+ step("Check if 172.16.255.0/24 aggregated route was created and others suppressed")
+
+ def _bgp_aggregated_summary_only_med_match():
+ output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json"))
+ expected = {
+ "routes": {
+ "172.16.255.0/24": [{"path": "65002"}],
+ "172.16.255.1/32": None,
+ "172.16.255.2/32": None,
+ "172.16.255.3/32": None,
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_aggregated_summary_only_med_match)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+    assert result is None, "Failed to see the aggregated route with suppressed routes from R2"
+
+ step("Change MED for 172.16.255.3/32 back to 400 from 300")
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ route-map r2 permit 20
+ set metric 400
+ """
+ )
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed to see unsuppressed routes from R2"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
"Ledger":506,
"InUse":506,
"Requests":0,
- "LabelChunks":11,
+ "LabelChunks":3,
"Pending":0,
"Reconnects":0
}
"Ledger":51,
"InUse":51,
"Requests":0,
- "LabelChunks":2,
+ "LabelChunks":1,
"Pending":0,
"Reconnects":0
}
return True
return False
- def _get_pfx_path_from_nh(router, prefix, nh):
- """Return as-path for a specific route + path."""
- output = json.loads(tgen.gears[router].vtysh_cmd(f"show ip bgp {prefix} json"))
- for path in output[prefix]:
- if path["nexthops"]["ip"] == nh:
- return path["aspath"]["string"]
-
def _routers_up(tx_rtrs, rx_rtrs):
"""Ensure all BGP sessions are up and all routes are installed."""
# all sessions go through tx_routers, so ensure all their peers are up
for pfx in prefixes:
good_path = expected_paths[rtr][remove_type][peer][pfx]
real_path = adj_rib_in["receivedRoutes"][pfx]["path"]
- msg = (
- f"{rtr} received incorrect AS-Path from {peer} "
- f'({p_ip}) for {pfx}. remove_type: "{remove_type}"'
- )
- assert real_path == good_path, msg
+ return real_path == good_path
#######################
# Begin Test
# test each variation of remove-private-AS
for rmv_type in remove_types:
_change_remove_type(rmv_type, "add")
- _validate_paths(rmv_type)
+
+ test_func = partial(_validate_paths, rmv_type)
+ _, result = topotest.run_and_expect(test_func, True, count=60, wait=0.5)
+        assert result is True, "Not all routes have correct AS-Path values!"
+
# each variation sets a separate peer flag in bgpd. we need to clear
# the old flag after each iteration so we only test the flags we expect.
_change_remove_type(rmv_type, "del")
--- /dev/null
+router bgp 65500
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ neighbor 10.125.0.2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 10.125.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 10.125.0.2 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 1.1.1.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 101
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
\ No newline at end of file
--- /dev/null
+{
+ "10.200.0.0/24": [
+ {
+ "prefix": "10.200.0.0/24",
+ "prefixLen": 24,
+ "protocol": "bgp",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "10.125.0.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "vrf": "default",
+ "active": true,
+ "labels":[
+ 102
+ ]
+ }
+ ]
+ }
+ ],
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0/24",
+ "prefixLen": 24,
+ "protocol": "connected",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "nexthops":[
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "r1-eth1",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log stdout
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65501,
+ "routes":
+ {
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0",
+ "prefixLen": 24,
+ "network": "10.201.0.0\/24",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "10.125.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+router bgp 65501
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ neighbor 10.125.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 10.125.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 10.125.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+log stdout
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_ebgp.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2022 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_ebgp.py: Test the FRR BGP daemon with EBGP direct connection
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ 'ip link add vrf1 type vrf table 10',
+ 'echo 100000 > /proc/sys/net/mpls/platform_labels',
+ 'ip link set dev vrf1 up',
+ 'ip link set dev {0}-eth1 master vrf1',
+ 'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input',
+ ]
+
+ for cmd in cmds_list:
+        cmd_fmt = cmd.format('r1', '1', '2')
+        logger.info('input: ' + cmd_fmt)
+        output = tgen.net['r1'].cmd(cmd_fmt)
+ logger.info('output: ' + output)
+
+ for cmd in cmds_list:
+        cmd_fmt = cmd.format('r2', '2', '1')
+        logger.info('input: ' + cmd_fmt)
+        output = tgen.net['r2'].cmd(cmd_fmt)
+ logger.info('output: ' + output)
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def test_protocols_convergence():
+ """
+    Assert that all protocols have converged before checking
+    the route statuses, which depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears['r1']
+ logger.info("Dump some context for r1")
+ router.vtysh_cmd("show bgp ipv4 vpn")
+ router.vtysh_cmd("show bgp summary")
+ router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+ router.vtysh_cmd("show running-config")
+ router = tgen.gears['r2']
+ logger.info("Dump some context for r2")
+ router.vtysh_cmd("show bgp ipv4 vpn")
+ router.vtysh_cmd("show bgp summary")
+ router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+ router.vtysh_cmd("show running-config")
+
+ # Check IPv4 routing tables on r1
+ logger.info("Checking IPv4 routes for convergence on r1")
+ router = tgen.gears['r1']
+ json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+        assert 0, 'ipv4_routes.json file not found'
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip route vrf vrf1 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ # Check BGP IPv4 routing tables on r2 not installed
+ logger.info("Checking BGP IPv4 routes for convergence on r2")
+ router = tgen.gears['r2']
+ json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ assert 0, 'bgp_ipv4_routes.json file not found'
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf vrf1 ipv4 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.1
+ neighbor 192.0.2.2 remote-as 65500
+ neighbor 192.0.2.2 update-source 192.0.2.1
+ address-family ipv4 unicast
+ no neighbor 192.0.2.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.0.2.2 activate
+ neighbor 192.0.2.2 route-map rmap in
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 101
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+route-map rmap permit 1
+ set l3vpn next-hop encapsulation gre
+!
--- /dev/null
+{
+ "10.200.0.0/24": [
+ {
+ "prefix": "10.200.0.0/24",
+ "prefixLen": 24,
+ "protocol": "bgp",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "192.168.0.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-gre0",
+ "vrf": "default",
+ "active": true,
+ "labels":[
+ 102
+ ]
+ }
+ ]
+ }
+ ],
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0/24",
+ "prefixLen": 24,
+ "protocol": "connected",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "nexthops":[
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "r1-eth1",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log stdout
+ip route 192.0.2.2/32 192.168.0.2
+interface lo
+ ip address 192.0.2.1/32
+!
+interface r1-gre0
+ ip address 192.168.0.1/24
+!
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0",
+ "prefixLen": 24,
+ "network": "10.201.0.0\/24",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "192.0.2.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.2
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.1 update-source 192.0.2.2
+ address-family ipv4 unicast
+ no neighbor 192.0.2.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.0.2.1 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
--- /dev/null
+log stdout
+ip route 192.0.2.1/32 192.168.0.1
+interface lo
+ ip address 192.0.2.2/32
+!
+interface r2-gre0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_gre.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2021 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_gre.py: Test the FRR BGP daemon with a VPNv4 iBGP session
+ whose next hop is reached through a GRE tunnel interface.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
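+# Configure, outside of FRR, the Linux networking pieces the test relies on:
+# a vrf1 VRF device holding each router's -eth1 interface, MPLS input on
+# -eth0 and on the GRE tunnel, and a GRE tunnel between the two -eth0
+# addresses (10.125.0.1 and 10.125.0.2).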
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ 'ip link add vrf1 type vrf table 10',
+ 'echo 10 > /proc/sys/net/mpls/platform_labels',
+ 'ip link set dev vrf1 up',
+ 'ip link set dev {0}-eth1 master vrf1',
+ 'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input',
+ 'ip tunnel add {0}-gre0 mode gre ttl 64 dev {0}-eth0 local 10.125.0.{1} remote 10.125.0.{2}',
+ 'ip link set dev {0}-gre0 up',
+ 'echo 1 > /proc/sys/net/mpls/conf/{0}-gre0/input',
+ ]
+
+ for cmd in cmds_list:
+ input = cmd.format('r1', '1', '2')
+ logger.info('input: ' + input)
+ output = tgen.net['r1'].cmd(input)
+ logger.info('output: ' + output)
+
+ for cmd in cmds_list:
+ input = cmd.format('r2', '2', '1')
+ logger.info('input: ' + input)
+ output = tgen.net['r2'].cmd(input)
+ logger.info('output: ' + output)
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def test_protocols_convergence():
+ """
+ Assert that all protocols have converged,
+ since the route checks below depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears['r1']
+ logger.info("Dump some context for r1")
+ router.vtysh_cmd("show bgp ipv4 vpn")
+ router.vtysh_cmd("show bgp summary")
+ router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+ router.vtysh_cmd("show running-config")
+ router = tgen.gears['r2']
+ logger.info("Dump some context for r2")
+ router.vtysh_cmd("show bgp ipv4 vpn")
+ router.vtysh_cmd("show bgp summary")
+ router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+ router.vtysh_cmd("show running-config")
+
+ # Check IPv4 routing tables on r1
+ logger.info("Checking IPv4 routes for convergence on r1")
+ router = tgen.gears['r1']
+ json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ logger.info("skipping file {}".format(json_file))
+ assert 0, 'ipv4_routes.json file not found'
+ return
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip route vrf vrf1 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ # Check that BGP IPv4 routes on r2 are present but not installed
+ logger.info("Checking BGP IPv4 routes for convergence on r2")
+ router = tgen.gears['r2']
+ json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ assert 0, 'bgp_ipv4_routes.json file not found'
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf vrf1 ipv4 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
Assert that the environment is correctly configured, and get extra config.
"""
+ if config.getoption("--collect-only"):
+ return
+
if "PYTEST_XDIST_WORKER" not in os.environ:
os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
os.environ["PYTEST_TOPOTEST_WORKER"] = ""
#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD
#define VTYSH_ACL VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA
#define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_EIGRPD|VTYSH_FABRICD
-#define VTYSH_INTERFACE VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_PBRD|VTYSH_FABRICD|VTYSH_VRRPD
-#define VTYSH_VRF VTYSH_INTERFACE|VTYSH_STATICD
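+/*
+ * Interface commands are now also handled by bgpd, while VRF commands keep
+ * going to staticd; the daemons they share live in VTYSH_INTERFACE_SUBSET.
+ */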
+#define VTYSH_INTERFACE_SUBSET \
+ VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \
+ VTYSH_ISISD | VTYSH_PIMD | VTYSH_PIM6D | VTYSH_NHRPD | \
+ VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | VTYSH_FABRICD | \
+ VTYSH_VRRPD
+#define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD
+#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD
#define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD
"Set EVPN gateway IP overlay index IPv6";
}
+ identity set-l3vpn-nexthop-encapsulation {
+ base frr-route-map:rmap-set-type;
+ description
+ "Accept L3VPN traffic over other than LSP encapsulation";
+ }
+
grouping extcommunity-non-transitive-types {
leaf two-octet-as-specific {
type boolean;
type inet:ipv6-address;
}
}
+ case l3vpn-nexthop-encapsulation {
+ when
+ "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action,
+ 'frr-bgp-route-map:set-l3vpn-nexthop-encapsulation')";
+ description
+ "Accept L3VPN traffic over other than LSP encapsulation";
+ leaf l3vpn-nexthop-encapsulation {
+ type enumeration {
+ enum "gre" {
+ value 0;
+ description
+ "GRE protocol";
+ }
+ }
+ }
+ }
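+ // Illustrative route-map usage of this set action; the exact vtysh
+ // spelling below is an assumption shown only for context:
+ //   route-map ACCEPT-GRE permit 10
+ //    set l3vpn next-hop encapsulation gre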
}
}
return 1;
}
- /* Deal with errors that occur because of races in link handling. */
- if (is_cmd
- && ((msg_type == RTM_DELROUTE
- && (-errnum == ENODEV || -errnum == ESRCH))
- || (msg_type == RTM_NEWROUTE
- && (-errnum == ENETDOWN || -errnum == EEXIST)))) {
+ /*
+ * Deal with errors that occur because of races in link handling
+ * or because the message type is not supported by the kernel.
+ */
+ if (is_cmd &&
+ ((msg_type == RTM_DELROUTE &&
+ (-errnum == ENODEV || -errnum == ESRCH)) ||
+ (msg_type == RTM_NEWROUTE &&
+ (-errnum == ENETDOWN || -errnum == EEXIST)) ||
+ ((msg_type == RTM_NEWTUNNEL || msg_type == RTM_DELTUNNEL ||
+ msg_type == RTM_GETTUNNEL) &&
+ (-errnum == EOPNOTSUPP)))) {
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("%s: error: %s type=%s(%u), seq=%u, pid=%u",
nl->name, safe_strerror(-errnum),
zlog_info(
"VRF %u already configured with NETNS %s",
vrf->vrf_id, ns->name);
- return CMD_WARNING_CONFIG_FAILED;
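+ /* The VRF already has a netns configured; warn instead of failing. */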
+ return CMD_WARNING;
}
}
ns = ns_lookup_name(pathname);