git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #11899 from opensourcerouting/feature/route_validation_extended_co...
author Russ White <russ@riw.us>
Tue, 13 Sep 2022 15:30:54 +0000 (11:30 -0400)
committer GitHub <noreply@github.com>
Tue, 13 Sep 2022 15:30:54 +0000 (11:30 -0400)
bgpd: Implement Origin Validation State via extended communities

98 files changed:
bfdd/bfd.c
bfdd/bfd_packet.c
bgpd/bgp_attr.h
bgpd/bgp_evpn.c
bgpd/bgp_fsm.c
bgpd/bgp_io.c
bgpd/bgp_keepalives.c
bgpd/bgp_labelpool.c
bgpd/bgp_labelpool.h
bgpd/bgp_main.c
bgpd/bgp_mplsvpn.c
bgpd/bgp_nht.c
bgpd/bgp_packet.c
bgpd/bgp_packet.h
bgpd/bgp_route.c
bgpd/bgp_route.h
bgpd/bgp_routemap.c
bgpd/bgp_routemap_nb.c
bgpd/bgp_routemap_nb.h
bgpd/bgp_routemap_nb_config.c
bgpd/bgp_rpki.c
bgpd/bgp_table.h
bgpd/bgp_updgrp.c
bgpd/bgp_vty.c
bgpd/bgp_zebra.c
bgpd/bgp_zebra.h
bgpd/bgpd.c
bgpd/bgpd.h
bgpd/subdir.am
doc/user/bgp.rst
doc/user/routemap.rst
eigrpd/eigrp_network.c
eigrpd/eigrp_packet.c
eigrpd/eigrpd.c
isisd/isis_circuit.c
isisd/isis_nb_config.c
isisd/isis_sr.c
isisd/isis_te.c
isisd/isis_te.h
isisd/isis_tlvs.c
isisd/isis_tlvs.h
isisd/isisd.c
lib/bitfield.h
lib/routemap.c
lib/routemap.h
lib/routemap_cli.c
lib/zlog.c
ospfd/ospf_lsa.c
ospfd/ospf_lsa.h
ospfd/ospf_spf.c
ospfd/ospf_vty.c
ospfd/ospfd.c
ospfd/ospfd.h
pimd/pim6_mld.c
pimd/pim_cmd.c
pimd/pim_iface.c
pimd/pim_igmp.c
pimd/pim_igmpv2.c
pimd/pim_igmpv3.c
pimd/pim_main.c
pimd/pim_mroute.c
pimd/pim_neighbor.c
pimd/pim_neighbor.h
pimd/pim_vty.c
pimd/pimd.h
ripd/ripd.c
tests/topotests/bgp_aggregate_address_matching_med/__init__.py [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py [new file with mode: 0644]
tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json
tests/topotests/bgp_lu_topo2/R1/labelpool.summ.json
tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py
tests/topotests/bgp_vpnv4_ebgp/__init__.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/__init__.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py [new file with mode: 0644]
tests/topotests/conftest.py
vtysh/vtysh.h
yang/frr-bgp-route-map.yang
zebra/kernel_netlink.c
zebra/zebra_vrf.c

diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index a1fb67d3577477ac0687abf1d09865291d13ad5e..4367f253e11955f4e8a418c6681b12c2a278941a 100644 (file)
@@ -348,7 +348,7 @@ int bfd_session_enable(struct bfd_session *bs)
        /* Sanity check: don't leak open sockets. */
        if (bs->sock != -1) {
                if (bglobal.debug_peer_event)
-                       zlog_debug("session-enable: previous socket open");
+                       zlog_debug("%s: previous socket open", __func__);
 
                close(bs->sock);
                bs->sock = -1;
@@ -952,7 +952,7 @@ int ptm_bfd_sess_del(struct bfd_peer_cfg *bpc)
        }
 
        if (bglobal.debug_peer_event)
-               zlog_debug("session-delete: %s", bs_to_string(bs));
+               zlog_debug("%s: %s", __func__, bs_to_string(bs));
 
        control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs);
 
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index 98411a87329d6bc4bbabb2f066c717a270857f82..382f78a0f63651f8d8bff157ebc52c4e2032f813 100644 (file)
@@ -578,8 +578,8 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
                        memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval));
                        if (ttlval > 255) {
                                if (bglobal.debug_network)
-                                       zlog_debug("ipv4-recv: invalid TTL: %u",
-                                                  ttlval);
+                                       zlog_debug("%s: invalid TTL: %u",
+                                                  __func__, ttlval);
                                return -1;
                        }
                        *ttl = ttlval;
@@ -686,8 +686,8 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
                        memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval));
                        if (ttlval > 255) {
                                if (bglobal.debug_network)
-                                       zlog_debug("ipv6-recv: invalid TTL: %u",
-                                                  ttlval);
+                                       zlog_debug("%s: invalid TTL: %u",
+                                                  __func__, ttlval);
                                return -1;
                        }
 
@@ -1127,13 +1127,13 @@ int bp_udp_send_fp(int sd, uint8_t *data, size_t datalen,
 
        if (wlen <= 0) {
                if (bglobal.debug_network)
-                       zlog_debug("udp-send: loopback failure: (%d) %s", errno,
-                                  strerror(errno));
+                       zlog_debug("%s: loopback failure: (%d) %s", __func__,
+                                  errno, strerror(errno));
                return -1;
        } else if (wlen < (ssize_t)datalen) {
                if (bglobal.debug_network)
-                       zlog_debug("udp-send: partial send: %zd expected %zu",
-                                  wlen, datalen);
+                       zlog_debug("%s: partial send: %zd expected %zu",
+                                  __func__, wlen, datalen);
                return -1;
        }
 
@@ -1194,13 +1194,13 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen,
        wlen = sendmsg(sd, &msg, 0);
        if (wlen <= 0) {
                if (bglobal.debug_network)
-                       zlog_debug("udp-send: loopback failure: (%d) %s", errno,
-                                  strerror(errno));
+                       zlog_debug("%s: loopback failure: (%d) %s", __func__,
+                                  errno, strerror(errno));
                return -1;
        } else if (wlen < (ssize_t)datalen) {
                if (bglobal.debug_network)
-                       zlog_debug("udp-send: partial send: %zd expected %zu",
-                                  wlen, datalen);
+                       zlog_debug("%s: partial send: %zd expected %zu",
+                                  __func__, wlen, datalen);
                return -1;
        }
 
@@ -1221,7 +1221,7 @@ int bp_set_ttl(int sd, uint8_t value)
        int ttl = value;
 
        if (setsockopt(sd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) == -1) {
-               zlog_warn("set-ttl: setsockopt(IP_TTL, %d): %s", value,
+               zlog_warn("%s: setsockopt(IP_TTL, %d): %s", __func__, value,
                          strerror(errno));
                return -1;
        }
@@ -1234,7 +1234,7 @@ int bp_set_tos(int sd, uint8_t value)
        int tos = value;
 
        if (setsockopt(sd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) == -1) {
-               zlog_warn("set-tos: setsockopt(IP_TOS, %d): %s", value,
+               zlog_warn("%s: setsockopt(IP_TOS, %d): %s", __func__, value,
                          strerror(errno));
                return -1;
        }
@@ -1247,8 +1247,8 @@ static bool bp_set_reuse_addr(int sd)
        int one = 1;
 
        if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
-               zlog_warn("set-reuse-addr: setsockopt(SO_REUSEADDR, %d): %s",
-                         one, strerror(errno));
+               zlog_warn("%s: setsockopt(SO_REUSEADDR, %d): %s", __func__, one,
+                         strerror(errno));
                return false;
        }
        return true;
@@ -1259,8 +1259,8 @@ static bool bp_set_reuse_port(int sd)
        int one = 1;
 
        if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) == -1) {
-               zlog_warn("set-reuse-port: setsockopt(SO_REUSEPORT, %d): %s",
-                         one, strerror(errno));
+               zlog_warn("%s: setsockopt(SO_REUSEPORT, %d): %s", __func__, one,
+                         strerror(errno));
                return false;
        }
        return true;
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index 5007fafc297415a585dd60df0c8a924ed09e1dca..4963ea64d093edc155ea260ff753edbf215734c7 100644 (file)
@@ -348,6 +348,7 @@ struct attr {
 #define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5)
 #define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6)
 #define BATTR_RMAP_LINK_BW_SET (1 << 7)
+#define BATTR_RMAP_L3VPN_ACCEPT_GRE (1 << 8)
 
 /* Router Reflector related structure. */
 struct cluster_list {
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index dc15d9c6951fe7ce408ba3e9eeb06efe1742daa8..58f5e9a226093564f38fa9b93c4b14245ed77b97 100644 (file)
@@ -3631,8 +3631,10 @@ static int update_advertise_vni_routes(struct bgp *bgp, struct bgpevpn *vpn)
                            pi->type == ZEBRA_ROUTE_BGP
                            && pi->sub_type == BGP_ROUTE_STATIC)
                                break;
-               if (!pi) /* unexpected */
+               if (!pi) {
+                       bgp_dest_unlock_node(dest);
                        return 0;
+               }
                attr = pi->attr;
 
                global_dest = bgp_global_evpn_node_get(bgp->rib[afi][safi],
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 1164546df7c4a66b2d9f412d635b4f4e710fc092..a8accc25f505f3c33f46141bf38b992b6acefd09 100644 (file)
@@ -300,11 +300,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
                peer->afc_recv[afi][safi] = from_peer->afc_recv[afi][safi];
                peer->orf_plist[afi][safi] = from_peer->orf_plist[afi][safi];
                peer->llgr[afi][safi] = from_peer->llgr[afi][safi];
-               if (from_peer->soo[afi][safi]) {
-                       ecommunity_free(&peer->soo[afi][safi]);
-                       peer->soo[afi][safi] =
-                               ecommunity_dup(from_peer->soo[afi][safi]);
-               }
        }
 
        if (bgp_getsockname(peer) < 0) {
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index 7af1fae280b3e3e52294b16a4bb8ee41f4082468..f9bb8d518d960404f5765007a1aea056b4ef9db4 100644 (file)
@@ -37,7 +37,7 @@
 #include "bgpd/bgp_debug.h"    // for bgp_debug_neighbor_events, bgp_type_str
 #include "bgpd/bgp_errors.h"   // for expanded error reference information
 #include "bgpd/bgp_fsm.h"      // for BGP_EVENT_ADD, bgp_event
-#include "bgpd/bgp_packet.h"   // for bgp_notify_send_with_data, bgp_notify...
+#include "bgpd/bgp_packet.h"   // for bgp_notify_io_invalid...
 #include "bgpd/bgp_trace.h"    // for frrtraces
 #include "bgpd/bgpd.h"         // for peer, BGP_MARKER_SIZE, bgp_master, bm
 /* clang-format on */
@@ -526,8 +526,8 @@ static bool validate_header(struct peer *peer)
                return false;
 
        if (memcmp(m_correct, m_rx, BGP_MARKER_SIZE) != 0) {
-               bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
-                               BGP_NOTIFY_HEADER_NOT_SYNC);
+               bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+                                     BGP_NOTIFY_HEADER_NOT_SYNC, NULL, 0);
                return false;
        }
 
@@ -547,9 +547,8 @@ static bool validate_header(struct peer *peer)
                        zlog_debug("%s unknown message type 0x%02x", peer->host,
                                   type);
 
-               bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
-                                         BGP_NOTIFY_HEADER_BAD_MESTYPE, &type,
-                                         1);
+               bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+                                     BGP_NOTIFY_HEADER_BAD_MESTYPE, &type, 1);
                return false;
        }
 
@@ -574,9 +573,9 @@ static bool validate_header(struct peer *peer)
 
                uint16_t nsize = htons(size);
 
-               bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
-                                         BGP_NOTIFY_HEADER_BAD_MESLEN,
-                                         (unsigned char *)&nsize, 2);
+               bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
+                                     BGP_NOTIFY_HEADER_BAD_MESLEN,
+                                     (unsigned char *)&nsize, 2);
                return false;
        }
 
diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c
index 158f1633585639a098218d537625e2d158a160d5..604d6c95097d8379c4360d9a30949f18f9bd1822 100644 (file)
@@ -175,6 +175,15 @@ void *bgp_keepalives_start(void *arg)
        struct timeval next_update = {0, 0};
        struct timespec next_update_ts = {0, 0};
 
+       /*
+        * The RCU mechanism for each pthread is initialized in a "locked"
+        * state. That's ok for pthreads using the frr_pthread,
+        * thread_fetch event loop, because that event loop unlocks regularly.
+        * For foreign pthreads, the lock needs to be unlocked so that the
+        * background rcu pthread can run.
+        */
+       rcu_read_unlock();
+
        peerhash_mtx = XCALLOC(MTYPE_TMP, sizeof(pthread_mutex_t));
        peerhash_cond = XCALLOC(MTYPE_TMP, sizeof(pthread_cond_t));
 
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index fa1dcf33e02d24c14b7ccfb0315b47f5e30a4f2f..c227a5e41c403b2bd206d2f445546eeb65a6e734 100644 (file)
 #include "bgpd/bgp_errors.h"
 #include "bgpd/bgp_route.h"
 
+#define BGP_LABELPOOL_ENABLE_TESTS 0
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bgpd/bgp_labelpool_clippy.c"
+#endif
+
+
 /*
  * Definitions and external declarations.
  */
 extern struct zclient *zclient;
 
+#if BGP_LABELPOOL_ENABLE_TESTS
+static void lptest_init(void);
+static void lptest_finish(void);
+#endif
+
 /*
  * Remember where pool data are kept
  */
 static struct labelpool *lp;
 
-/* request this many labels at a time from zebra */
-#define LP_CHUNK_SIZE  50
+/*
+ * Number of labels requested at a time from the zebra label manager.
+ * We start small but double the request size each time up to a
+ * maximum size.
+ *
+ * The label space is 20 bits which is shared with other FRR processes
+ * on this host, so to avoid greedily requesting a mostly wasted chunk,
+ * we limit the chunk size to 1/16 of the label space (that's the -4 bits
+ * in the definition below). This limit slightly increases our cost of
+ * finding free labels in our allocated chunks.
+ */
+#define LP_CHUNK_SIZE_MIN 128
+#define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
 
 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
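To see the chunk-size policy described above in isolation: the requested size starts at LP_CHUNK_SIZE_MIN and doubles after each request until it reaches LP_CHUNK_SIZE_MAX (1/16 of the 20-bit label space), i.e. 128, 256, 512, ..., 65536. A standalone sketch, illustrative only, mirroring the growth rule used in bgp_lp_get() further down:

    #include <stdio.h>

    #define LP_CHUNK_SIZE_MIN 128
    #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))   /* 65536 */

    int main(void)
    {
            unsigned int next_chunksize = LP_CHUNK_SIZE_MIN;
            int request;

            for (request = 1; request <= 12; request++) {
                    printf("request %2d: ask the label manager for %u labels\n",
                           request, next_chunksize);
                    /* same doubling rule as bgp_lp_get() */
                    if ((next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
                            next_chunksize <<= 1;
            }
            return 0;
    }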
@@ -58,6 +81,9 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
 struct lp_chunk {
        uint32_t        first;
        uint32_t        last;
+       uint32_t nfree;              /* un-allocated count */
+       uint32_t idx_last_allocated; /* start looking here */
+       bitfield_t allocated_map;
 };
 
 /*
@@ -160,6 +186,9 @@ static void lp_lcb_free(void *goner)
 
 static void lp_chunk_free(void *goner)
 {
+       struct lp_chunk *chunk = (struct lp_chunk *)goner;
+
+       bf_free(chunk->allocated_map);
        XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
 }
 
@@ -180,6 +209,12 @@ void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
        lp->callback_q->spec.workfunc = lp_cbq_docallback;
        lp->callback_q->spec.del_item_data = lp_cbq_item_free;
        lp->callback_q->spec.max_retries = 0;
+
+       lp->next_chunksize = LP_CHUNK_SIZE_MIN;
+
+#if BGP_LABELPOOL_ENABLE_TESTS
+       lptest_init();
+#endif
 }
 
 /* check if a label callback was for a BGP LU node, and if so, unlock it */
@@ -201,6 +236,9 @@ void bgp_lp_finish(void)
        struct lp_fifo *lf;
        struct work_queue_item *item, *titem;
 
+#if BGP_LABELPOOL_ENABLE_TESTS
+       lptest_finish();
+#endif
        if (!lp)
                return;
 
@@ -240,25 +278,53 @@ static mpls_label_t get_label_from_pool(void *labelid)
 
        /*
         * Find a free label
-        * Linear search is not efficient but should be executed infrequently.
         */
        for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
                uintptr_t lbl;
+               unsigned int index;
 
                if (debug)
                        zlog_debug("%s: chunk first=%u last=%u",
                                __func__, chunk->first, chunk->last);
 
-               for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
-                       /* labelid is key to all-request "ledger" list */
-                       if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
-                               /*
-                                * Success
-                                */
-                               return lbl;
-                       }
+               /*
+                * don't look in chunks with no available labels
+                */
+               if (!chunk->nfree)
+                       continue;
+
+               /*
+                * roll through bitfield starting where we stopped
+                * last time
+                */
+               index = bf_find_next_clear_bit_wrap(
+                       &chunk->allocated_map, chunk->idx_last_allocated + 1,
+                       0);
+
+               /*
+                * since chunk->nfree is non-zero, we should always get
+                * a valid index
+                */
+               assert(index != WORD_MAX);
+
+               lbl = chunk->first + index;
+               if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
+                       /* something is very wrong */
+                       zlog_err("%s: unable to insert inuse label %u (id %p)",
+                                __func__, (uint32_t)lbl, labelid);
+                       return MPLS_LABEL_NONE;
                }
+
+               /*
+                * Success
+                */
+               bf_set_bit(chunk->allocated_map, index);
+               chunk->idx_last_allocated = index;
+               chunk->nfree -= 1;
+
+               return lbl;
        }
+
        return MPLS_LABEL_NONE;
 }
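The loop above skips exhausted chunks and then searches each chunk's allocation bitmap starting just past the last allocated index, wrapping around to bit 0. The bf_find_next_clear_bit_wrap() helper is presumably provided by lib/bitfield.h, which this commit also touches; the sketch below illustrates the same wrap-around search over a plain word array and is a hypothetical stand-in, not the FRR API:

    #include <stdint.h>

    /* Return the index of the first clear bit at or after "start",
     * wrapping to bit 0; UINT32_MAX if all "nbits" bits are set. */
    static uint32_t find_clear_bit_wrap(const uint64_t *words, uint32_t nbits,
                                        uint32_t start)
    {
            uint32_t n, i;

            for (n = 0; n < nbits; n++) {
                    i = (start + n) % nbits;
                    if (!(words[i / 64] & (UINT64_C(1) << (i % 64))))
                            return i;
            }
            return UINT32_MAX;
    }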
 
@@ -299,10 +365,14 @@ static struct lp_lcb *lcb_alloc(
  * When connection to zebra is reestablished, previous label assignments
  * will be invalidated (via callbacks having the "allocated" parameter unset)
  * and new labels will be automatically reassigned by this labelpool module
- * (that is, a requestor does not need to call lp_get() again if it is
+ * (that is, a requestor does not need to call bgp_lp_get() again if it is
  * notified via callback that its label has been lost: it will eventually
  * get another callback with a new label assignment).
  *
+ * The callback function should return 0 to accept the allocation
+ * and non-zero to refuse it. The callback function return value is
+ * ignored for invalidations (i.e., when the "allocated" parameter is false)
+ *
  * Prior requests for a given labelid are detected so that requests and
  * assignments are not duplicated.
  */
@@ -392,10 +462,13 @@ void bgp_lp_get(
        if (lp_fifo_count(&lp->requests) > lp->pending_count) {
                if (!zclient || zclient->sock < 0)
                        return;
-               if (zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
-                                                MPLS_LABEL_BASE_ANY)
-                   != ZCLIENT_SEND_FAILURE)
-                       lp->pending_count += LP_CHUNK_SIZE;
+               if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
+                                                MPLS_LABEL_BASE_ANY) !=
+                   ZCLIENT_SEND_FAILURE) {
+                       lp->pending_count += lp->next_chunksize;
+                       if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
+                               lp->next_chunksize <<= 1;
+               }
        }
 }
 
@@ -408,13 +481,36 @@ void bgp_lp_release(
 
        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                if (label == lcb->label && type == lcb->type) {
+                       struct listnode *node;
+                       struct lp_chunk *chunk;
                        uintptr_t lbl = label;
+                       bool deallocated = false;
 
                        /* no longer in use */
                        skiplist_delete(lp->inuse, (void *)lbl, NULL);
 
                        /* no longer requested */
                        skiplist_delete(lp->ledger, labelid, NULL);
+
+                       /*
+                        * Find the chunk this label belongs to and
+                        * deallocate the label
+                        */
+                       for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+                               uint32_t index;
+
+                               if ((label < chunk->first) ||
+                                   (label > chunk->last))
+                                       continue;
+
+                               index = label - chunk->first;
+                               assert(bf_test_index(chunk->allocated_map,
+                                                    index));
+                               bf_release_index(chunk->allocated_map, index);
+                               chunk->nfree += 1;
+                               deallocated = true;
+                       }
+                       assert(deallocated);
                }
        }
 }
@@ -427,6 +523,7 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);
        struct lp_fifo *lf;
+       uint32_t labelcount;
 
        if (last < first) {
                flog_err(EC_BGP_LABEL,
@@ -437,19 +534,27 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
 
        chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
 
+       labelcount = last - first + 1;
+
        chunk->first = first;
        chunk->last = last;
+       chunk->nfree = labelcount;
+       bf_init(chunk->allocated_map, labelcount);
 
-       listnode_add(lp->chunks, chunk);
+       /*
+        * Optimize for allocation by adding the new (presumably larger)
+        * chunk at the head of the list so it is examined first.
+        */
+       listnode_add_head(lp->chunks, chunk);
 
-       lp->pending_count -= (last - first + 1);
+       lp->pending_count -= labelcount;
 
        if (debug) {
                zlog_debug("%s: %zu pending requests", __func__,
                        lp_fifo_count(&lp->requests));
        }
 
-       while ((lf = lp_fifo_first(&lp->requests))) {
+       while (labelcount && (lf = lp_fifo_first(&lp->requests))) {
 
                struct lp_lcb *lcb;
                void *labelid = lf->lcb.labelid;
@@ -495,6 +600,8 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
                        break;
                }
 
+               labelcount -= 1;
+
                /*
                 * we filled the request from local pool.
                 * Enqueue response work item with new label.
@@ -535,8 +642,8 @@ void bgp_lp_event_zebra_down(void)
  */
 void bgp_lp_event_zebra_up(void)
 {
-       int labels_needed;
-       int chunks_needed;
+       unsigned int labels_needed;
+       unsigned int chunks_needed;
        void *labelid;
        struct lp_lcb *lcb;
        int lm_init_ok;
@@ -548,9 +655,16 @@ void bgp_lp_event_zebra_up(void)
        labels_needed = lp_fifo_count(&lp->requests) +
                skiplist_count(lp->inuse);
 
+       if (labels_needed > lp->next_chunksize) {
+               while ((lp->next_chunksize < labels_needed) &&
+                      (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))
+
+                       lp->next_chunksize <<= 1;
+       }
+
        /* round up */
-       chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
-       labels_needed = chunks_needed * LP_CHUNK_SIZE;
+       chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+       labels_needed = chunks_needed * lp->next_chunksize;
 
        lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;
 
@@ -937,25 +1051,496 @@ DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
                }
                json = json_object_new_array();
        } else {
-               vty_out(vty, "First    Last\n");
-               vty_out(vty, "--------------\n");
+               vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
+                       "nfree");
+               vty_out(vty, "-------------------------------------------\n");
        }
 
        for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+               uint32_t size;
+
+               size = chunk->last - chunk->first + 1;
+
                if (uj) {
                        json_elem = json_object_new_object();
                        json_object_array_add(json, json_elem);
                        json_object_int_add(json_elem, "first", chunk->first);
                        json_object_int_add(json_elem, "last", chunk->last);
+                       json_object_int_add(json_elem, "size", size);
+                       json_object_int_add(json_elem, "numberFree",
+                                           chunk->nfree);
                } else
-                       vty_out(vty, "%-10u %-10u\n", chunk->first,
-                               chunk->last);
+                       vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
+                               chunk->last, size, chunk->nfree);
        }
        if (uj)
                vty_json(vty, json);
        return CMD_SUCCESS;
 }
 
+#if BGP_LABELPOOL_ENABLE_TESTS
+/*------------------------------------------------------------------------
+ *                     Testing code start
+ *------------------------------------------------------------------------*/
+
+DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");
+
+#define LPT_STAT_INSERT_FAIL 0
+#define LPT_STAT_DELETE_FAIL 1
+#define LPT_STAT_ALLOCATED 2
+#define LPT_STAT_DEALLOCATED 3
+#define LPT_STAT_MAX 4
+
+const char *lpt_counter_names[] = {
+       "sl insert failures",
+       "sl delete failures",
+       "labels allocated",
+       "labels deallocated",
+};
+
+static uint8_t lpt_generation;
+static bool lpt_inprogress;
+static struct skiplist *lp_tests;
+static unsigned int lpt_test_cb_tcb_lookup_fails;
+static unsigned int lpt_release_tcb_lookup_fails;
+static unsigned int lpt_test_event_tcb_lookup_fails;
+static unsigned int lpt_stop_tcb_lookup_fails;
+
+struct lp_test {
+       uint8_t generation;
+       unsigned int request_maximum;
+       unsigned int request_blocksize;
+       uintptr_t request_count; /* match type of labelid */
+       int label_type;
+       struct skiplist *labels;
+       struct timeval starttime;
+       struct skiplist *timestamps_alloc;
+       struct skiplist *timestamps_dealloc;
+       struct thread *event_thread;
+       unsigned int counter[LPT_STAT_MAX];
+};
+
+/* test parameters */
+#define LPT_MAX_COUNT 500000  /* get this many labels in all */
+#define LPT_BLKSIZE 10000     /* this many at a time, then yield */
+#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
+
+
+static int test_cb(mpls_label_t label, void *labelid, bool allocated)
+{
+       uintptr_t generation;
+       struct lp_test *tcb;
+
+       generation = ((uintptr_t)labelid >> 24) & 0xff;
+
+       if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {
+
+               /* couldn't find current test in progress */
+               ++lpt_test_cb_tcb_lookup_fails;
+               return -1; /* reject allocation */
+       }
+
+       if (allocated) {
+               ++tcb->counter[LPT_STAT_ALLOCATED];
+               if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
+                       uintptr_t time_ms;
+
+                       time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
+                       skiplist_insert(tcb->timestamps_alloc,
+                                       (void *)(uintptr_t)tcb
+                                               ->counter[LPT_STAT_ALLOCATED],
+                                       (void *)time_ms);
+               }
+               if (skiplist_insert(tcb->labels, labelid,
+                                   (void *)(uintptr_t)label)) {
+                       ++tcb->counter[LPT_STAT_INSERT_FAIL];
+                       return -1;
+               }
+       } else {
+               ++tcb->counter[LPT_STAT_DEALLOCATED];
+               if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
+                       uintptr_t time_ms;
+
+                       time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
+                       skiplist_insert(tcb->timestamps_dealloc,
+                                       (void *)(uintptr_t)tcb
+                                               ->counter[LPT_STAT_ALLOCATED],
+                                       (void *)time_ms);
+               }
+               if (skiplist_delete(tcb->labels, labelid, 0)) {
+                       ++tcb->counter[LPT_STAT_DELETE_FAIL];
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+static void labelpool_test_event_handler(struct thread *thread)
+{
+       struct lp_test *tcb;
+
+       if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
+                           (void **)&tcb)) {
+
+               /* couldn't find current test in progress */
+               ++lpt_test_event_tcb_lookup_fails;
+               return;
+       }
+
+       /*
+        * request a bunch of labels
+        */
+       for (unsigned int i = 0; (i < tcb->request_blocksize) &&
+                                (tcb->request_count < tcb->request_maximum);
+            ++i) {
+
+               uintptr_t id;
+
+               ++tcb->request_count;
+
+               /*
+                * construct 32-bit id from request_count and generation
+                */
+               id = ((uintptr_t)tcb->generation << 24) |
+                    (tcb->request_count & 0x00ffffff);
+               bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
+       }
+
+       if (tcb->request_count < tcb->request_maximum)
+               thread_add_event(bm->master, labelpool_test_event_handler, NULL,
+                                0, &tcb->event_thread);
+}
+
+static void lptest_stop(void)
+{
+       struct lp_test *tcb;
+
+       if (!lpt_inprogress)
+               return;
+
+       if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
+                           (void **)&tcb)) {
+
+               /* couldn't find current test in progress */
+               ++lpt_stop_tcb_lookup_fails;
+               return;
+       }
+
+       if (tcb->event_thread)
+               thread_cancel(&tcb->event_thread);
+
+       lpt_inprogress = false;
+}
+
+static int lptest_start(struct vty *vty)
+{
+       struct lp_test *tcb;
+
+       if (lpt_inprogress) {
+               vty_out(vty, "test already in progress\n");
+               return -1;
+       }
+
+       if (skiplist_count(lp_tests) >=
+           (1 << (8 * sizeof(lpt_generation))) - 1) {
+               /*
+                * Too many test runs
+                */
+               vty_out(vty, "too many tests: clear first\n");
+               return -1;
+       }
+
+       /*
+        * We pack the generation and request number into the labelid;
+        * make sure they fit.
+        */
+       unsigned int n1 = LPT_MAX_COUNT;
+       unsigned int sh = 0;
+       unsigned int label_bits;
+
+       label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));
+
+       /* n1 should be same type as tcb->request_maximum */
+       assert(sizeof(n1) == sizeof(tcb->request_maximum));
+
+       while (n1 >>= 1)
+               ++sh;
+       sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */
+
+       if (sh > label_bits) {
+               vty_out(vty,
+                       "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
+                       LPT_MAX_COUNT, sh, label_bits);
+               return -1;
+       }
+
+       lpt_inprogress = true;
+       ++lpt_generation;
+
+       tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));
+
+       tcb->generation = lpt_generation;
+       tcb->label_type = LP_TYPE_VRF;
+       tcb->request_maximum = LPT_MAX_COUNT;
+       tcb->request_blocksize = LPT_BLKSIZE;
+       tcb->labels = skiplist_new(0, NULL, NULL);
+       tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
+       tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
+       thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
+                        &tcb->event_thread);
+       monotime(&tcb->starttime);
+
+       skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
+       return 0;
+}
+
+DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
+      "debug bgp lptest start",
+      DEBUG_STR BGP_STR
+      "label pool test\n"
+      "start\n")
+{
+       lptest_start(vty);
+       return CMD_SUCCESS;
+}
+
+static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
+{
+       unsigned int i;
+
+       vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
+               lpt_test_cb_tcb_lookup_fails);
+       vty_out(vty, "Global Lookup Failures in release: %5u\n",
+               lpt_release_tcb_lookup_fails);
+       vty_out(vty, "Global Lookup Failures in event:   %5u\n",
+               lpt_test_event_tcb_lookup_fails);
+       vty_out(vty, "Global Lookup Failures in stop:    %5u\n",
+               lpt_stop_tcb_lookup_fails);
+       vty_out(vty, "\n");
+
+       if (!tcb) {
+               if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
+                                   (void **)&tcb)) {
+                       vty_out(vty, "Error: can't find test %u\n",
+                               lpt_generation);
+                       return;
+               }
+       }
+
+       vty_out(vty, "Test Generation %u:\n", tcb->generation);
+
+       vty_out(vty, "Counter   Value\n");
+       for (i = 0; i < LPT_STAT_MAX; ++i) {
+               vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
+                       tcb->counter[i]);
+       }
+       vty_out(vty, "\n");
+
+       if (tcb->timestamps_alloc) {
+               void *Key;
+               void *Value;
+               void *cursor;
+
+               float elapsed;
+
+               vty_out(vty, "%10s %10s\n", "Count", "Seconds");
+
+               cursor = NULL;
+               while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
+                                     &cursor)) {
+
+                       elapsed = ((float)(uintptr_t)Value) / 1000;
+
+                       vty_out(vty, "%10llu %10.3f\n",
+                               (unsigned long long)(uintptr_t)Key, elapsed);
+               }
+               vty_out(vty, "\n");
+       }
+}
+
+DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
+      "debug bgp lptest show",
+      DEBUG_STR BGP_STR
+      "label pool test\n"
+      "show\n")
+{
+
+       if (lp_tests) {
+               void *Key;
+               void *Value;
+               void *cursor;
+
+               cursor = NULL;
+               while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
+                       lptest_print_stats(vty, (struct lp_test *)Value);
+               }
+       } else {
+               vty_out(vty, "no test results\n");
+       }
+       return CMD_SUCCESS;
+}
+
+DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
+      "debug bgp lptest stop",
+      DEBUG_STR BGP_STR
+      "label pool test\n"
+      "stop\n")
+{
+
+       if (lpt_inprogress) {
+               lptest_stop();
+               lptest_print_stats(vty, NULL);
+       } else {
+               vty_out(vty, "no test in progress\n");
+       }
+       return CMD_SUCCESS;
+}
+
+DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
+      "debug bgp lptest clear",
+      DEBUG_STR BGP_STR
+      "label pool test\n"
+      "clear\n")
+{
+
+       if (lpt_inprogress) {
+               lptest_stop();
+       }
+       if (lp_tests) {
+               while (!skiplist_first(lp_tests, NULL, NULL))
+                       /* del function of skiplist cleans up tcbs */
+                       skiplist_delete_first(lp_tests);
+       }
+       return CMD_SUCCESS;
+}
+
+/*
+ * With the "release" command, we can release labels at intervals through
+ * the ID space. Thus we can exercise the bitfield-wrapping behavior
+ * of the allocator in a subsequent test.
+ */
+/* clang-format off */
+DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
+      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
+      DEBUG_STR
+      BGP_STR
+      "label pool test\n"
+      "release labels\n"
+      "\"test\"\n"
+      "test number\n"
+      "\"every\"\n"
+      "label fraction denominator\n")
+{
+       /* clang-format on */
+
+       unsigned long testnum;
+       char *end;
+       struct lp_test *tcb;
+
+       testnum = strtoul(generation, &end, 0);
+       if (*end) {
+               vty_out(vty, "Invalid test number: \"%s\"\n", generation);
+               return CMD_SUCCESS;
+       }
+       if (lpt_inprogress && (testnum == lpt_generation)) {
+               vty_out(vty,
+                       "Error: Test %lu is still in progress (stop first)\n",
+                       testnum);
+               return CMD_SUCCESS;
+       }
+
+       if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
+                           (void **)&tcb)) {
+
+               /* couldn't find current test in progress */
+               vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
+                       testnum);
+               ++lpt_release_tcb_lookup_fails;
+               return CMD_SUCCESS;
+       }
+
+       void *Key, *cKey;
+       void *Value, *cValue;
+       void *cursor;
+       unsigned int iteration;
+       int rc;
+
+       cursor = NULL;
+       iteration = 0;
+       rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
+
+       while (!rc) {
+               cKey = Key;
+               cValue = Value;
+
+               /* find next item before we delete this one */
+               rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
+
+               if (!(iteration % every_nth)) {
+                       bgp_lp_release(tcb->label_type, cKey,
+                                      (mpls_label_t)(uintptr_t)cValue);
+                       skiplist_delete(tcb->labels, cKey, NULL);
+                       ++tcb->counter[LPT_STAT_DEALLOCATED];
+               }
+               ++iteration;
+       }
+
+       return CMD_SUCCESS;
+}
+
+static void lptest_delete(void *val)
+{
+       struct lp_test *tcb = (struct lp_test *)val;
+       void *Key;
+       void *Value;
+       void *cursor;
+
+       if (tcb->labels) {
+               cursor = NULL;
+               while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
+                       bgp_lp_release(tcb->label_type, Key,
+                                      (mpls_label_t)(uintptr_t)Value);
+               skiplist_free(tcb->labels);
+               tcb->labels = NULL;
+       }
+       if (tcb->timestamps_alloc) {
+               cursor = NULL;
+               skiplist_free(tcb->timestamps_alloc);
+               tcb->timestamps_alloc = NULL;
+       }
+
+       if (tcb->timestamps_dealloc) {
+               cursor = NULL;
+               skiplist_free(tcb->timestamps_dealloc);
+               tcb->timestamps_dealloc = NULL;
+       }
+
+       if (tcb->event_thread)
+               thread_cancel(&tcb->event_thread);
+
+       memset(tcb, 0, sizeof(*tcb));
+
+       XFREE(MTYPE_LABELPOOL_TEST, tcb);
+}
+
+static void lptest_init(void)
+{
+       lp_tests = skiplist_new(0, NULL, lptest_delete);
+}
+
+static void lptest_finish(void)
+{
+       if (lp_tests) {
+               skiplist_free(lp_tests);
+               lp_tests = NULL;
+       }
+}
+
+/*------------------------------------------------------------------------
+ *                     Testing code end
+ *------------------------------------------------------------------------*/
+#endif /* BGP_LABELPOOL_ENABLE_TESTS */
+
 void bgp_lp_vty_init(void)
 {
        install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
@@ -963,4 +1548,12 @@ void bgp_lp_vty_init(void)
        install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
        install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
        install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);
+
+#if BGP_LABELPOOL_ENABLE_TESTS
+       install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
+       install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
+       install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
+       install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
+       install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
+#endif /* BGP_LABELPOOL_ENABLE_TESTS */
 }
diff --git a/bgpd/bgp_labelpool.h b/bgpd/bgp_labelpool.h
index d6a8eec84d8d364b548c1552d38edf1600da5a4f..2f3ffe437fa6ca4e2ee298fa42e969945bafd372 100644 (file)
@@ -41,6 +41,7 @@ struct labelpool {
        struct work_queue       *callback_q;
        uint32_t                pending_count;  /* requested from zebra */
        uint32_t reconnect_count;               /* zebra reconnections */
+       uint32_t next_chunksize;                /* request this many labels */
 };
 
 extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index 1297eb440ea36748ae5ca2ecb1f02308bda83912..90ae580babe4866db1a40df29a05b6d0b9c8a28c 100644 (file)
@@ -511,6 +511,8 @@ int main(int argc, char **argv)
                                 ", bgp@%s:%d", address, bm->port);
        }
 
+       bgp_if_init();
+
        frr_config_fork();
        /* must be called after fork() */
        bgp_gr_apply_running_config();
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 2f1cc4dc82f6d2d8d6bba2d99c589d7764fe9b84..5a039b25bc5ce5f5aa7d6b064dbee277d7c496f2 100644 (file)
@@ -978,6 +978,11 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
        new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_IMPORTED, 0,
                        to_bgp->peer_self, new_attr, bn);
 
+       if (source_bpi->peer) {
+               extra = bgp_path_info_extra_get(new);
+               extra->peer_orig = peer_lock(source_bpi->peer);
+       }
+
        if (nexthop_self_flag)
                bgp_path_info_set_flag(bn, new, BGP_PATH_ANNC_NH_SELF);
 
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 61f1b295ca2fd558554674f7cffbd258100d763c..b38e5b7a9a65e5815f4e223340704d23baf104fa 100644 (file)
@@ -61,22 +61,88 @@ static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
                    && bnc->nexthop_num > 0));
 }
 
-static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
+static int bgp_isvalid_nexthop_for_ebgp(struct bgp_nexthop_cache *bnc,
+                                       struct bgp_path_info *path)
+{
+       struct interface *ifp = NULL;
+       struct nexthop *nexthop;
+       struct bgp_interface *iifp;
+       struct peer *peer;
+
+       if (!path->extra || !path->extra->peer_orig)
+               return false;
+
+       peer = path->extra->peer_orig;
+
+       /* only connected ebgp peers are valid */
+       if (peer->sort != BGP_PEER_EBGP || peer->ttl != BGP_DEFAULT_TTL ||
+           CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK) ||
+           CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
+               return false;
+
+       for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
+               if (nexthop->type == NEXTHOP_TYPE_IFINDEX ||
+                   nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX ||
+                   nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) {
+                       ifp = if_lookup_by_index(
+                               bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
+                               bnc->bgp->vrf_id);
+               }
+               if (!ifp)
+                       continue;
+               iifp = ifp->info;
+               if (CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING))
+                       return true;
+       }
+       return false;
+}
+
+static int bgp_isvalid_nexthop_for_mplsovergre(struct bgp_nexthop_cache *bnc,
+                                              struct bgp_path_info *path)
+{
+       struct interface *ifp = NULL;
+       struct nexthop *nexthop;
+
+       for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
+               if (nexthop->type != NEXTHOP_TYPE_BLACKHOLE) {
+                       ifp = if_lookup_by_index(
+                               bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
+                               bnc->bgp->vrf_id);
+                       if (ifp && (ifp->ll_type == ZEBRA_LLT_IPGRE ||
+                                   ifp->ll_type == ZEBRA_LLT_IP6GRE))
+                               break;
+               }
+       }
+       if (!ifp)
+               return false;
+
+       if (CHECK_FLAG(path->attr->rmap_change_flags,
+                      BATTR_RMAP_L3VPN_ACCEPT_GRE))
+               return true;
+
+       return false;
+}
+
+static int bgp_isvalid_nexthop_for_mpls(struct bgp_nexthop_cache *bnc,
+                                       struct bgp_path_info *path)
 {
        /*
-        * In the case of MPLS-VPN, the label is learned from LDP or other
+        * In the case of MPLS-VPN, the label is learned from LDP or other
         * protocols, and nexthop tracking is enabled for the label.
         * The value is recorded as BGP_NEXTHOP_LABELED_VALID.
-        * In the case of SRv6-VPN, we need to track the reachability to the
+        * In the case of SRv6-VPN, we need to track the reachability to the
         * SID (in other words, IPv6 address). As in MPLS, we need to record
         * the value as BGP_NEXTHOP_SID_VALID. However, this function is
         * currently not implemented, and this function assumes that all
         * Transit routes for SRv6-VPN are valid.
+        * - Otherwise check for mpls-gre acceptance
         */
-       return (bgp_zebra_num_connects() == 0
-               || (bnc && bnc->nexthop_num > 0
-                   && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
-                       || bnc->bgp->srv6_enabled)));
+       return (bgp_zebra_num_connects() == 0 ||
+               (bnc && (bnc->nexthop_num > 0 &&
+                        (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) ||
+                         bnc->bgp->srv6_enabled ||
+                         bgp_isvalid_nexthop_for_ebgp(bnc, path) ||
+                         bgp_isvalid_nexthop_for_mplsovergre(bnc, path)))));
 }
 
 static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
@@ -359,11 +425,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
         */
        if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW)
                return 1;
-       else if (safi == SAFI_UNICAST && pi
-                && pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra
-                && pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop) {
-               return bgp_isvalid_labeled_nexthop(bnc);
-       } else
+       else if (safi == SAFI_UNICAST && pi &&
+                pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra &&
+                pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop)
+               return bgp_isvalid_nexthop_for_mpls(bnc, pi);
+       else
                return (bgp_isvalid_nexthop(bnc));
 }
 
@@ -1063,7 +1129,8 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
                    && (path->attr->evpn_overlay.type
                        != OVERLAY_INDEX_GATEWAY_IP)) {
                        bnc_is_valid_nexthop =
-                               bgp_isvalid_labeled_nexthop(bnc) ? true : false;
+                               bgp_isvalid_nexthop_for_mpls(bnc, path) ? true
+                                                                       : false;
                } else {
                        if (bgp_update_martian_nexthop(
                                    bnc->bgp, afi, safi, path->type,
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 7daac44946cc945dd6346d55ce227147bafcb290..90695219a7ea37c518c325b914182400e64a1c54 100644 (file)
@@ -871,8 +871,9 @@ bool bgp_notify_received_hard_reset(struct peer *peer, uint8_t code,
  * @param data      Data portion
  * @param datalen   length of data portion
  */
-void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
-                              uint8_t sub_code, uint8_t *data, size_t datalen)
+static void bgp_notify_send_internal(struct peer *peer, uint8_t code,
+                                    uint8_t sub_code, uint8_t *data,
+                                    size_t datalen, bool use_curr)
 {
        struct stream *s;
        bool hard_reset = bgp_notify_send_hard_reset(peer, code, sub_code);
@@ -917,8 +918,11 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
         * If possible, store last packet for debugging purposes. This check is
         * in place because we are sometimes called with a doppelganger peer,
         * who tends to have a plethora of fields nulled out.
+        *
+        * Some callers should not attempt this - the io pthread for example
+        * should not touch internals of the peer struct.
         */
-       if (peer->curr) {
+       if (use_curr && peer->curr) {
                size_t packetsize = stream_get_endp(peer->curr);
                assert(packetsize <= peer->max_packet_size);
                memcpy(peer->last_reset_cause, peer->curr->data, packetsize);
@@ -1001,7 +1005,27 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
  */
 void bgp_notify_send(struct peer *peer, uint8_t code, uint8_t sub_code)
 {
-       bgp_notify_send_with_data(peer, code, sub_code, NULL, 0);
+       bgp_notify_send_internal(peer, code, sub_code, NULL, 0, true);
+}
+
+/*
+ * Enqueue notification; called from the main pthread, peer object access is ok.
+ */
+void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
+                              uint8_t sub_code, uint8_t *data, size_t datalen)
+{
+       bgp_notify_send_internal(peer, code, sub_code, data, datalen, true);
+}
+
+/*
+ * For use by the io pthread, queueing a notification but avoiding access to
+ * the peer object.
+ */
+void bgp_notify_io_invalid(struct peer *peer, uint8_t code, uint8_t sub_code,
+                          uint8_t *data, size_t datalen)
+{
+       /* Avoid touching the peer object */
+       bgp_notify_send_internal(peer, code, sub_code, data, datalen, false);
 }
 
 /*
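The wrappers above split notification sending by calling context: the main pthread keeps using bgp_notify_send() / bgp_notify_send_with_data(), which are allowed to stash peer->curr for last-reset debugging, while the io pthread must use bgp_notify_io_invalid(), which passes use_curr = false. A small hypothetical helper showing the choice; the flag and wrapper are illustrative, and the notification codes are the ones used by validate_header() in bgp_io.c above:

    static void notify_bad_msg_type(struct peer *peer, uint8_t type,
                                    bool on_io_pthread)
    {
            if (on_io_pthread)
                    /* io pthread: must not touch peer->curr and friends */
                    bgp_notify_io_invalid(peer, BGP_NOTIFY_HEADER_ERR,
                                          BGP_NOTIFY_HEADER_BAD_MESTYPE,
                                          &type, 1);
            else
                    /* main pthread: full variant, may record last reset cause */
                    bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
                                              BGP_NOTIFY_HEADER_BAD_MESTYPE,
                                              &type, 1);
    }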
diff --git a/bgpd/bgp_packet.h b/bgpd/bgp_packet.h
index a0eb579db7a80a0fb08f7b1f20fed0609815322a..9f6d772bc0a625354f7ce4bc6c2efdf38d841e94 100644 (file)
@@ -62,6 +62,8 @@ extern void bgp_open_send(struct peer *);
 extern void bgp_notify_send(struct peer *, uint8_t, uint8_t);
 extern void bgp_notify_send_with_data(struct peer *, uint8_t, uint8_t,
                                      uint8_t *, size_t);
+void bgp_notify_io_invalid(struct peer *peer, uint8_t code, uint8_t sub_code,
+                          uint8_t *data, size_t datalen);
 extern void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,
                                   uint8_t orf_type, uint8_t when_to_refresh,
                                   int remove, uint8_t subtype);
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index a1c6e7d4a361c1abc77620877ffda7f8ddeb68f6..162a7ed881f4f7ae29db227fe1fa66caf91e07f6 100644 (file)
@@ -255,6 +255,9 @@ void bgp_path_info_extra_free(struct bgp_path_info_extra **extra)
        if (e->bgp_orig)
                bgp_unlock(e->bgp_orig);
 
+       if (e->peer_orig)
+               peer_unlock(e->peer_orig);
+
        if (e->aggr_suppressors)
                list_delete(&e->aggr_suppressors);
 
@@ -1773,10 +1776,20 @@ static void bgp_peer_remove_private_as(struct bgp *bgp, afi_t afi, safi_t safi,
 static void bgp_peer_as_override(struct bgp *bgp, afi_t afi, safi_t safi,
                                 struct peer *peer, struct attr *attr)
 {
+       struct aspath *aspath;
+
        if (peer->sort == BGP_PEER_EBGP &&
-           peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE))
-               attr->aspath = aspath_replace_specific_asn(attr->aspath,
-                                                          peer->as, bgp->as);
+           peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) {
+               if (attr->aspath->refcnt)
+                       aspath = aspath_dup(attr->aspath);
+               else
+                       aspath = attr->aspath;
+
+               attr->aspath = aspath_intern(
+                       aspath_replace_specific_asn(aspath, peer->as, bgp->as));
+
+               aspath_free(aspath);
+       }
 }
 
 void bgp_attr_add_llgr_community(struct attr *attr)
@@ -6494,6 +6507,7 @@ static int bgp_static_set(struct vty *vty, const char *negate,
                        /* Label index cannot be changed. */
                        if (bgp_static->label_index != label_index) {
                                vty_out(vty, "%% cannot change label-index\n");
+                               bgp_dest_unlock_node(dest);
                                return CMD_WARNING_CONFIG_FAILED;
                        }
 
@@ -7278,6 +7292,7 @@ static void bgp_aggregate_install(
                        aggregate, atomic_aggregate, p);
 
                if (!attr) {
+                       bgp_dest_unlock_node(dest);
                        bgp_aggregate_delete(bgp, p, afi, safi, aggregate);
                        if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
                                zlog_debug("%s: %pFX null attribute", __func__,
@@ -7438,31 +7453,21 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
 static void bgp_aggregate_med_update(struct bgp_aggregate *aggregate,
                                     struct bgp *bgp, const struct prefix *p,
                                     afi_t afi, safi_t safi,
-                                    struct bgp_path_info *pi, bool is_adding)
+                                    struct bgp_path_info *pi)
 {
        /* MED matching disabled. */
        if (!aggregate->match_med)
                return;
 
-       /* Aggregation with different MED, nothing to do. */
-       if (aggregate->med_mismatched)
-               return;
-
-       /*
-        * Test the current entry:
-        *
-        * is_adding == true: if the new entry doesn't match then we must
-        * install all suppressed routes.
-        *
-        * is_adding == false: if the entry being removed was the last
-        * unmatching entry then we can suppress all routes.
+       /* Aggregation with different MED, recheck if we have got equal MEDs
+        * now.
         */
-       if (!is_adding) {
-               if (bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi)
-                   && aggregate->summary_only)
-                       bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi,
-                                                       safi, true);
-       } else
+       if (aggregate->med_mismatched &&
+           bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi) &&
+           aggregate->summary_only)
+               bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi, safi,
+                                               true);
+       else
                bgp_aggregate_med_match(aggregate, bgp, pi);
 
        /* No mismatches, just quit. */
@@ -7828,7 +7833,7 @@ static void bgp_add_route_to_aggregate(struct bgp *bgp,
         */
        if (aggregate->match_med)
                bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi,
-                                        pinew, true);
+                                        pinew);
 
        if (aggregate->summary_only && AGGREGATE_MED_VALID(aggregate))
                aggr_suppress_path(aggregate, pinew);
@@ -7951,8 +7956,7 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi,
         * "unsuppressing" twice.
         */
        if (aggregate->match_med)
-               bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi,
-                                        true);
+               bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi);
 
        if (aggregate->count > 0)
                aggregate->count--;
@@ -8477,6 +8481,17 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
 
        switch (nhtype) {
        case NEXTHOP_TYPE_IFINDEX:
+               switch (p->family) {
+               case AF_INET:
+                       attr.nexthop.s_addr = INADDR_ANY;
+                       attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+                       break;
+               case AF_INET6:
+                       memset(&attr.mp_nexthop_global, 0,
+                              sizeof(attr.mp_nexthop_global));
+                       attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
+                       break;
+               }
                break;
        case NEXTHOP_TYPE_IPV4:
        case NEXTHOP_TYPE_IPV4_IFINDEX:
@@ -11160,13 +11175,14 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
                        if (type == bgp_show_type_route_map) {
                                struct route_map *rmap = output_arg;
                                struct bgp_path_info path;
-                               struct attr dummy_attr;
+                               struct bgp_path_info_extra extra;
+                               struct attr dummy_attr = {};
                                route_map_result_t ret;
 
                                dummy_attr = *pi->attr;
 
-                               path.peer = pi->peer;
-                               path.attr = &dummy_attr;
+                               prep_for_rmap_apply(&path, &extra, dest, pi,
+                                                   pi->peer, &dummy_attr);
 
                                ret = route_map_apply(rmap, dest_p, &path);
                                bgp_attr_flush(&dummy_attr);
index 3e46c7043e15dd521f86742f237004f8f1c241c2..ddef4ca1bbfabbcb3e0d4460c4726185720219f7 100644 (file)
@@ -235,6 +235,12 @@ struct bgp_path_info_extra {
         */
        struct bgp *bgp_orig;
 
+       /*
+        * Original bgp session to know if the session is a
+        * connected EBGP session or not
+        */
+       struct peer *peer_orig;
+
        /*
         * Nexthop in context of original bgp instance. Needed
         * for label resolution of core mpls routes exported to a vrf.
index 45ceb6bfdeb5eb8dc40abe40efa0c3f9dad5d28c..ded47028a69e951a70e946ec4a5c8d2744d40b3a 100644 (file)
@@ -1953,6 +1953,57 @@ static const struct route_map_rule_cmd route_set_ip_nexthop_cmd = {
        route_set_ip_nexthop_free
 };
 
+/* `set l3vpn next-hop encapsulation l3vpn gre' */
+
+/* Set nexthop to object */
+struct rmap_l3vpn_nexthop_encapsulation_set {
+       uint8_t protocol;
+};
+
+static enum route_map_cmd_result_t
+route_set_l3vpn_nexthop_encapsulation(void *rule, const struct prefix *prefix,
+                                     void *object)
+{
+       struct rmap_l3vpn_nexthop_encapsulation_set *rins = rule;
+       struct bgp_path_info *path;
+
+       path = object;
+
+       if (rins->protocol != IPPROTO_GRE)
+               return RMAP_OKAY;
+
+       SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_L3VPN_ACCEPT_GRE);
+       return RMAP_OKAY;
+}
+
+/* Route map `l3vpn nexthop encapsulation' compile function. */
+static void *route_set_l3vpn_nexthop_encapsulation_compile(const char *arg)
+{
+       struct rmap_l3vpn_nexthop_encapsulation_set *rins;
+
+       rins = XCALLOC(MTYPE_ROUTE_MAP_COMPILED,
+                      sizeof(struct rmap_l3vpn_nexthop_encapsulation_set));
+
+       /* XXX ALL GRE modes are accepted for now: gre or ip6gre */
+       rins->protocol = IPPROTO_GRE;
+
+       return rins;
+}
+
+/* Free route map's compiled `l3vpn next-hop encapsulation' value. */
+static void route_set_l3vpn_nexthop_encapsulation_free(void *rule)
+{
+       XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+/* Route map commands for l3vpn next-hop encapsulation set. */
+static const struct route_map_rule_cmd
+       route_set_l3vpn_nexthop_encapsulation_cmd = {
+               "l3vpn next-hop encapsulation",
+               route_set_l3vpn_nexthop_encapsulation,
+               route_set_l3vpn_nexthop_encapsulation_compile,
+               route_set_l3vpn_nexthop_encapsulation_free};
+
 /* `set local-preference LOCAL_PREF' */
 
 /* Set local preference. */
@@ -2219,6 +2270,8 @@ route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object)
                        aspath_new, replace_asn, own_asn);
        }
 
+       aspath_free(aspath_new);
+
        return RMAP_OKAY;
 }
 
@@ -4159,8 +4212,6 @@ static void bgp_route_map_process_update_cb(char *rmap_name)
 
 void bgp_route_map_update_timer(struct thread *thread)
 {
-       bm->t_rmap_update = NULL;
-
        route_map_walk_update_list(bgp_route_map_process_update_cb);
 }
 
@@ -5357,6 +5408,34 @@ DEFUN_YANG (no_set_distance,
        return nb_cli_apply_changes(vty, NULL);
 }
 
+DEFPY_YANG(set_l3vpn_nexthop_encapsulation, set_l3vpn_nexthop_encapsulation_cmd,
+          "[no] set l3vpn next-hop encapsulation gre",
+          NO_STR SET_STR
+          "L3VPN operations\n"
+          "Next hop Information\n"
+          "Encapsulation options (for BGP only)\n"
+          "Accept L3VPN traffic over GRE encapsulation\n")
+{
+       const char *xpath =
+               "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']";
+       const char *xpath_value =
+               "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation";
+       enum nb_operation operation;
+
+       if (no)
+               operation = NB_OP_DESTROY;
+       else
+               operation = NB_OP_CREATE;
+
+       nb_cli_enqueue_change(vty, xpath, operation, NULL);
+       if (operation == NB_OP_DESTROY)
+               return nb_cli_apply_changes(vty, NULL);
+
+       nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "gre");
+
+       return nb_cli_apply_changes(vty, NULL);
+}
+
 DEFUN_YANG (set_local_pref,
            set_local_pref_cmd,
            "set local-preference WORD",
@@ -6930,6 +7009,7 @@ void bgp_route_map_init(void)
        route_map_install_set(&route_set_ecommunity_none_cmd);
        route_map_install_set(&route_set_tag_cmd);
        route_map_install_set(&route_set_label_index_cmd);
+       route_map_install_set(&route_set_l3vpn_nexthop_encapsulation_cmd);
 
        install_element(RMAP_NODE, &match_peer_cmd);
        install_element(RMAP_NODE, &match_peer_local_cmd);
@@ -7032,6 +7112,7 @@ void bgp_route_map_init(void)
        install_element(RMAP_NODE, &no_set_ipx_vpn_nexthop_cmd);
        install_element(RMAP_NODE, &set_originator_id_cmd);
        install_element(RMAP_NODE, &no_set_originator_id_cmd);
+       install_element(RMAP_NODE, &set_l3vpn_nexthop_encapsulation_cmd);
 
        route_map_install_match(&route_match_ipv6_address_cmd);
        route_map_install_match(&route_match_ipv6_next_hop_cmd);
index 21517d66d795c75202878f5d3c69fb4ced849304..c47c37dc667bc3fc87df1c4cfcb91a10d42e061d 100644 (file)
@@ -413,6 +413,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = {
                                .destroy = lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy,
                        }
                },
+               {
+                       .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation",
+                       .cbs = {
+                               .modify = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify,
+                               .destroy = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy,
+                       }
+               },
                {
                        .xpath = NULL,
                },
index 6064d3ad1aade2d3b7da6c2f35c02234c576a8c2..163e3b55cc0de19a1e32ffd39407a8217afcace8 100644 (file)
@@ -154,6 +154,10 @@ int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_modify(
        struct nb_cb_modify_args *args);
 int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy(
        struct nb_cb_destroy_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify(
+       struct nb_cb_modify_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy(
+       struct nb_cb_destroy_args *args);
 
 #ifdef __cplusplus
 }
index 0b956b5943e85eecac6b9c6cf5a03b50bcc6b724..b18cf9d4ddfe00537728062dd32b54326c683084 100644 (file)
@@ -2976,3 +2976,56 @@ int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy(
 
        return NB_OK;
 }
+
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/l3vpn-nexthop-encapsulation
+ */
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify(
+       struct nb_cb_modify_args *args)
+{
+       struct routemap_hook_context *rhc;
+       const char *type;
+       int rv;
+
+       switch (args->event) {
+       case NB_EV_VALIDATE:
+       case NB_EV_PREPARE:
+       case NB_EV_ABORT:
+               break;
+       case NB_EV_APPLY:
+               /* Add configuration. */
+               rhc = nb_running_get_entry(args->dnode, NULL, true);
+               type = yang_dnode_get_string(args->dnode, NULL);
+
+               /* Set destroy information. */
+               rhc->rhc_shook = generic_set_delete;
+               rhc->rhc_rule = "l3vpn next-hop encapsulation";
+               rhc->rhc_event = RMAP_EVENT_SET_DELETED;
+
+               rv = generic_set_add(rhc->rhc_rmi,
+                                    "l3vpn next-hop encapsulation", type,
+                                    args->errmsg, args->errmsg_len);
+               if (rv != CMD_SUCCESS) {
+                       rhc->rhc_shook = NULL;
+                       return NB_ERR_INCONSISTENCY;
+               }
+       }
+
+       return NB_OK;
+}
+
+int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy(
+       struct nb_cb_destroy_args *args)
+{
+       switch (args->event) {
+       case NB_EV_VALIDATE:
+       case NB_EV_PREPARE:
+       case NB_EV_ABORT:
+               break;
+       case NB_EV_APPLY:
+               return lib_route_map_entry_set_destroy(args);
+       }
+
+       return NB_OK;
+}
index de1c55964192f9fe37bc6d1fda58e8fb29d8e461..b90c09c68b2175620a3f4ff386f54e8d2ee82263 100644 (file)
@@ -428,14 +428,15 @@ static void bgpd_sync_callback(struct thread *thread)
                safi_t safi;
 
                for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
-                       if (!bgp->rib[afi][safi])
+                       struct bgp_table *table = bgp->rib[afi][safi];
+
+                       if (!table)
                                continue;
 
                        struct bgp_dest *match;
                        struct bgp_dest *node;
 
-                       match = bgp_table_subtree_lookup(bgp->rib[afi][safi],
-                                                        prefix);
+                       match = bgp_table_subtree_lookup(table, prefix);
                        node = match;
 
                        while (node) {
@@ -445,6 +446,9 @@ static void bgpd_sync_callback(struct thread *thread)
 
                                node = bgp_route_next_until(node, match);
                        }
+
+                       if (match)
+                               bgp_dest_unlock_node(match);
                }
        }
 
index d832383ab41b936d5e4e6c4ca0e5ca6c52887422..86cd4f3da105add288fe36d4fe8dd0feeb8ead52 100644 (file)
@@ -256,28 +256,6 @@ static inline struct bgp_dest *bgp_node_match(const struct bgp_table *table,
        return bgp_dest_from_rnode(rn);
 }
 
-/*
- * bgp_node_match_ipv4
- */
-static inline struct bgp_dest *
-bgp_node_match_ipv4(const struct bgp_table *table, struct in_addr *addr)
-{
-       struct route_node *rn = route_node_match_ipv4(table->route_table, addr);
-
-       return bgp_dest_from_rnode(rn);
-}
-
-/*
- * bgp_node_match_ipv6
- */
-static inline struct bgp_dest *
-bgp_node_match_ipv6(const struct bgp_table *table, struct in6_addr *addr)
-{
-       struct route_node *rn = route_node_match_ipv6(table->route_table, addr);
-
-       return bgp_dest_from_rnode(rn);
-}
-
 static inline unsigned long bgp_table_count(const struct bgp_table *const table)
 {
        return route_table_count(table->route_table);
@@ -292,59 +270,6 @@ static inline struct bgp_dest *bgp_table_get_next(const struct bgp_table *table,
        return bgp_dest_from_rnode(route_table_get_next(table->route_table, p));
 }
 
-/*
- * bgp_table_iter_init
- */
-static inline void bgp_table_iter_init(bgp_table_iter_t *iter,
-                                      struct bgp_table *table)
-{
-       bgp_table_lock(table);
-       iter->table = table;
-       route_table_iter_init(&iter->rt_iter, table->route_table);
-}
-
-/*
- * bgp_table_iter_next
- */
-static inline struct bgp_dest *bgp_table_iter_next(bgp_table_iter_t *iter)
-{
-       return bgp_dest_from_rnode(route_table_iter_next(&iter->rt_iter));
-}
-
-/*
- * bgp_table_iter_cleanup
- */
-static inline void bgp_table_iter_cleanup(bgp_table_iter_t *iter)
-{
-       route_table_iter_cleanup(&iter->rt_iter);
-       bgp_table_unlock(iter->table);
-       iter->table = NULL;
-}
-
-/*
- * bgp_table_iter_pause
- */
-static inline void bgp_table_iter_pause(bgp_table_iter_t *iter)
-{
-       route_table_iter_pause(&iter->rt_iter);
-}
-
-/*
- * bgp_table_iter_is_done
- */
-static inline int bgp_table_iter_is_done(bgp_table_iter_t *iter)
-{
-       return route_table_iter_is_done(&iter->rt_iter);
-}
-
-/*
- * bgp_table_iter_started
- */
-static inline int bgp_table_iter_started(bgp_table_iter_t *iter)
-{
-       return route_table_iter_started(&iter->rt_iter);
-}
-
 /* This would benefit from a real atomic operation...
  * until then. */
 static inline uint64_t bgp_table_next_version(struct bgp_table *table)
index d713192d003126c277d2c56cad050a4d4b03181d..0219535b0d57b567959148d08b27ff5e691c6399 100644 (file)
@@ -254,6 +254,8 @@ static void conf_release(struct peer *src, afi_t afi, safi_t safi)
        XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);
 
        XFREE(MTYPE_BGP_PEER_HOST, src->host);
+
+       ecommunity_free(&src->soo[afi][safi]);
 }
 
 static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
@@ -442,15 +444,16 @@ static unsigned int updgrp_hash_key_make(const void *p)
 
        if (bgp_debug_neighbor_events(peer)) {
                zlog_debug(
-                       "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u",
+                       "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju",
                        peer, peer->sort,
-                       (intmax_t)(peer->flags & PEER_UPDGRP_FLAGS),
-                       flags & PEER_UPDGRP_AF_FLAGS);
+                       (intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS),
+                       (intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS));
                zlog_debug(
                        "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
                        peer, (uint32_t)peer->addpath_type[afi][safi],
-                       peer->cap & PEER_UPDGRP_CAP_FLAGS,
-                       peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS,
+                       CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS),
+                       CHECK_FLAG(peer->af_cap[afi][safi],
+                                  PEER_UPDGRP_AF_CAP_FLAGS),
                        peer->v_routeadv, peer->change_local_as);
                zlog_debug(
                        "%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
@@ -484,14 +487,14 @@ static unsigned int updgrp_hash_key_make(const void *p)
                        peer->shared_network &&
                                peer_afi_active_nego(peer, AFI_IP6));
                zlog_debug(
-                       "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %u",
+                       "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju",
                        peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
                        CHECK_FLAG(peer->af_cap[afi][safi],
                                   PEER_CAP_ORF_PREFIX_SM_RCV),
                        CHECK_FLAG(peer->af_cap[afi][safi],
                                   PEER_CAP_ORF_PREFIX_SM_OLD_RCV),
-                       CHECK_FLAG(peer->af_flags[afi][safi],
-                                  PEER_FLAG_MAX_PREFIX_OUT));
+                       (intmax_t)CHECK_FLAG(peer->af_flags[afi][safi],
+                                            PEER_FLAG_MAX_PREFIX_OUT));
                zlog_debug("%pBP Update Group Hash key: %u", peer, key);
        }
        return key;
index 881426d9c4624e250bd21855f863a3ff05ba47f5..d84d0919f722d3f5c58d531e59a2134c0b8994df 100644 (file)
@@ -53,6 +53,7 @@
 #include "bgpd/bgp_debug.h"
 #include "bgpd/bgp_errors.h"
 #include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_nht.h"
 #include "bgpd/bgp_nexthop.h"
 #include "bgpd/bgp_network.h"
 #include "bgpd/bgp_open.h"
@@ -16507,7 +16508,7 @@ static bool peergroup_flag_check(struct peer *peer, uint64_t flag)
 }
 
 static bool peergroup_af_flag_check(struct peer *peer, afi_t afi, safi_t safi,
-                                   uint32_t flag)
+                                   uint64_t flag)
 {
        if (!peer_group_active(peer)) {
                if (CHECK_FLAG(peer->af_flags_invert[afi][safi], flag))
@@ -18124,6 +18125,84 @@ static void bgp_config_end(void)
                         bgp_post_config_delay, &t_bgp_cfg);
 }
 
+static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
+{
+       int write = 0;
+       struct interface *ifp;
+       struct bgp_interface *iifp;
+
+       FOR_ALL_INTERFACES (vrf, ifp) {
+               iifp = ifp->info;
+               if (!iifp)
+                       continue;
+
+               if_vty_config_start(vty, ifp);
+
+               if (CHECK_FLAG(iifp->flags,
+                              BGP_INTERFACE_MPLS_BGP_FORWARDING)) {
+                       vty_out(vty, " mpls bgp forwarding\n");
+                       write++;
+               }
+
+               if_vty_config_end(vty);
+       }
+
+       return write;
+}
+
+/* Configuration write function for bgpd. */
+static int config_write_interface(struct vty *vty)
+{
+       int write = 0;
+       struct vrf *vrf = NULL;
+
+       /* Display all VRF aware OSPF interface configuration */
+       RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+               write += config_write_interface_one(vty, vrf);
+       }
+
+       return write;
+}
+
+DEFPY(mpls_bgp_forwarding, mpls_bgp_forwarding_cmd,
+      "[no$no] mpls bgp forwarding",
+      NO_STR MPLS_STR BGP_STR
+      "Enable MPLS forwarding for eBGP directly connected peers\n")
+{
+       bool check;
+       struct bgp_interface *iifp;
+
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       iifp = ifp->info;
+       if (!iifp) {
+               vty_out(vty, "Interface %s not available\n", ifp->name);
+               return CMD_WARNING_CONFIG_FAILED;
+       }
+       check = CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING);
+       if (check != !no) {
+               if (no)
+                       UNSET_FLAG(iifp->flags,
+                                  BGP_INTERFACE_MPLS_BGP_FORWARDING);
+               else
+                       SET_FLAG(iifp->flags,
+                                BGP_INTERFACE_MPLS_BGP_FORWARDING);
+               /* trigger a nht update on eBGP sessions */
+               if (if_is_operative(ifp))
+                       bgp_nht_ifp_up(ifp);
+       }
+       return CMD_SUCCESS;
+}
+
+/* Initialization of BGP interface. */
+static void bgp_vty_if_init(void)
+{
+       /* Install interface node. */
+       if_cmd_init(config_write_interface);
+
+       /* "mpls bgp forwarding" commands. */
+       install_element(INTERFACE_NODE, &mpls_bgp_forwarding_cmd);
+}
+
 void bgp_vty_init(void)
 {
        cmd_variable_handler_register(bgp_var_neighbor);
@@ -19581,6 +19660,8 @@ void bgp_vty_init(void)
        install_element(BGP_SRV6_NODE, &no_bgp_srv6_locator_cmd);
        install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd);
        install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd);
+
+       bgp_vty_if_init();
 }
 
 #include "memory.h"
index 85e25b88a4668df89d0bf17613ec0e66492d1c48..7dfb5046dd2f1dba3d9c2b297e25c9fbb3348dc3 100644 (file)
@@ -75,6 +75,8 @@ struct zclient *zclient = NULL;
 DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp),
            (bgp, ifp));
 
+DEFINE_MTYPE_STATIC(BGPD, BGP_IF_INFO, "BGP interface context");
+
 /* Can we install into zebra? */
 static inline bool bgp_install_info_to_zebra(struct bgp *bgp)
 {
@@ -3335,6 +3337,31 @@ static zclient_handler *const bgp_handlers[] = {
                bgp_zebra_process_srv6_locator_chunk,
 };
 
+static int bgp_if_new_hook(struct interface *ifp)
+{
+       struct bgp_interface *iifp;
+
+       if (ifp->info)
+               return 0;
+       iifp = XCALLOC(MTYPE_BGP_IF_INFO, sizeof(struct bgp_interface));
+       ifp->info = iifp;
+
+       return 0;
+}
+
+static int bgp_if_delete_hook(struct interface *ifp)
+{
+       XFREE(MTYPE_BGP_IF_INFO, ifp->info);
+       return 0;
+}
+
+void bgp_if_init(void)
+{
+       /* Initialize Zebra interface data structure. */
+       hook_register_prio(if_add, 0, bgp_if_new_hook);
+       hook_register_prio(if_del, 0, bgp_if_delete_hook);
+}
+
 void bgp_zebra_init(struct thread_master *master, unsigned short instance)
 {
        zclient_num_connects = 0;
index 17f46e49cc6af5b41923a6e66ab29a0c210bbaa4..0a4106941141bdeda4e600c8f13bba9d72accaa3 100644 (file)
@@ -35,6 +35,7 @@
 
 extern void bgp_zebra_init(struct thread_master *master,
                           unsigned short instance);
+extern void bgp_if_init(void);
 extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
 extern uint32_t bgp_zebra_tm_get_id(void);
 extern bool bgp_zebra_tm_chunk_obtained(void);
index 9a9e287da91336cafbbeb3d3390ef1e91189d330..4c151b2d374398862e00ee88758dabfe40b943fd 100644 (file)
@@ -969,7 +969,7 @@ int peer_af_flag_check(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag)
 }
 
 void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
-                         uint32_t flag)
+                         uint64_t flag)
 {
        bool group_val;
 
@@ -1473,11 +1473,6 @@ void peer_xfer_config(struct peer *peer_dst, struct peer *peer_src)
                peer_dst->weight[afi][safi] = peer_src->weight[afi][safi];
                peer_dst->addpath_type[afi][safi] =
                        peer_src->addpath_type[afi][safi];
-               if (peer_src->soo[afi][safi]) {
-                       ecommunity_free(&peer_dst->soo[afi][safi]);
-                       peer_dst->soo[afi][safi] =
-                               ecommunity_dup(peer_src->soo[afi][safi]);
-               }
        }
 
        for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) {
@@ -2002,8 +1997,8 @@ static void peer_group2peer_config_copy_af(struct peer_group *group,
 {
        int in = FILTER_IN;
        int out = FILTER_OUT;
-       uint32_t flags_tmp;
-       uint32_t pflags_ovrd;
+       uint64_t flags_tmp;
+       uint64_t pflags_ovrd;
        uint8_t *pfilter_ovrd;
        struct peer *conf;
 
@@ -4579,7 +4574,7 @@ int peer_flag_unset(struct peer *peer, uint64_t flag)
 }
 
 static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
-                              uint32_t flag, bool set)
+                              uint64_t flag, bool set)
 {
        int found;
        int size;
@@ -4762,12 +4757,12 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
        return 0;
 }
 
-int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag)
+int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag)
 {
        return peer_af_flag_modify(peer, afi, safi, flag, 1);
 }
 
-int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag)
+int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag)
 {
        return peer_af_flag_modify(peer, afi, safi, flag, 0);
 }
index 1d04ccee429780532f6c58268f34747686ff7213..28883c9e7c71b0083a317c7678138fb2d4cd9999 100644 (file)
@@ -778,6 +778,11 @@ struct bgp {
 };
 DECLARE_QOBJ_TYPE(bgp);
 
+struct bgp_interface {
+#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0)
+       uint32_t flags;
+};
+
 DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp));
 DECLARE_HOOK(bgp_inst_config_write,
                (struct bgp *bgp, struct vty *vty),
@@ -1380,38 +1385,38 @@ struct peer {
         * specific attributes are being treated the exact same way as global
         * peer attributes.
         */
-       uint32_t af_flags_override[AFI_MAX][SAFI_MAX];
-       uint32_t af_flags_invert[AFI_MAX][SAFI_MAX];
-       uint32_t af_flags[AFI_MAX][SAFI_MAX];
-#define PEER_FLAG_SEND_COMMUNITY            (1U << 0) /* send-community */
-#define PEER_FLAG_SEND_EXT_COMMUNITY        (1U << 1) /* send-community ext. */
-#define PEER_FLAG_NEXTHOP_SELF              (1U << 2) /* next-hop-self */
-#define PEER_FLAG_REFLECTOR_CLIENT          (1U << 3) /* reflector-client */
-#define PEER_FLAG_RSERVER_CLIENT            (1U << 4) /* route-server-client */
-#define PEER_FLAG_SOFT_RECONFIG             (1U << 5) /* soft-reconfiguration */
-#define PEER_FLAG_AS_PATH_UNCHANGED         (1U << 6) /* transparent-as */
-#define PEER_FLAG_NEXTHOP_UNCHANGED         (1U << 7) /* transparent-next-hop */
-#define PEER_FLAG_MED_UNCHANGED             (1U << 8) /* transparent-next-hop */
-#define PEER_FLAG_DEFAULT_ORIGINATE         (1U << 9) /* default-originate */
-#define PEER_FLAG_REMOVE_PRIVATE_AS         (1U << 10) /* remove-private-as */
-#define PEER_FLAG_ALLOWAS_IN                (1U << 11) /* set allowas-in */
-#define PEER_FLAG_ORF_PREFIX_SM             (1U << 12) /* orf capability send-mode */
-#define PEER_FLAG_ORF_PREFIX_RM             (1U << 13) /* orf capability receive-mode */
-#define PEER_FLAG_MAX_PREFIX                (1U << 14) /* maximum prefix */
-#define PEER_FLAG_MAX_PREFIX_WARNING        (1U << 15) /* maximum prefix warning-only */
-#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED   (1U << 16) /* leave link-local nexthop unchanged */
-#define PEER_FLAG_FORCE_NEXTHOP_SELF        (1U << 17) /* next-hop-self force */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL     (1U << 18) /* remove-private-as all */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1U << 19) /* remove-private-as replace-as */
-#define PEER_FLAG_AS_OVERRIDE               (1U << 20) /* as-override */
-#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1U << 21) /* remove-private-as all replace-as */
-#define PEER_FLAG_WEIGHT                    (1U << 24) /* weight */
-#define PEER_FLAG_ALLOWAS_IN_ORIGIN         (1U << 25) /* allowas-in origin */
-#define PEER_FLAG_SEND_LARGE_COMMUNITY      (1U << 26) /* Send large Communities */
-#define PEER_FLAG_MAX_PREFIX_OUT            (1U << 27) /* outgoing maximum prefix */
-#define PEER_FLAG_MAX_PREFIX_FORCE          (1U << 28) /* maximum-prefix <num> force */
-#define PEER_FLAG_DISABLE_ADDPATH_RX        (1U << 29) /* disable-addpath-rx */
-#define PEER_FLAG_SOO                       (1U << 30) /* soo */
+       uint64_t af_flags_override[AFI_MAX][SAFI_MAX];
+       uint64_t af_flags_invert[AFI_MAX][SAFI_MAX];
+       uint64_t af_flags[AFI_MAX][SAFI_MAX];
+#define PEER_FLAG_SEND_COMMUNITY (1ULL << 0)
+#define PEER_FLAG_SEND_EXT_COMMUNITY (1ULL << 1)
+#define PEER_FLAG_NEXTHOP_SELF (1ULL << 2)
+#define PEER_FLAG_REFLECTOR_CLIENT (1ULL << 3)
+#define PEER_FLAG_RSERVER_CLIENT (1ULL << 4)
+#define PEER_FLAG_SOFT_RECONFIG (1ULL << 5)
+#define PEER_FLAG_AS_PATH_UNCHANGED (1ULL << 6)
+#define PEER_FLAG_NEXTHOP_UNCHANGED (1ULL << 7)
+#define PEER_FLAG_MED_UNCHANGED (1ULL << 8)
+#define PEER_FLAG_DEFAULT_ORIGINATE (1ULL << 9)
+#define PEER_FLAG_REMOVE_PRIVATE_AS (1ULL << 10)
+#define PEER_FLAG_ALLOWAS_IN (1ULL << 11)
+#define PEER_FLAG_ORF_PREFIX_SM (1ULL << 12)
+#define PEER_FLAG_ORF_PREFIX_RM (1ULL << 13)
+#define PEER_FLAG_MAX_PREFIX (1ULL << 14)
+#define PEER_FLAG_MAX_PREFIX_WARNING (1ULL << 15)
+#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED (1ULL << 16)
+#define PEER_FLAG_FORCE_NEXTHOP_SELF (1ULL << 17)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL (1ULL << 18)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1ULL << 19)
+#define PEER_FLAG_AS_OVERRIDE (1ULL << 20)
+#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1ULL << 21)
+#define PEER_FLAG_WEIGHT (1ULL << 24)
+#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1ULL << 25)
+#define PEER_FLAG_SEND_LARGE_COMMUNITY (1ULL << 26)
+#define PEER_FLAG_MAX_PREFIX_OUT (1ULL << 27)
+#define PEER_FLAG_MAX_PREFIX_FORCE (1ULL << 28)
+#define PEER_FLAG_DISABLE_ADDPATH_RX (1ULL << 29)
+#define PEER_FLAG_SOO (1ULL << 30)
 
        enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX];
 
@@ -2180,11 +2185,13 @@ extern int peer_flag_set(struct peer *peer, uint64_t flag);
 extern int peer_flag_unset(struct peer *peer, uint64_t flag);
 extern void peer_flag_inherit(struct peer *peer, uint64_t flag);
 
-extern int peer_af_flag_set(struct peer *, afi_t, safi_t, uint32_t);
-extern int peer_af_flag_unset(struct peer *, afi_t, safi_t, uint32_t);
+extern int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi,
+                           uint64_t flag);
+extern int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi,
+                             uint64_t flag);
 extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t);
 extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
-                                uint32_t flag);
+                                uint64_t flag);
 extern void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
                               enum peer_change_type type);
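
The per-AF flag storage above grows from uint32_t to uint64_t, and every PEER_FLAG_* constant switches to a 1ULL base: the 32-bit flag space is nearly exhausted (PEER_FLAG_SOO already occupies bit 30), and the ULL suffix keeps shifts of future bit positions (32 and above) well defined. A standalone sketch (plain C, not FRR code) of the shift-width pitfall this avoids:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t af_flags = 0;

		/* 1U is a 32-bit constant, so shifting it by 32 or more is
		 * undefined behaviour; flags past bit 31 must be built from
		 * a 64-bit constant such as 1ULL. */
		const uint64_t flag_hi = 1ULL << 32; /* 1U << 32 would be UB */

		af_flags |= flag_hi;
		printf("bit 32 is %s\n", (af_flags & flag_hi) ? "set" : "clear");
		return 0;
	}
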
 
index 9e3a095529e6f99eae6706a501b72c61345b5662..b1eeb937e180b0cc5ec607392763489c332fa893 100644 (file)
@@ -232,6 +232,7 @@ clippy_scan += \
        bgpd/bgp_bmp.c \
        bgpd/bgp_debug.c \
        bgpd/bgp_evpn_vty.c \
+       bgpd/bgp_labelpool.c \
        bgpd/bgp_route.c \
        bgpd/bgp_routemap.c \
        bgpd/bgp_rpki.c \
index 033b7536393de79216c02cf0788cfdd2a835100c..bd5767beba6c2b5f21f219d2bad3977a10a49890 100644 (file)
@@ -2720,6 +2720,25 @@ are reached using *core* MPLS labels which are distributed using LDP or BGP
 labeled unicast.  *bgpd* also supports inter-VRF route leaking.
 
 
+L3VPN over GRE interfaces
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In MPLS-VPN or SRv6-VPN, an L3VPN next-hop entry is installed only if the
+selected path carries, respectively, an MPLS label or a valid IPv6 SID.
+That check can be skipped when the path resolving the next hop goes over a
+GRE interface and a route-map containing the following set action is
+configured inbound on the ipv4-vpn or ipv6-vpn address family:
+
+.. clicmd:: set l3vpn next-hop encapsulation gre
+
+With this set action, an incoming BGP L3VPN entry is accepted provided its
+next hop resolves over a path whose outgoing interface is the GRE tunnel.
+The remote endpoint should be configured just behind the GRE tunnel; its
+configuration may vary depending on whether it acts as an edge endpoint or
+not, but in all cases incoming MPLS traffic received at this endpoint must
+be treated as a valid path for L3VPN.
+
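+A minimal configuration sketch might look as follows; the AS numbers,
+neighbor address and route-map name are illustrative only, and a GRE tunnel
+interface toward the remote endpoint is assumed to exist already::
+
+   router bgp 65500
+    neighbor 192.0.2.1 remote-as 65501
+    address-family ipv4 vpn
+     neighbor 192.0.2.1 activate
+     neighbor 192.0.2.1 route-map RMAP_GRE in
+    exit-address-family
+   !
+   route-map RMAP_GRE permit 10
+    set l3vpn next-hop encapsulation gre
+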
 .. _bgp-vrf-route-leaking:
 
 VRF Route Leaking
@@ -2848,6 +2867,13 @@ added for all routes from the CPEs. Routes are validated and prevented from
 being sent back to the same CPE (e.g.: multi-site). This is especially needed
 when using ``as-override`` or ``allowas-in`` to prevent routing loops.
 
+.. clicmd:: mpls bgp forwarding
+
+It is possible to permit BGP to install VPN prefixes that carry no
+transport label by issuing this command under the interface configuration
+context. With this configuration, VPN prefixes originated from an eBGP
+session with a directly connected next hop are installed.
+
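+For example, to enable it on the interface facing a directly connected
+eBGP peer (the interface name is illustrative only)::
+
+   interface eth0
+    mpls bgp forwarding
+   exit
+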
 .. _bgp-l3vpn-srv6:
 
 L3VPN SRv6
index 05c9eeb75585b3c88dc2e8ca44980cf248e067e1..5e222576ca3519d5eebb6d8f506855a94689cf35 100644 (file)
@@ -339,6 +339,9 @@ Route Map Set Command
    Set the color of a SR-TE Policy to be applied to a learned route. The SR-TE
    Policy is uniquely determined by the color and the BGP nexthop.
 
+.. clicmd:: set l3vpn next-hop encapsulation gre
+
+   Accept L3VPN traffic over GRE encapsulation.
 
 .. _route-map-call-command:
 
index 6b3a0afc12cd4daf490cf167f6783630c0fd2a65..13db38ce91e2d4501e87359b09b1f4e8dba1a7e4 100644 (file)
@@ -69,8 +69,8 @@ int eigrp_sock_init(struct vrf *vrf)
                        AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id,
                        vrf->vrf_id != VRF_DEFAULT ? vrf->name : NULL);
                if (eigrp_sock < 0) {
-                       zlog_err("eigrp_read_sock_init: socket: %s",
-                                safe_strerror(errno));
+                       zlog_err("%s: socket: %s",
+                                __func__, safe_strerror(errno));
                        exit(1);
                }
 
index dd5ba8a1641ebcdd58cdddc130d1c8311737f084..1ea6f9813b11c51c7f349fc2f672153ffccd98bf 100644 (file)
@@ -727,8 +727,8 @@ static struct stream *eigrp_recv_packet(struct eigrp *eigrp,
        if ((unsigned int)ret < sizeof(*iph)) /* ret must be > 0 now */
        {
                zlog_warn(
-                       "eigrp_recv_packet: discarding runt packet of length %d (ip header size is %u)",
-                       ret, (unsigned int)sizeof(*iph));
+                       "%s: discarding runt packet of length %d (ip header size is %u)",
+                       __func__, ret, (unsigned int)sizeof(*iph));
                return NULL;
        }
 
@@ -772,8 +772,8 @@ static struct stream *eigrp_recv_packet(struct eigrp *eigrp,
 
        if (ret != ip_len) {
                zlog_warn(
-                       "eigrp_recv_packet read length mismatch: ip_len is %d, but recvmsg returned %d",
-                       ip_len, ret);
+                       "%s read length mismatch: ip_len is %d, but recvmsg returned %d",
+                       __func__, ip_len, ret);
                return NULL;
        }
 
index 7bc7be97061d2a36e0f5423ceed8010f4f309694..0ec957481351cb4571f4fc96164af16350bdf861 100644 (file)
@@ -163,7 +163,8 @@ static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id)
        if (eigrp->fd < 0) {
                flog_err_sys(
                        EC_LIB_SOCKET,
-                       "eigrp_new: fatal error: eigrp_sock_init was unable to open a socket");
+                       "%s: fatal error: eigrp_sock_init was unable to open a socket",
+                       __func__);
                exit(1);
        }
 
index 178fbe790374a7a862944c1fbc06d2b65daa765f..dcc4ed6e42f52297916c1175edf28ca95bee5e38 100644 (file)
@@ -217,6 +217,11 @@ void isis_circuit_del(struct isis_circuit *circuit)
        list_delete(&circuit->ipv6_link);
        list_delete(&circuit->ipv6_non_link);
 
+       if (circuit->ext) {
+               isis_del_ext_subtlvs(circuit->ext);
+               circuit->ext = NULL;
+       }
+
        XFREE(MTYPE_TMP, circuit->bfd_config.profile);
        XFREE(MTYPE_ISIS_CIRCUIT, circuit->tag);
 
index 79b167718b8f5e042c51cbe26e1044a8d2f13fbe..dbe4a017bc91d7d5ce9f894821d9a8b4ce93ad1c 100644 (file)
@@ -54,7 +54,6 @@
 #include "isisd/isis_dr.h"
 #include "isisd/isis_zebra.h"
 
-DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE,    "ISIS MPLS_TE parameters");
 DEFINE_MTYPE_STATIC(ISISD, ISIS_PLIST_NAME, "ISIS prefix-list name");
 
 /*
@@ -86,12 +85,17 @@ int isis_instance_create(struct nb_cb_create_args *args)
 int isis_instance_destroy(struct nb_cb_destroy_args *args)
 {
        struct isis_area *area;
+       struct isis *isis;
 
        if (args->event != NB_EV_APPLY)
                return NB_OK;
        area = nb_running_unset_entry(args->dnode);
-
+       isis = area->isis;
        isis_area_destroy(area);
+
+       if (listcount(isis->area_list) == 0)
+               isis_finish(isis);
+
        return NB_OK;
 }
 
@@ -1787,45 +1791,13 @@ int isis_instance_log_adjacency_changes_modify(struct nb_cb_modify_args *args)
  */
 int isis_instance_mpls_te_create(struct nb_cb_create_args *args)
 {
-       struct listnode *node;
        struct isis_area *area;
-       struct isis_circuit *circuit;
 
        if (args->event != NB_EV_APPLY)
                return NB_OK;
 
        area = nb_running_get_entry(args->dnode, NULL, true);
-       if (area->mta == NULL) {
-
-               struct mpls_te_area *new;
-
-               zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering",
-                          area->area_tag);
-
-               new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area));
-
-               /* Initialize MPLS_TE structure */
-               new->status = enable;
-               new->level = 0;
-               new->inter_as = off;
-               new->interas_areaid.s_addr = 0;
-               new->router_id.s_addr = 0;
-               new->ted = ls_ted_new(1, "ISIS", 0);
-               if (!new->ted)
-                       zlog_warn("Unable to create Link State Data Base");
-
-               area->mta = new;
-       } else {
-               area->mta->status = enable;
-       }
-
-       /* Initialize Link State Database */
-       if (area->mta->ted)
-               isis_te_init_ted(area);
-
-       /* Update Extended TLVs according to Interface link parameters */
-       for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit))
-               isis_link_params_update(circuit, circuit->interface);
+       isis_mpls_te_create(area);
 
        /* Reoriginate STD_TE & GMPLS circuits */
        lsp_regenerate_schedule(area, area->is_type, 0);
@@ -1835,35 +1807,16 @@ int isis_instance_mpls_te_create(struct nb_cb_create_args *args)
 
 int isis_instance_mpls_te_destroy(struct nb_cb_destroy_args *args)
 {
-       struct listnode *node;
        struct isis_area *area;
-       struct isis_circuit *circuit;
 
        if (args->event != NB_EV_APPLY)
                return NB_OK;
 
        area = nb_running_get_entry(args->dnode, NULL, true);
-       if (IS_MPLS_TE(area->mta))
-               area->mta->status = disable;
-       else
-               return NB_OK;
-
-       /* Remove Link State Database */
-       ls_ted_del_all(&area->mta->ted);
-
-       /* Flush LSP if circuit engage */
-       for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
-               if (!IS_EXT_TE(circuit->ext))
-                       continue;
-
-               /* disable MPLS_TE Circuit keeping SR one's */
-               if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID))
-                       circuit->ext->status = EXT_ADJ_SID;
-               else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID))
-                       circuit->ext->status = EXT_LAN_ADJ_SID;
-               else
-                       circuit->ext->status = 0;
-       }
+       if (!IS_MPLS_TE(area->mta))
+               return NB_OK;
+
+       isis_mpls_te_disable(area);
 
        /* Reoriginate STD_TE & GMPLS circuits */
        lsp_regenerate_schedule(area, area->is_type, 0);
index 259047ff66eef8588ce42d80e8d7c21880b38a4b..f70840a637577db4aad55e19f262685972957e2c 100644 (file)
@@ -1253,6 +1253,9 @@ void isis_sr_area_term(struct isis_area *area)
        if (area->srdb.enabled)
                isis_sr_stop(area);
 
+       /* Free Adjacency SID list */
+       list_delete(&srdb->adj_sids);
+
        /* Clear Prefix-SID configuration. */
        while (srdb_prefix_cfg_count(&srdb->config.prefix_sids) > 0) {
                struct sr_prefix_cfg *pcfg;
index 3faff1cc4d75d50a97c4f5cd7bac545b5ba60a96..0093279cde362e3d8f636230c37a35f449104848 100644 (file)
 #include "isisd/isis_te.h"
 #include "isisd/isis_zebra.h"
 
+DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE,    "ISIS MPLS_TE parameters");
+
 /*------------------------------------------------------------------------*
  * Following are control functions for MPLS-TE parameters management.
  *------------------------------------------------------------------------*/
 
+/**
+ * Create MPLS Traffic Engineering structure which belongs to given area.
+ *
+ * @param area IS-IS Area
+ */
+void isis_mpls_te_create(struct isis_area *area)
+{
+       struct listnode *node;
+       struct isis_circuit *circuit;
+
+       if (!area)
+               return;
+
+       if (area->mta == NULL) {
+
+               struct mpls_te_area *new;
+
+               zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering",
+                          area->area_tag);
+
+               new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area));
+
+               /* Initialize MPLS_TE structure */
+               new->status = enable;
+               new->level = 0;
+               new->inter_as = off;
+               new->interas_areaid.s_addr = 0;
+               new->router_id.s_addr = 0;
+               new->ted = ls_ted_new(1, "ISIS", 0);
+               if (!new->ted)
+                       zlog_warn("Unable to create Link State Data Base");
+
+               area->mta = new;
+       } else {
+               area->mta->status = enable;
+       }
+
+       /* Initialize Link State Database */
+       if (area->mta->ted)
+               isis_te_init_ted(area);
+
+       /* Update Extended TLVs according to Interface link parameters */
+       for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit))
+               isis_link_params_update(circuit, circuit->interface);
+}
+
+/**
+ * Disable MPLS Traffic Engineering structure which belongs to given area.
+ *
+ * @param area IS-IS Area
+ */
+void isis_mpls_te_disable(struct isis_area *area)
+{
+       struct listnode *node;
+       struct isis_circuit *circuit;
+
+       if (!area->mta)
+               return;
+
+       area->mta->status = disable;
+
+       /* Remove Link State Database */
+       ls_ted_del_all(&area->mta->ted);
+
+       /* Disable Extended SubTLVs on all circuit */
+       for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
+               if (!IS_EXT_TE(circuit->ext))
+                       continue;
+
+               /* disable MPLS_TE Circuit keeping SR one's */
+               if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID))
+                       circuit->ext->status = EXT_ADJ_SID;
+               else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID))
+                       circuit->ext->status = EXT_LAN_ADJ_SID;
+               else
+                       circuit->ext->status = 0;
+       }
+}
+
+void isis_mpls_te_term(struct isis_area *area)
+{
+       struct listnode *node;
+       struct isis_circuit *circuit;
+
+       if (!area->mta)
+               return;
+
+       zlog_info("TE(%s): Terminate MPLS TE", __func__);
+       /* Remove Link State Database */
+       ls_ted_del_all(&area->mta->ted);
+
+       /* Remove Extended SubTLVs */
+       zlog_info(" |- Remove Extended SubTLVS for all circuit");
+       for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
+               zlog_info("   |- Call isis_del_ext_subtlvs()");
+               isis_del_ext_subtlvs(circuit->ext);
+               circuit->ext = NULL;
+       }
+
+       zlog_info(" |- Free MTA structure at %p", area->mta);
+       XFREE(MTYPE_ISIS_MPLS_TE, area->mta);
+}
+
 /* Main initialization / update function of the MPLS TE Circuit context */
 /* Call when interface TE Link parameters are modified */
 void isis_link_params_update(struct isis_circuit *circuit,
index 56954073dda708a769058c637b04f00e139ef5bc..03525962f52a7a56f6d5c1dac2c297cfd56c768f 100644 (file)
@@ -123,6 +123,9 @@ enum lsp_event { LSP_UNKNOWN, LSP_ADD, LSP_UPD, LSP_DEL, LSP_INC, LSP_TICK };
 
 /* Prototypes. */
 void isis_mpls_te_init(void);
+void isis_mpls_te_create(struct isis_area *area);
+void isis_mpls_te_disable(struct isis_area *area);
+void isis_mpls_te_term(struct isis_area *area);
 void isis_link_params_update(struct isis_circuit *, struct interface *);
 int isis_mpls_te_update(struct interface *);
 void isis_te_lsp_event(struct isis_lsp *lsp, enum lsp_event event);
index b3c3fd4b0b4db058ad8ae750e3766235f795ae96..8907fa256b288b7a3686e64a9090913dede978ce 100644 (file)
@@ -139,6 +139,25 @@ struct isis_ext_subtlvs *isis_alloc_ext_subtlvs(void)
        return ext;
 }
 
+void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext)
+{
+       struct isis_item *item, *next_item;
+
+       if (!ext)
+               return;
+
+       /* First, free Adj SID and LAN Adj SID list if needed */
+       for (item = ext->adj_sid.head; item; item = next_item) {
+               next_item = item->next;
+               XFREE(MTYPE_ISIS_SUBTLV, item);
+       }
+       for (item = ext->lan_sid.head; item; item = next_item) {
+               next_item = item->next;
+               XFREE(MTYPE_ISIS_SUBTLV, item);
+       }
+       XFREE(MTYPE_ISIS_SUBTLV, ext);
+}
+
 /*
  * mtid parameter is used to determine if Adjacency is related to IPv4 or IPv6
  * Multi-Topology. Special 4096 value i.e. first R flag set is used to indicate
@@ -648,18 +667,7 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
 
 static void free_item_ext_subtlvs(struct  isis_ext_subtlvs *exts)
 {
-       struct isis_item *item, *next_item;
-
-       /* First, free Adj SID and LAN Adj SID list if needed */
-       for (item = exts->adj_sid.head; item; item = next_item) {
-               next_item = item->next;
-               XFREE(MTYPE_ISIS_SUBTLV, item);
-       }
-       for (item = exts->lan_sid.head; item; item = next_item) {
-               next_item = item->next;
-               XFREE(MTYPE_ISIS_SUBTLV, item);
-       }
-       XFREE(MTYPE_ISIS_SUBTLV, exts);
+       isis_del_ext_subtlvs(exts);
 }
 
 static int pack_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
@@ -1059,6 +1067,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s,
                                                log, indent,
                                                "TLV size does not match expected size for Adjacency SID!\n");
                                        stream_forward_getp(s, subtlv_len - 2);
+                                       XFREE(MTYPE_ISIS_SUBTLV, adj);
                                        break;
                                }
 
@@ -1070,6 +1079,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s,
                                                log, indent,
                                                "TLV size does not match expected size for Adjacency SID!\n");
                                        stream_forward_getp(s, subtlv_len - 2);
+                                       XFREE(MTYPE_ISIS_SUBTLV, adj);
                                        break;
                                }
 
@@ -1114,6 +1124,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s,
                                        stream_forward_getp(
                                                s, subtlv_len - 2
                                                           - ISIS_SYS_ID_LEN);
+                                       XFREE(MTYPE_ISIS_SUBTLV, lan);
                                        break;
                                }
 
@@ -1127,6 +1138,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s,
                                        stream_forward_getp(
                                                s, subtlv_len - 2
                                                           - ISIS_SYS_ID_LEN);
+                                       XFREE(MTYPE_ISIS_SUBTLV, lan);
                                        break;
                                }
 
@@ -1892,6 +1904,7 @@ static void format_item_extended_reach(uint16_t mtid, struct isis_item *i,
 static void free_item_extended_reach(struct isis_item *i)
 {
        struct isis_extended_reach *item = (struct isis_extended_reach *)i;
+
        if (item->subtlvs != NULL)
                free_item_ext_subtlvs(item->subtlvs);
        XFREE(MTYPE_ISIS_TLV, item);
index 157450dc6c1804cd81336b31f0d1ace41f6b2466..905032bda1aff3912d97b66067df0b69973231de 100644 (file)
@@ -597,6 +597,7 @@ void isis_tlvs_add_ipv6_dstsrc_reach(struct isis_tlvs *tlvs, uint16_t mtid,
                                     struct prefix_ipv6 *src,
                                     uint32_t metric);
 struct isis_ext_subtlvs *isis_alloc_ext_subtlvs(void);
+void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext);
 void isis_tlvs_add_adj_sid(struct isis_ext_subtlvs *exts,
                           struct isis_adj_sid *adj);
 void isis_tlvs_del_adj_sid(struct isis_ext_subtlvs *exts,
index 3fd2476ad126b0131a9f2495bdfb39cffde0126f..bce3dbb77b4673e7612e767611c7c776d834fcbf 100644 (file)
@@ -224,6 +224,12 @@ struct isis *isis_new(const char *vrf_name)
 
 void isis_finish(struct isis *isis)
 {
+       struct isis_area *area;
+       struct listnode *node, *nnode;
+
+       for (ALL_LIST_ELEMENTS(isis->area_list, node, nnode, area))
+               isis_area_destroy(area);
+
        struct vrf *vrf = NULL;
 
        listnode_delete(im->isis, isis);
@@ -273,6 +279,13 @@ void isis_area_del_circuit(struct isis_area *area, struct isis_circuit *circuit)
        isis_csm_state_change(ISIS_DISABLE, circuit, area);
 }
 
+static void delete_area_addr(void *arg)
+{
+       struct area_addr *addr = (struct area_addr *)arg;
+
+       XFREE(MTYPE_ISIS_AREA_ADDR, addr);
+}
+
 struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
 {
        struct isis_area *area;
@@ -318,6 +331,8 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
        area->circuit_list = list_new();
        area->adjacency_list = list_new();
        area->area_addrs = list_new();
+       area->area_addrs->del = delete_area_addr;
+
        if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
                thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
        flags_initialize(&area->flags);
@@ -481,17 +496,12 @@ void isis_area_destroy(struct isis_area *area)
 {
        struct listnode *node, *nnode;
        struct isis_circuit *circuit;
-       struct area_addr *addr;
 
        QOBJ_UNREG(area);
 
        if (fabricd)
                fabricd_finish(area->fabricd);
 
-       /* Disable MPLS if necessary before flooding LSP */
-       if (IS_MPLS_TE(area->mta))
-               area->mta->status = disable;
-
        if (area->circuit_list) {
                for (ALL_LIST_ELEMENTS(area->circuit_list, node, nnode,
                                       circuit))
@@ -499,6 +509,9 @@ void isis_area_destroy(struct isis_area *area)
 
                list_delete(&area->circuit_list);
        }
+       if (area->flags.free_idcs)
+               list_delete(&area->flags.free_idcs);
+
        list_delete(&area->adjacency_list);
 
        lsp_db_fini(&area->lspdb[0]);
@@ -510,6 +523,8 @@ void isis_area_destroy(struct isis_area *area)
 
        isis_sr_area_term(area);
 
+       isis_mpls_te_term(area);
+
        spftree_area_del(area);
 
        if (area->spf_timer[0])
@@ -525,11 +540,7 @@ void isis_area_destroy(struct isis_area *area)
        if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
                isis_redist_area_finish(area);
 
-       for (ALL_LIST_ELEMENTS(area->area_addrs, node, nnode, addr)) {
-               list_delete_node(area->area_addrs, node);
-               XFREE(MTYPE_ISIS_AREA_ADDR, addr);
-       }
-       area->area_addrs = NULL;
+       list_delete(&area->area_addrs);
 
        for (int i = SPF_PREFIX_PRIO_CRITICAL; i <= SPF_PREFIX_PRIO_MEDIUM;
             i++) {
@@ -554,10 +565,6 @@ void isis_area_destroy(struct isis_area *area)
 
        area_mt_finish(area);
 
-       if (listcount(area->isis->area_list) == 0) {
-               isis_finish(area->isis);
-       }
-
        XFREE(MTYPE_ISIS_AREA, area);
 
 }
index a3f361ed9de66babb90724cdb2de8f761ce70b7f..9af4053dafb987c7e0f7fb0e8859df46b7b6a201 100644 (file)
@@ -158,6 +158,75 @@ DECLARE_MTYPE(BITFIELD);
                (b) += (w * WORD_SIZE);                                        \
        } while (0)
 
+/*
+ * Find a clear bit in v and return it
+ * Start looking in the word containing bit position start_index.
+ * If necessary, wrap around after bit position max_index.
+ */
+static inline unsigned int
+bf_find_next_clear_bit_wrap(bitfield_t *v, word_t start_index, word_t max_index)
+{
+       int start_bit;
+       unsigned long i, offset, scanbits, wordcount_max, index_max;
+
+       if (start_index > max_index)
+               start_index = 0;
+
+       start_bit = start_index & (WORD_SIZE - 1);
+       wordcount_max = bf_index(max_index) + 1;
+
+       scanbits = WORD_SIZE;
+       for (i = bf_index(start_index); i < v->m; ++i) {
+               if (v->data[i] == WORD_MAX) {
+                       /* if the whole word is full move to the next */
+                       start_bit = 0;
+                       continue;
+               }
+               /* scan one word for clear bits */
+               if ((i == v->m - 1) && (v->m >= wordcount_max))
+                       /* max index could be only part of word */
+                       scanbits = (max_index % WORD_SIZE) + 1;
+               for (offset = start_bit; offset < scanbits; ++offset) {
+                       if (!((v->data[i] >> offset) & 1))
+                               return ((i * WORD_SIZE) + offset);
+               }
+               /* move to the next word */
+               start_bit = 0;
+       }
+
+       if (v->m < wordcount_max) {
+               /*
+                * We can expand bitfield, so no need to wrap.
+                * Return the index of the first bit of the next word.
+                * Assumption is that caller will call bf_set_bit which
+                * will allocate additional space.
+                */
+               v->m += 1;
+               v->data = (word_t *)realloc(v->data, v->m * sizeof(word_t));
+               v->data[v->m - 1] = 0;
+               return v->m * WORD_SIZE;
+       }
+
+       /*
+        * start looking for a clear bit at the start of the bitfield and
+        * stop when we reach start_index
+        */
+       scanbits = WORD_SIZE;
+       index_max = bf_index(start_index - 1);
+       for (i = 0; i <= index_max; ++i) {
+               if (i == index_max)
+                       scanbits = ((start_index - 1) % WORD_SIZE) + 1;
+               for (offset = start_bit; offset < scanbits; ++offset) {
+                       if (!((v->data[i] >> offset) & 1))
+                               return ((i * WORD_SIZE) + offset);
+               }
+               /* move to the next word */
+               start_bit = 0;
+       }
+
+       return WORD_MAX;
+}
+
 static inline unsigned int bf_find_next_set_bit(bitfield_t v,
                word_t start_index)
 {
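
A usage sketch for the new wrap-around helper; the wrapper name is hypothetical, the bf_init()/bf_set_bit() macros come from this header, and bf_set_bit() is assumed to grow the bitfield as the comment in the new helper states:

	/* Hand out the next free id after 'last', wrapping before 'max_id'.
	 * Returns WORD_MAX when no clear bit is available. */
	static inline unsigned int next_free_id(bitfield_t *ids,
						unsigned int last,
						unsigned int max_id)
	{
		unsigned int id;

		id = bf_find_next_clear_bit_wrap(ids, last + 1, max_id);
		if (id != WORD_MAX)
			bf_set_bit(*ids, id); /* claim it; may grow storage */

		return id;
	}
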
index 9529b79419407e09010aa28be7d957a56312fbcc..e6310465e333484752ef7970347cdcc8fdfbef3a 100644 (file)
@@ -812,17 +812,13 @@ int route_map_mark_updated(const char *name)
        return (ret);
 }
 
-static int route_map_clear_updated(struct route_map *map)
+static void route_map_clear_updated(struct route_map *map)
 {
-       int ret = -1;
-
        if (map) {
                map->to_be_processed = false;
                if (map->deleted)
                        route_map_free_map(map);
        }
-
-       return (ret);
 }
 
 /* Lookup route map.  If there isn't route map create one and return
index 1a8a94e375b7ab31ae58a15e07ef49045409d096..a3659258545f2886c1929316b73ea89d164b333a 100644 (file)
@@ -389,6 +389,8 @@ DECLARE_QOBJ_TYPE(route_map);
        (strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv4"))
 #define IS_SET_BGP_EVPN_GATEWAY_IP_IPV6(A)                                     \
        (strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv6"))
+#define IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(A)                              \
+       (strmatch(A, "frr-bgp-route-map:set-l3vpn-nexthop-encapsulation"))
 
 enum ecommunity_lb_type {
        EXPLICIT_BANDWIDTH,
index 59253942ad381a98dbca1588e4c51a138c7a8d46..6be5d15ec4f80aef36d935ca1567fc4b6057213d 100644 (file)
@@ -1263,6 +1263,11 @@ void route_map_action_show(struct vty *vty, const struct lyd_node *dnode,
                        yang_dnode_get_string(
                                dnode,
                                "./rmap-set-action/frr-bgp-route-map:evpn-gateway-ip-ipv6"));
+       } else if (IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(action)) {
+               vty_out(vty, " set l3vpn next-hop encapsulation %s\n",
+                       yang_dnode_get_string(
+                               dnode,
+                               "./rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation"));
        }
 }
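The new branch renders the l3vpn next-hop encapsulation set action when a route-map is displayed, in the same form it is entered; this matches the configuration used by the bgp_vpnv4_gre topotest later in this diff:

    route-map rmap permit 1
     set l3vpn next-hop encapsulation gre
    !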
 
index e0bb34a258547ccbe1a9ba0e08e04ece0c0937dc..6a36a0b123197b7c86feeb52fb355718d8b6e68b 100644 (file)
@@ -991,16 +991,14 @@ void zlog_init(const char *progname, const char *protoname,
        zlog_instance = instance;
 
        if (instance) {
-               snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
-                          "/var/tmp/frr/%s-%d.%ld",
-                          progname, instance, (long)getpid());
+               snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s-%d.%ld",
+                          TMPBASEDIR, progname, instance, (long)getpid());
 
                zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
                                           "%s[%d]: ", protoname, instance);
        } else {
-               snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
-                          "/var/tmp/frr/%s.%ld",
-                          progname, (long)getpid());
+               snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s.%ld",
+                          TMPBASEDIR, progname, (long)getpid());
 
                zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
                                           "%s: ", protoname);
index 1362554715abfd11bb78c12985d42772023f723f..0df0072f6d7cc548b55ce1bd5cdede25fde644f0 100644 (file)
@@ -3021,7 +3021,7 @@ int ospf_check_nbr_status(struct ospf *ospf)
 }
 
 
-static void ospf_maxage_lsa_remover(struct thread *thread)
+void ospf_maxage_lsa_remover(struct thread *thread)
 {
        struct ospf *ospf = THREAD_ARG(thread);
        struct ospf_lsa *lsa, *old;
@@ -3899,8 +3899,9 @@ void ospf_refresher_register_lsa(struct ospf *ospf, struct ospf_lsa *lsa)
        if (lsa->refresh_list < 0) {
                int delay;
                int min_delay =
-                       OSPF_LS_REFRESH_TIME - (2 * OSPF_LS_REFRESH_JITTER);
-               int max_delay = OSPF_LS_REFRESH_TIME - OSPF_LS_REFRESH_JITTER;
+                       ospf->lsa_refresh_timer - (2 * OSPF_LS_REFRESH_JITTER);
+               int max_delay =
+                       ospf->lsa_refresh_timer - OSPF_LS_REFRESH_JITTER;
 
                /* We want to refresh the LSA within OSPF_LS_REFRESH_TIME which
                 * is
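The refresh window is now derived from the configurable ospf->lsa_refresh_timer instead of the fixed OSPF_LS_REFRESH_TIME constant. A standalone sketch of the resulting window, assuming a 60 second OSPF_LS_REFRESH_JITTER and an operator-set timer of 300 seconds (both values illustrative, not taken from this diff):

    #include <stdio.h>

    int main(void)
    {
            int jitter = 60;                    /* assumed OSPF_LS_REFRESH_JITTER */
            int lsa_refresh_timer = 300;        /* e.g. "ospf lsa-refresh 300" */
            int min_delay = lsa_refresh_timer - 2 * jitter;   /* 180 s */
            int max_delay = lsa_refresh_timer - jitter;       /* 240 s */

            printf("LSA refresh scheduled between %d and %d seconds\n",
                   min_delay, max_delay);
            return 0;
    }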
index 4b3be15382f40e851f8024097e2fcaeeed208172..97c15d1e3c99c00457b3c017377e4ba6c441a5a6 100644 (file)
@@ -354,4 +354,5 @@ extern void ospf_check_and_gen_init_seq_lsa(struct ospf_interface *oi,
                                            struct ospf_lsa *lsa);
 extern void ospf_flush_lsa_from_area(struct ospf *ospf, struct in_addr area_id,
                                     int type);
+extern void ospf_maxage_lsa_remover(struct thread *thread);
 #endif /* _ZEBRA_OSPF_LSA_H */
index 46492ff6b06145ad29cf3f9929e8990da4f11fdf..4edc1de8119e719ee6b74ff1d18333a05ef716ae 100644 (file)
 #include "ospfd/ospf_sr.h"
 #include "ospfd/ospf_ti_lfa.h"
 #include "ospfd/ospf_errors.h"
+
+#ifdef SUPPORT_OSPF_API
 #include "ospfd/ospf_apiserver.h"
+#endif
 
 /* Variables to ensure a SPF scheduled log message is printed only once */
 
@@ -1895,7 +1898,9 @@ static void ospf_spf_calculate_schedule_worker(struct thread *thread)
        /* Update all routers routing table */
        ospf->oall_rtrs = ospf->all_rtrs;
        ospf->all_rtrs = all_rtrs;
+#ifdef SUPPORT_OSPF_API
        ospf_apiserver_notify_reachable(ospf->oall_rtrs, ospf->all_rtrs);
+#endif
 
        /* Free old ABR/ASBR routing table */
        if (ospf->old_rtrs)
index 7d72487686a40d0213068456a38d9ae81a5eed55..2a0016ea1957ee9b41d24ca7e0deff2581df2a68 100644 (file)
@@ -12952,6 +12952,42 @@ DEFUN (clear_ip_ospf_interface,
        return CMD_SUCCESS;
 }
 
+DEFPY_HIDDEN(ospf_lsa_refresh_timer, ospf_lsa_refresh_timer_cmd,
+            "[no$no] ospf lsa-refresh [(120-1800)]$value",
+            NO_STR OSPF_STR
+            "OSPF lsa refresh timer\n"
+            "timer value in seconds\n")
+{
+       VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf)
+
+       if (no)
+               ospf->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
+       else
+               ospf->lsa_refresh_timer = value;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(ospf_maxage_delay_timer, ospf_maxage_delay_timer_cmd,
+            "[no$no] ospf maxage-delay [(0-60)]$value",
+            NO_STR OSPF_STR
+            "OSPF lsa maxage delay timer\n"
+            "timer value in seconds\n")
+{
+       VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf)
+
+       if (no)
+               ospf->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT;
+       else
+               ospf->maxage_delay = value;
+
+       THREAD_OFF(ospf->t_maxage);
+       OSPF_TIMER_ON(ospf->t_maxage, ospf_maxage_lsa_remover,
+                     ospf->maxage_delay);
+
+       return CMD_SUCCESS;
+}
+
 void ospf_vty_clear_init(void)
 {
        install_element(ENABLE_NODE, &clear_ip_ospf_interface_cmd);
@@ -13109,6 +13145,9 @@ void ospf_vty_init(void)
 
        vrf_cmd_init(NULL);
 
+       install_element(OSPF_NODE, &ospf_lsa_refresh_timer_cmd);
+       install_element(OSPF_NODE, &ospf_maxage_delay_timer_cmd);
+
        /* Init interface related vty commands. */
        ospf_vty_if_init();
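Both commands are hidden but installed under the OSPF node, so they are entered under "router ospf". A minimal configuration sketch with illustrative values inside the accepted ranges (120-1800 and 0-60 seconds):

    router ospf
     ospf lsa-refresh 300
     ospf maxage-delay 10
    !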
 
index 8512b6a3399cdc611a168d351407d0e9401b4484..e0c36d86fea0b9d497566c9952140cd7dd4514b4 100644 (file)
@@ -392,6 +392,7 @@ struct ospf *ospf_new_alloc(unsigned short instance, const char *name)
 
        new->lsa_refresh_queue.index = 0;
        new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
+       new->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
        new->t_lsa_refresher = NULL;
        thread_add_timer(master, ospf_lsa_refresh_walker, new,
                         new->lsa_refresh_interval, &new->t_lsa_refresher);
index 8478c96ddcd63ef4d4c413e0601c15eb550b36f5..3a43010f85f6faff7721114fcb2de3670a77b50a 100644 (file)
@@ -313,6 +313,7 @@ struct ospf {
        time_t lsa_refresher_started;
 #define OSPF_LSA_REFRESH_INTERVAL_DEFAULT 10
        uint16_t lsa_refresh_interval;
+       uint16_t lsa_refresh_timer;
 
        /* Distance parameter. */
        uint8_t distance_all;
index 4b10c4c9c1fc9fd38cdab6763be32b137a4fb91b..38fa1966cd71a5de77acb262d03321e088e17971 100644 (file)
@@ -401,7 +401,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
                desired = GM_SG_NOINFO;
 
        if (desired != sg->state && !gm_ifp->stopping) {
-               if (PIM_DEBUG_IGMP_EVENTS)
+               if (PIM_DEBUG_GM_EVENTS)
                        zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
                                   gm_states[desired]);
 
@@ -817,7 +817,7 @@ static void gm_handle_v2_report(struct gm_if *gm_ifp,
        struct gm_packet_state *pkt;
 
        if (len < sizeof(*hdr)) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug(log_pkt_src(
                                "malformed MLDv2 report (truncated header)"));
                gm_ifp->stats.rx_drop_malformed++;
@@ -923,7 +923,7 @@ static void gm_handle_v1_report(struct gm_if *gm_ifp,
        size_t max_entries;
 
        if (len < sizeof(*hdr)) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug(log_pkt_src(
                                "malformed MLDv1 report (truncated)"));
                gm_ifp->stats.rx_drop_malformed++;
@@ -989,7 +989,7 @@ static void gm_handle_v1_leave(struct gm_if *gm_ifp,
        struct gm_packet_sg *old_grp;
 
        if (len < sizeof(*hdr)) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug(log_pkt_src(
                                "malformed MLDv1 leave (truncated)"));
                gm_ifp->stats.rx_drop_malformed++;
@@ -1049,7 +1049,7 @@ static void gm_t_expire(struct thread *t)
 
                remain_ms = monotime_until(&pend->expiry, &remain);
                if (remain_ms > 0) {
-                       if (PIM_DEBUG_IGMP_EVENTS)
+                       if (PIM_DEBUG_GM_EVENTS)
                                zlog_debug(
                                        log_ifp("next general expiry in %" PRId64 "ms"),
                                        remain_ms / 1000);
@@ -1063,7 +1063,7 @@ static void gm_t_expire(struct thread *t)
                        if (timercmp(&pkt->received, &pend->query, >=))
                                break;
 
-                       if (PIM_DEBUG_IGMP_PACKETS)
+                       if (PIM_DEBUG_GM_PACKETS)
                                zlog_debug(log_ifp("expire packet %p"), pkt);
                        gm_packet_drop(pkt, true);
                }
@@ -1073,7 +1073,7 @@ static void gm_t_expire(struct thread *t)
                        gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
        }
 
-       if (PIM_DEBUG_IGMP_EVENTS)
+       if (PIM_DEBUG_GM_EVENTS)
                zlog_debug(log_ifp("next general expiry waiting for query"));
 }
 
@@ -1250,7 +1250,7 @@ static void gm_t_grp_expire(struct thread *t)
        struct gm_if *gm_ifp = pend->iface;
        struct gm_sg *sg, *sg_start, sg_ref = {};
 
-       if (PIM_DEBUG_IGMP_EVENTS)
+       if (PIM_DEBUG_GM_EVENTS)
                zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
 
        /* gteq lookup - try to find *,G or S,G  (S,G is > *,G)
@@ -1442,7 +1442,7 @@ static void gm_handle_query(struct gm_if *gm_ifp,
        }
 
        if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
-               if (PIM_DEBUG_IGMP_EVENTS)
+               if (PIM_DEBUG_GM_EVENTS)
                        zlog_debug(
                                log_pkt_src("replacing elected querier %pPA"),
                                &gm_ifp->querier);
@@ -1802,7 +1802,7 @@ static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
 
        query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
 
-       if (PIM_DEBUG_IGMP_PACKETS)
+       if (PIM_DEBUG_GM_PACKETS)
                zlog_debug(
                        log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
                        &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
@@ -2137,7 +2137,7 @@ void gm_ifp_teardown(struct interface *ifp)
 
        gm_ifp = pim_ifp->mld;
        gm_ifp->stopping = true;
-       if (PIM_DEBUG_IGMP_EVENTS)
+       if (PIM_DEBUG_GM_EVENTS)
                zlog_debug(log_ifp("MLD stop"));
 
        THREAD_OFF(gm_ifp->t_query);
@@ -2423,6 +2423,7 @@ static void gm_show_if_one(struct vty *vty, struct interface *ifp,
        querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
 
        if (js_if) {
+               json_object_string_add(js_if, "name", ifp->name);
                json_object_string_add(js_if, "state", "up");
                json_object_string_addf(js_if, "version", "%d",
                                        gm_ifp->cur_version);
@@ -2438,6 +2439,14 @@ static void gm_show_if_one(struct vty *vty, struct interface *ifp,
                        json_object_string_addf(js_if, "otherQuerierTimer",
                                                "%pTH",
                                                gm_ifp->t_other_querier);
+               json_object_int_add(js_if, "timerRobustnessValue",
+                                   gm_ifp->cur_qrv);
+               json_object_int_add(js_if, "timerQueryIntervalMsec",
+                                   gm_ifp->cur_query_intv);
+               json_object_int_add(js_if, "timerQueryResponseTimerMsec",
+                                   gm_ifp->cur_max_resp);
+               json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
+                                   gm_ifp->cur_query_intv_trig);
        } else {
                vty_out(vty, "%-16s  %-5s  %d  %-25pPA  %-5s %11pTH  %pTVMs\n",
                        ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
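With the added fields, a single interface entry in the MLD interface JSON output would look roughly like the following; key names are taken from the code above, while the values are illustrative defaults and are not taken from this diff:

    {
      "name": "r1-eth0",
      "state": "up",
      "version": "2",
      "timerRobustnessValue": 2,
      "timerQueryIntervalMsec": 125000,
      "timerQueryResponseTimerMsec": 10000,
      "timerLastMemberQueryIntervalMsec": 1000
    }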
index f0b6037db91ff691b21286364803764d68ce2da1..c2f7396c180e75fa392616cc26ca17918d414bf3 100644 (file)
@@ -4358,9 +4358,9 @@ DEFUN (debug_igmp,
        DEBUG_STR
        DEBUG_IGMP_STR)
 {
-       PIM_DO_DEBUG_IGMP_EVENTS;
-       PIM_DO_DEBUG_IGMP_PACKETS;
-       PIM_DO_DEBUG_IGMP_TRACE;
+       PIM_DO_DEBUG_GM_EVENTS;
+       PIM_DO_DEBUG_GM_PACKETS;
+       PIM_DO_DEBUG_GM_TRACE;
        return CMD_SUCCESS;
 }
 
@@ -4371,9 +4371,9 @@ DEFUN (no_debug_igmp,
        DEBUG_STR
        DEBUG_IGMP_STR)
 {
-       PIM_DONT_DEBUG_IGMP_EVENTS;
-       PIM_DONT_DEBUG_IGMP_PACKETS;
-       PIM_DONT_DEBUG_IGMP_TRACE;
+       PIM_DONT_DEBUG_GM_EVENTS;
+       PIM_DONT_DEBUG_GM_PACKETS;
+       PIM_DONT_DEBUG_GM_TRACE;
        return CMD_SUCCESS;
 }
 
@@ -4385,7 +4385,7 @@ DEFUN (debug_igmp_events,
        DEBUG_IGMP_STR
        DEBUG_IGMP_EVENTS_STR)
 {
-       PIM_DO_DEBUG_IGMP_EVENTS;
+       PIM_DO_DEBUG_GM_EVENTS;
        return CMD_SUCCESS;
 }
 
@@ -4397,7 +4397,7 @@ DEFUN (no_debug_igmp_events,
        DEBUG_IGMP_STR
        DEBUG_IGMP_EVENTS_STR)
 {
-       PIM_DONT_DEBUG_IGMP_EVENTS;
+       PIM_DONT_DEBUG_GM_EVENTS;
        return CMD_SUCCESS;
 }
 
@@ -4409,7 +4409,7 @@ DEFUN (debug_igmp_packets,
        DEBUG_IGMP_STR
        DEBUG_IGMP_PACKETS_STR)
 {
-       PIM_DO_DEBUG_IGMP_PACKETS;
+       PIM_DO_DEBUG_GM_PACKETS;
        return CMD_SUCCESS;
 }
 
@@ -4421,7 +4421,7 @@ DEFUN (no_debug_igmp_packets,
        DEBUG_IGMP_STR
        DEBUG_IGMP_PACKETS_STR)
 {
-       PIM_DONT_DEBUG_IGMP_PACKETS;
+       PIM_DONT_DEBUG_GM_PACKETS;
        return CMD_SUCCESS;
 }
 
@@ -4433,7 +4433,7 @@ DEFUN (debug_igmp_trace,
        DEBUG_IGMP_STR
        DEBUG_IGMP_TRACE_STR)
 {
-       PIM_DO_DEBUG_IGMP_TRACE;
+       PIM_DO_DEBUG_GM_TRACE;
        return CMD_SUCCESS;
 }
 
@@ -4445,7 +4445,7 @@ DEFUN (no_debug_igmp_trace,
        DEBUG_IGMP_STR
        DEBUG_IGMP_TRACE_STR)
 {
-       PIM_DONT_DEBUG_IGMP_TRACE;
+       PIM_DONT_DEBUG_GM_TRACE;
        return CMD_SUCCESS;
 }
 
@@ -4458,7 +4458,7 @@ DEFUN (debug_igmp_trace_detail,
        DEBUG_IGMP_TRACE_STR
        "detailed\n")
 {
-       PIM_DO_DEBUG_IGMP_TRACE_DETAIL;
+       PIM_DO_DEBUG_GM_TRACE_DETAIL;
        return CMD_SUCCESS;
 }
 
@@ -4471,7 +4471,7 @@ DEFUN (no_debug_igmp_trace_detail,
        DEBUG_IGMP_TRACE_STR
        "detailed\n")
 {
-       PIM_DONT_DEBUG_IGMP_TRACE_DETAIL;
+       PIM_DONT_DEBUG_GM_TRACE_DETAIL;
        return CMD_SUCCESS;
 }
 
index e03e5a263074da692e280217c19fc91e51780187..40c4c2306d29b78955fdda863371ae9951c97419 100644 (file)
@@ -1345,7 +1345,7 @@ ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
 
        (void)igmp_join_new(ifp, group_addr, source_addr);
 
-       if (PIM_DEBUG_IGMP_EVENTS) {
+       if (PIM_DEBUG_GM_EVENTS) {
                char group_str[INET_ADDRSTRLEN];
                char source_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<grp?>", group_addr, group_str,
index fdc56fd3f36aa28fe13a4af03ad92d5eee7e8e46..08ac0cb5c2f570ece28434d9cec93a41e2283c79 100644 (file)
@@ -501,14 +501,14 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version,
        }
 
        if (!pim_if_connected_to_source(ifp, from)) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug("Recv IGMP query on interface: %s from a non-connected source: %s",
                                   ifp->name, from_str);
                return 0;
        }
 
        if (if_address_is_local(&from, AF_INET, ifp->vrf->vrf_id)) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug("Recv IGMP query on interface: %s from ourself %s",
                                   ifp->name, from_str);
                return 0;
@@ -554,7 +554,7 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version,
                return 0;
        }
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                char group_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<group?>", group_addr, group_str,
                               sizeof(group_str));
@@ -750,7 +750,7 @@ int pim_igmp_packet(struct gm_sock *igmp, char *buf, size_t len)
        pim_inet4_dump("<src?>", ip_hdr->ip_src, from_str, sizeof(from_str));
        pim_inet4_dump("<dst?>", ip_hdr->ip_dst, to_str, sizeof(to_str));
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                zlog_debug(
                        "Recv IGMP packet from %s to %s on %s: size=%zu ttl=%d msg_type=%d msg_size=%d",
                        from_str, to_str, igmp->interface->name, len, ip_hdr->ip_ttl,
@@ -1362,7 +1362,7 @@ void igmp_group_timer_on(struct gm_group *group, long interval_msec,
 {
        group_timer_off(group);
 
-       if (PIM_DEBUG_IGMP_EVENTS) {
+       if (PIM_DEBUG_GM_EVENTS) {
                char group_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<group?>", group->group_addr, group_str,
                               sizeof(group_str));
index 8d0925cb5673beff0d6defe96c3541753258058f..6e569e280399814a3c38bd0fad6cad9503946742 100644 (file)
@@ -68,7 +68,7 @@ void igmp_v2_send_query(struct gm_group *group, int fd, const char *ifname,
        checksum = in_cksum(query_buf, msg_size);
        *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                char dst_str[INET_ADDRSTRLEN];
                char group_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
@@ -121,7 +121,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
                return 0;
 
        if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug(
                                "Recv IGMPv2 REPORT from %s on %s: size=%d other than correct=%d",
                                from_str, ifp->name, igmp_msg_len,
@@ -140,7 +140,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
 
        memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                pim_inet4_dump("<dst?>", group_addr, group_str,
                               sizeof(group_str));
                zlog_debug("Recv IGMPv2 REPORT from %s on %s for %s", from_str,
@@ -155,7 +155,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
         * the SSM range.
         */
        if (pim_is_grp_ssm(pim_ifp->pim, group_addr)) {
-               if (PIM_DEBUG_IGMP_PACKETS) {
+               if (PIM_DEBUG_GM_PACKETS) {
                        zlog_debug(
                                "Ignoring IGMPv2 group record %pI4 from %s on %s exclude mode in SSM range",
                                &group_addr.s_addr, from_str, ifp->name);
@@ -196,7 +196,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
                return 0;
 
        if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
-               if (PIM_DEBUG_IGMP_PACKETS)
+               if (PIM_DEBUG_GM_PACKETS)
                        zlog_debug(
                                "Recv IGMPv2 LEAVE from %s on %s: size=%d other than correct=%d",
                                from_str, ifp->name, igmp_msg_len,
@@ -213,7 +213,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
 
        memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                pim_inet4_dump("<dst?>", group_addr, group_str,
                               sizeof(group_str));
                zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str,
@@ -237,7 +237,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
        */
        if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP)
            && (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) {
-               if (PIM_DEBUG_IGMP_EVENTS)
+               if (PIM_DEBUG_GM_EVENTS)
                        zlog_debug(
                                "IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address");
                return -1;
index 6ed5c501b20d6e797b961ac5310f03c4d723f077..c6d1239fbaa6a028a9dda4e02fdc7b4602a826a2 100644 (file)
@@ -209,7 +209,7 @@ static void igmp_source_timer_on(struct gm_group *group,
        source_timer_off(group, source);
        struct pim_interface *pim_ifp = group->interface->info;
 
-       if (PIM_DEBUG_IGMP_EVENTS) {
+       if (PIM_DEBUG_GM_EVENTS) {
                char group_str[INET_ADDRSTRLEN];
                char source_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<group?>", group->group_addr, group_str,
@@ -1622,7 +1622,7 @@ void igmp_v3_send_query(struct gm_group *group, int fd, const char *ifname,
        checksum = in_cksum(query_buf, msg_size);
        *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                char dst_str[INET_ADDRSTRLEN];
                char group_str[INET_ADDRSTRLEN];
                pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
@@ -1835,7 +1835,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
 
        /* determine filtering status for group */
        if (pim_is_group_filtered(pim_ifp, &grp)) {
-               if (PIM_DEBUG_IGMP_PACKETS) {
+               if (PIM_DEBUG_GM_PACKETS) {
                        zlog_debug(
                                "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
                                &grp.s_addr, from_str, ifp->name,
@@ -1852,7 +1852,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
        grp_addr.s_addr = ntohl(grp.s_addr);
 
        if (pim_is_group_224_0_0_0_24(grp_addr)) {
-               if (PIM_DEBUG_IGMP_PACKETS) {
+               if (PIM_DEBUG_GM_PACKETS) {
                        zlog_debug(
                                "Ignoring IGMPv3 group record %pI4 from %s on %s group range falls in 224.0.0.0/24",
                                &grp.s_addr, from_str, ifp->name);
@@ -1871,7 +1871,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
                switch (rec_type) {
                case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE:
                case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE:
-                       if (PIM_DEBUG_IGMP_PACKETS) {
+                       if (PIM_DEBUG_GM_PACKETS) {
                                zlog_debug(
                                        "Ignoring IGMPv3 group record %pI4 from %s on %s exclude mode in SSM range",
                                        &grp.s_addr, from_str, ifp->name);
@@ -1930,7 +1930,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
                return -1;
        }
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                zlog_debug(
                        "Recv IGMP report v3 from %s on %s: size=%d groups=%d",
                        from_str, ifp->name, igmp_msg_len, num_groups);
@@ -1967,7 +1967,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
                       group_record + IGMP_V3_GROUP_RECORD_GROUP_OFFSET,
                       sizeof(struct in_addr));
 
-               if (PIM_DEBUG_IGMP_PACKETS) {
+               if (PIM_DEBUG_GM_PACKETS) {
                        zlog_debug(
                                "    Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
                                from_str, ifp->name, i, rec_type,
@@ -1988,7 +1988,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
                                return -1;
                        }
 
-                       if (PIM_DEBUG_IGMP_PACKETS) {
+                       if (PIM_DEBUG_GM_PACKETS) {
                                char src_str[200];
 
                                if (!inet_ntop(AF_INET, src, src_str,
index 92c34f51a1433416c9bf2d82ab670da0bf1760ff..c168c0b0986abadd6a032cc1bfc8ef06e9c1b03a 100644 (file)
@@ -153,9 +153,9 @@ int main(int argc, char **argv, char **envp)
        PIM_DO_DEBUG_PIM_EVENTS;
        PIM_DO_DEBUG_PIM_PACKETS;
        PIM_DO_DEBUG_PIM_TRACE;
-       PIM_DO_DEBUG_IGMP_EVENTS;
-       PIM_DO_DEBUG_IGMP_PACKETS;
-       PIM_DO_DEBUG_IGMP_TRACE;
+       PIM_DO_DEBUG_GM_EVENTS;
+       PIM_DO_DEBUG_GM_PACKETS;
+       PIM_DO_DEBUG_GM_TRACE;
        PIM_DO_DEBUG_ZEBRA;
 #endif
 
index 8f1e6184d0617f0981fe1ecf5e1d22f5ffa51456..220706945daf158be81700558b635b12eddca322 100644 (file)
@@ -635,7 +635,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf,
        connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
 
        if (!connected_src) {
-               if (PIM_DEBUG_IGMP_PACKETS) {
+               if (PIM_DEBUG_GM_PACKETS) {
                        zlog_debug(
                                "Recv IGMP packet on interface: %s from a non-connected source: %pI4",
                                ifp->name, &ip_hdr->ip_src);
@@ -647,7 +647,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf,
        ifaddr = connected_src->u.prefix4;
        igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr);
 
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                zlog_debug(
                        "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
                        __func__, pim->vrf->name, ifp->name, igmp,
@@ -655,7 +655,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf,
        }
        if (igmp)
                pim_igmp_packet(igmp, (char *)buf, buf_size);
-       else if (PIM_DEBUG_IGMP_PACKETS) {
+       else if (PIM_DEBUG_GM_PACKETS) {
                zlog_debug(
                        "No IGMP socket on interface: %s with connected source: %pFX",
                        ifp->name, connected_src);
index 6d6dbb64653b3c11cfc6fc97d5e32558d1ffbe56..7726ac00b0a85047a000e5d712902d7dcc81d62b 100644 (file)
@@ -441,15 +441,6 @@ struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
        return NULL;
 }
 
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
-                                             const struct prefix *src_prefix)
-{
-       pim_addr addr;
-
-       addr = pim_addr_from_prefix(src_prefix);
-       return pim_neighbor_find(ifp, addr);
-}
-
 /*
  * Find the *one* interface out
  * this interface.  If more than
index 2673d22480901b4000bce3d3355843267a04942d..a2a2df9e04d35b56ed7ba6e3f081e24b3ce889ba 100644 (file)
@@ -52,8 +52,6 @@ void pim_neighbor_timer_reset(struct pim_neighbor *neigh, uint16_t holdtime);
 void pim_neighbor_free(struct pim_neighbor *neigh);
 struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
                                       pim_addr source_addr);
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
-                                             const struct prefix *src_prefix);
 struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp,
                                                    struct prefix *src);
 struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp);
index c18652f72ea0b966d1a1ea1b7224ede76cf722d0..544f926625c4b11177c58a288bcd004c24e75708 100644 (file)
@@ -58,16 +58,16 @@ int pim_debug_config_write(struct vty *vty)
                vty_out(vty, "debug msdp internal\n");
                ++writes;
        }
-       if (PIM_DEBUG_IGMP_EVENTS) {
+       if (PIM_DEBUG_GM_EVENTS) {
                vty_out(vty, "debug igmp events\n");
                ++writes;
        }
-       if (PIM_DEBUG_IGMP_PACKETS) {
+       if (PIM_DEBUG_GM_PACKETS) {
                vty_out(vty, "debug igmp packets\n");
                ++writes;
        }
        /* PIM_DEBUG_IGMP_TRACE catches _DETAIL too */
-       if (router->debugs & PIM_MASK_IGMP_TRACE) {
+       if (router->debugs & PIM_MASK_GM_TRACE) {
                vty_out(vty, "debug igmp trace\n");
                ++writes;
        }
index 9ffa075d2a7ce4e8d4ee01b4f9acacc85355253b..d71b1b697a9aad19e5f9f32feeaa90d5321095c9 100644 (file)
 #define PIM_MASK_PIM_PACKETDUMP_RECV (1 << 4)
 #define PIM_MASK_PIM_TRACE           (1 << 5)
 #define PIM_MASK_PIM_TRACE_DETAIL    (1 << 6)
-#define PIM_MASK_IGMP_EVENTS         (1 << 7)
-#define PIM_MASK_IGMP_PACKETS        (1 << 8)
-#define PIM_MASK_IGMP_TRACE          (1 << 9)
-#define PIM_MASK_IGMP_TRACE_DETAIL   (1 << 10)
+#define PIM_MASK_GM_EVENTS          (1 << 7)
+#define PIM_MASK_GM_PACKETS         (1 << 8)
+#define PIM_MASK_GM_TRACE            (1 << 9)
+#define PIM_MASK_GM_TRACE_DETAIL     (1 << 10)
 #define PIM_MASK_ZEBRA               (1 << 11)
 #define PIM_MASK_SSMPINGD            (1 << 12)
 #define PIM_MASK_MROUTE              (1 << 13)
@@ -160,12 +160,11 @@ extern uint8_t qpim_ecmp_rebalance_enable;
        (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_PIM_TRACE_DETAIL))
 #define PIM_DEBUG_PIM_TRACE_DETAIL                                             \
        (router->debugs & PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS)
-#define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS)
+#define PIM_DEBUG_GM_EVENTS (router->debugs & PIM_MASK_GM_EVENTS)
+#define PIM_DEBUG_GM_PACKETS (router->debugs & PIM_MASK_GM_PACKETS)
 #define PIM_DEBUG_IGMP_TRACE                                                   \
-       (router->debugs & (PIM_MASK_IGMP_TRACE | PIM_MASK_IGMP_TRACE_DETAIL))
-#define PIM_DEBUG_IGMP_TRACE_DETAIL                                            \
-       (router->debugs & PIM_MASK_IGMP_TRACE_DETAIL)
+       (router->debugs & (PIM_MASK_GM_TRACE | PIM_MASK_GM_TRACE_DETAIL))
+#define PIM_DEBUG_IGMP_TRACE_DETAIL (router->debugs & PIM_MASK_GM_TRACE_DETAIL)
 #define PIM_DEBUG_ZEBRA (router->debugs & PIM_MASK_ZEBRA)
 #define PIM_DEBUG_MLAG (router->debugs & PIM_MASK_MLAG)
 #define PIM_DEBUG_SSMPINGD (router->debugs & PIM_MASK_SSMPINGD)
@@ -187,15 +186,13 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DEBUG_BSM  (router->debugs & PIM_MASK_BSM_PROC)
 
 #define PIM_DEBUG_EVENTS                                                       \
-       (router->debugs                                                        \
-        & (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS                         \
-           | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
+       (router->debugs & (PIM_MASK_PIM_EVENTS | PIM_MASK_GM_EVENTS |          \
+                          PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
 #define PIM_DEBUG_PACKETS                                                      \
-       (router->debugs                                                        \
-        & (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS                       \
-           | PIM_MASK_MSDP_PACKETS))
+       (router->debugs &                                                      \
+        (PIM_MASK_PIM_PACKETS | PIM_MASK_GM_PACKETS | PIM_MASK_MSDP_PACKETS))
 #define PIM_DEBUG_TRACE                                                        \
-       (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_IGMP_TRACE))
+       (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_GM_TRACE))
 
 #define PIM_DO_DEBUG_PIM_EVENTS (router->debugs |= PIM_MASK_PIM_EVENTS)
 #define PIM_DO_DEBUG_PIM_PACKETS (router->debugs |= PIM_MASK_PIM_PACKETS)
@@ -206,11 +203,11 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DO_DEBUG_PIM_TRACE (router->debugs |= PIM_MASK_PIM_TRACE)
 #define PIM_DO_DEBUG_PIM_TRACE_DETAIL                                          \
        (router->debugs |= PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DO_DEBUG_IGMP_EVENTS (router->debugs |= PIM_MASK_IGMP_EVENTS)
-#define PIM_DO_DEBUG_IGMP_PACKETS (router->debugs |= PIM_MASK_IGMP_PACKETS)
-#define PIM_DO_DEBUG_IGMP_TRACE (router->debugs |= PIM_MASK_IGMP_TRACE)
-#define PIM_DO_DEBUG_IGMP_TRACE_DETAIL                                         \
-       (router->debugs |= PIM_MASK_IGMP_TRACE_DETAIL)
+#define PIM_DO_DEBUG_GM_EVENTS (router->debugs |= PIM_MASK_GM_EVENTS)
+#define PIM_DO_DEBUG_GM_PACKETS (router->debugs |= PIM_MASK_GM_PACKETS)
+#define PIM_DO_DEBUG_GM_TRACE (router->debugs |= PIM_MASK_GM_TRACE)
+#define PIM_DO_DEBUG_GM_TRACE_DETAIL                                           \
+       (router->debugs |= PIM_MASK_GM_TRACE_DETAIL)
 #define PIM_DO_DEBUG_ZEBRA (router->debugs |= PIM_MASK_ZEBRA)
 #define PIM_DO_DEBUG_MLAG (router->debugs |= PIM_MASK_MLAG)
 #define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
@@ -239,11 +236,11 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DONT_DEBUG_PIM_TRACE (router->debugs &= ~PIM_MASK_PIM_TRACE)
 #define PIM_DONT_DEBUG_PIM_TRACE_DETAIL                                        \
        (router->debugs &= ~PIM_MASK_PIM_TRACE_DETAIL)
-#define PIM_DONT_DEBUG_IGMP_EVENTS (router->debugs &= ~PIM_MASK_IGMP_EVENTS)
-#define PIM_DONT_DEBUG_IGMP_PACKETS (router->debugs &= ~PIM_MASK_IGMP_PACKETS)
-#define PIM_DONT_DEBUG_IGMP_TRACE (router->debugs &= ~PIM_MASK_IGMP_TRACE)
-#define PIM_DONT_DEBUG_IGMP_TRACE_DETAIL                                       \
-       (router->debugs &= ~PIM_MASK_IGMP_TRACE_DETAIL)
+#define PIM_DONT_DEBUG_GM_EVENTS (router->debugs &= ~PIM_MASK_GM_EVENTS)
+#define PIM_DONT_DEBUG_GM_PACKETS (router->debugs &= ~PIM_MASK_GM_PACKETS)
+#define PIM_DONT_DEBUG_GM_TRACE (router->debugs &= ~PIM_MASK_GM_TRACE)
+#define PIM_DONT_DEBUG_GM_TRACE_DETAIL                                         \
+       (router->debugs &= ~PIM_MASK_GM_TRACE_DETAIL)
 #define PIM_DONT_DEBUG_ZEBRA (router->debugs &= ~PIM_MASK_ZEBRA)
 #define PIM_DONT_DEBUG_MLAG (router->debugs &= ~PIM_MASK_MLAG)
 #define PIM_DONT_DEBUG_SSMPINGD (router->debugs &= ~PIM_MASK_SSMPINGD)
index c3a9369a06a7a655920b2ccd8d014046dfb02c14..8a321d9a91f38ed1dc8c25dc811a2bc9d32f638e 100644 (file)
@@ -1082,10 +1082,9 @@ static void rip_auth_md5_set(struct stream *s, struct rip_interface *ri,
 
        /* Check packet length. */
        if (len < (RIP_HEADER_SIZE + RIP_RTE_SIZE)) {
-               flog_err(
-                       EC_RIP_PACKET,
-                       "rip_auth_md5_set(): packet length %ld is less than minimum length.",
-                       len);
+               flog_err(EC_RIP_PACKET,
+                        "%s: packet length %ld is less than minimum length.",
+                        __func__, len);
                return;
        }
 
@@ -1451,9 +1450,8 @@ static int rip_send_packet(uint8_t *buf, int size, struct sockaddr_in *to,
                        inet_ntop(AF_INET, &sin.sin_addr, dst, sizeof(dst));
                }
 #undef ADDRESS_SIZE
-               zlog_debug("rip_send_packet %pI4 > %s (%s)",
-                          &ifc->address->u.prefix4, dst,
-                          ifc->ifp->name);
+               zlog_debug("%s %pI4 > %s (%s)", __func__,
+                          &ifc->address->u.prefix4, dst, ifc->ifp->name);
        }
 
        if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)) {
@@ -1772,8 +1770,8 @@ static void rip_read(struct thread *t)
        /* If this packet come from unknown interface, ignore it. */
        if (ifp == NULL) {
                zlog_info(
-                       "rip_read: cannot find interface for packet from %pI4 port %d (VRF %s)",
-                       &from.sin_addr, ntohs(from.sin_port),
+                       "%s: cannot find interface for packet from %pI4 port %d (VRF %s)",
+                       __func__, &from.sin_addr, ntohs(from.sin_port),
                        rip->vrf_name);
                return;
        }
@@ -1786,8 +1784,8 @@ static void rip_read(struct thread *t)
 
        if (ifc == NULL) {
                zlog_info(
-                       "rip_read: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)",
-                       &from.sin_addr, ntohs(from.sin_port),
+                       "%s: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)",
+                       __func__, &from.sin_addr, ntohs(from.sin_port),
                        ifp->name, rip->vrf_name);
                return;
        }
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/__init__.py b/tests/topotests/bgp_aggregate_address_matching_med/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf b/tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..3559760
--- /dev/null
@@ -0,0 +1,21 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 3 10
+ address-family ipv4 unicast
+  redistribute connected
+  neighbor 192.168.1.2 route-map r2 out
+ exit-address-family
+!
+ip prefix-list p1 seq 5 permit 172.16.255.1/32
+ip prefix-list p1 seq 10 permit 172.16.255.2/32
+ip prefix-list p2 seq 15 permit 172.16.255.3/32
+!
+route-map r2 permit 10
+ match ip address prefix-list p1
+ set metric 300
+route-map r2 permit 20
+ match ip address prefix-list p2
+ set metric 400
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf
new file mode 100644 (file)
index 0000000..685adb3
--- /dev/null
@@ -0,0 +1,11 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+ ip address 172.16.255.2/32
+ ip address 172.16.255.3/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf b/tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..9bc9a31
--- /dev/null
@@ -0,0 +1,11 @@
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 3 10
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 3 10
+ address-family ipv4 unicast
+  aggregate-address 172.16.255.0/24 summary-only matching-MED-only
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf
new file mode 100644 (file)
index 0000000..f229954
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+interface r2-eth1
+ ip address 192.168.2.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf b/tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf
new file mode 100644 (file)
index 0000000..dfb5ac7
--- /dev/null
@@ -0,0 +1,6 @@
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 3 10
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf
new file mode 100644 (file)
index 0000000..11e06d4
--- /dev/null
@@ -0,0 +1,6 @@
+!
+interface r3-eth0
+ ip address 192.168.2.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py b/tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py
new file mode 100644 (file)
index 0000000..edf50dc
--- /dev/null
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test that the aggregate-address command works correctly when summary-only
+and matching-MED-only are used together.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+from lib.common_config import (
+    step,
+)
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+
+def build_topo(tgen):
+    for routern in range(1, 5):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r2"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_aggregate_address_matching_med():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    r1 = tgen.gears["r1"]
+    r3 = tgen.gears["r3"]
+
+    def _bgp_converge():
+        output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json"))
+        expected = {
+            "routes": {
+                "172.16.255.0/24": None,
+                "172.16.255.1/32": [{"path": "65002 65001"}],
+                "172.16.255.2/32": [{"path": "65002 65001"}],
+                "172.16.255.3/32": [{"path": "65002 65001"}],
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_converge)
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+    assert result is None, "Failed to see unsuppressed routes from R2"
+
+    step("Change MED for 172.16.255.3/32 from 400 to 300")
+    r1.vtysh_cmd(
+        """
+    configure terminal
+        route-map r2 permit 20
+            set metric 300
+    """
+    )
+
+    step("Check if 172.16.255.0/24 aggregated route was created and others suppressed")
+
+    def _bgp_aggregated_summary_only_med_match():
+        output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json"))
+        expected = {
+            "routes": {
+                "172.16.255.0/24": [{"path": "65002"}],
+                "172.16.255.1/32": None,
+                "172.16.255.2/32": None,
+                "172.16.255.3/32": None,
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_aggregated_summary_only_med_match)
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+    assert result is None, "Aggregated route not created or more-specifics not suppressed from R2"
+
+    step("Change MED for 172.16.255.3/32 back to 400 from 300")
+    r1.vtysh_cmd(
+        """
+    configure terminal
+        route-map r2 permit 20
+            set metric 400
+    """
+    )
+    test_func = functools.partial(_bgp_converge)
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+    assert result is None, "Failed to see unsuppressed routes from R2"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 29e6c2cbf702063e7e69eb6de19e0326cf4082c5..e2eee513e6d7f9c4e5db38ecbbdbbf0d66b4a6a2 100644 (file)
@@ -2,7 +2,7 @@
   "Ledger":506,
   "InUse":506,
   "Requests":0,
-  "LabelChunks":11,
+  "LabelChunks":3,
   "Pending":0,
   "Reconnects":0
 }
index 77705e8d35cc8dae2ba93b057ae602845b1323c4..0dc59b58cfb3f4e7cd61a8e1062bbe2eb076dbf1 100644 (file)
@@ -2,7 +2,7 @@
   "Ledger":51,
   "InUse":51,
   "Requests":0,
-  "LabelChunks":2,
+  "LabelChunks":1,
   "Pending":0,
   "Reconnects":0
 }
index 7d52048ebeca5d893b2cf78239a4f817efd56257..1108919e13268db7e326fb68fcf8bda8998ba9ad 100644 (file)
@@ -361,13 +361,6 @@ def test_bgp_remove_private_as():
                 return True
         return False
 
-    def _get_pfx_path_from_nh(router, prefix, nh):
-        """Return as-path for a specific route + path."""
-        output = json.loads(tgen.gears[router].vtysh_cmd(f"show ip bgp {prefix} json"))
-        for path in output[prefix]:
-            if path["nexthops"]["ip"] == nh:
-                return path["aspath"]["string"]
-
     def _routers_up(tx_rtrs, rx_rtrs):
         """Ensure all BGP sessions are up and all routes are installed."""
         # all sessions go through tx_routers, so ensure all their peers are up
@@ -408,11 +401,7 @@ def test_bgp_remove_private_as():
                 for pfx in prefixes:
                     good_path = expected_paths[rtr][remove_type][peer][pfx]
                     real_path = adj_rib_in["receivedRoutes"][pfx]["path"]
-                    msg = (
-                        f"{rtr} received incorrect AS-Path from {peer} "
-                        f'({p_ip}) for {pfx}. remove_type: "{remove_type}"'
-                    )
-                    assert real_path == good_path, msg
+                    return real_path == good_path
 
     #######################
     # Begin Test
@@ -424,7 +413,11 @@ def test_bgp_remove_private_as():
     # test each variation of remove-private-AS
     for rmv_type in remove_types:
         _change_remove_type(rmv_type, "add")
-        _validate_paths(rmv_type)
+
+        test_func = partial(_validate_paths, rmv_type)
+        _, result = topotest.run_and_expect(test_func, True, count=60, wait=0.5)
+        assert result == True, "Not all routes have correct AS-Path values!"
+
         # each variation sets a separate peer flag in bgpd. we need to clear
         # the old flag after each iteration so we only test the flags we expect.
         _change_remove_type(rmv_type, "del")
diff --git a/tests/topotests/bgp_vpnv4_ebgp/__init__.py b/tests/topotests/bgp_vpnv4_ebgp/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..2eebe5e
--- /dev/null
@@ -0,0 +1,25 @@
+router bgp 65500
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ neighbor 10.125.0.2 remote-as 65501
+ address-family ipv4 unicast
+  no neighbor 10.125.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 10.125.0.2 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 1.1.1.1
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 101
+  rd vpn export 444:1
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
\ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json b/tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json
new file mode 100644 (file)
index 0000000..da7d281
--- /dev/null
@@ -0,0 +1,50 @@
+{
+    "10.200.0.0/24": [
+        {
+            "prefix": "10.200.0.0/24",
+            "prefixLen": 24,
+            "protocol": "bgp",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 20,
+            "metric": 0,
+            "nexthops": [
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "ip": "10.125.0.2",
+                    "afi": "ipv4",
+                    "interfaceName": "r1-eth0",
+                    "vrf": "default",
+                    "active": true,
+                   "labels":[
+                       102
+                   ]
+                }
+            ]
+        }
+    ],
+    "10.201.0.0/24": [
+        {
+            "prefix": "10.201.0.0/24",
+            "prefixLen": 24,
+            "protocol": "connected",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 0,
+            "metric": 0,
+            "installed": true,
+            "nexthops":[
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "directlyConnected": true,
+                    "interfaceName": "r1-eth1",
+                    "active": true
+                }
+            ]
+        }
+    ]
+}
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf b/tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf
new file mode 100644 (file)
index 0000000..e9ae4e9
--- /dev/null
@@ -0,0 +1,7 @@
+log stdout
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json
new file mode 100644 (file)
index 0000000..19797dd
--- /dev/null
@@ -0,0 +1,38 @@
+{
+     "vrfName": "vrf1",
+     "localAS": 65501,
+     "routes":
+         {
+            "10.201.0.0/24": [
+                {
+                    "prefix": "10.201.0.0",
+                    "prefixLen": 24,
+                    "network": "10.201.0.0\/24",
+                    "nhVrfName": "default",
+                    "nexthops": [
+                        {
+                            "ip": "10.125.0.1",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ],
+            "10.200.0.0/24": [
+                {
+                    "valid": true,
+                    "bestpath": true,
+                    "prefix": "10.200.0.0",
+                    "prefixLen": 24,
+                    "network": "10.200.0.0\/24",
+                    "nexthops": [
+                        {
+                            "ip": "0.0.0.0",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ]
+         }
+}
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..e38c99d
--- /dev/null
@@ -0,0 +1,25 @@
+router bgp 65501
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ neighbor 10.125.0.1 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 10.125.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 10.125.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:2
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf b/tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf
new file mode 100644 (file)
index 0000000..6c433ae
--- /dev/null
@@ -0,0 +1,7 @@
+log stdout
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
diff --git a/tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py b/tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py
new file mode 100644 (file)
index 0000000..cd8a9b6
--- /dev/null
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_ebgp.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2022 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_ebgp.py: Test the FRR BGP daemon with EBGP direct connection
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    "Build function"
+
+    # Create 2 routers.
+    tgen.add_router("r1")
+    tgen.add_router("r2")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r2"])
+
+def _populate_iface():
+    tgen = get_topogen()
+    cmds_list = [
+        'ip link add vrf1 type vrf table 10',
+        'echo 100000 > /proc/sys/net/mpls/platform_labels',
+        'ip link set dev vrf1 up',
+        'ip link set dev {0}-eth1 master vrf1',
+        'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input',
+    ]
+
+    for cmd in cmds_list:
+        input = cmd.format('r1', '1', '2')
+        logger.info('input: ' + cmd)
+        output = tgen.net['r1'].cmd(cmd.format('r1', '1', '2'))
+        logger.info('output: ' + output)
+
+    for cmd in cmds_list:
+        input = cmd.format('r2', '2', '1')
+        logger.info('input: ' + cmd)
+        output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1'))
+        logger.info('output: ' + output)
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    _populate_iface()
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    tgen.stop_topology()
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged;
+    subsequent checks depend on it.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears['r1']
+    logger.info("Dump some context for r1")
+    router.vtysh_cmd("show bgp ipv4 vpn")
+    router.vtysh_cmd("show bgp summary")
+    router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+    router.vtysh_cmd("show running-config")
+    router = tgen.gears['r2']
+    logger.info("Dump some context for r2")
+    router.vtysh_cmd("show bgp ipv4 vpn")
+    router.vtysh_cmd("show bgp summary")
+    router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+    router.vtysh_cmd("show running-config")
+
+    # Check IPv4 routing tables on r1
+    logger.info("Checking IPv4 routes for convergence on r1")
+    router = tgen.gears['r1']
+    json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        logger.info("missing file {}".format(json_file))
+        assert 0, 'ipv4_routes.json file not found'
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show ip route vrf vrf1 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check the BGP IPv4 routing table on r2
+    logger.info("Checking BGP IPv4 routes for convergence on r2")
+    router = tgen.gears['r2']
+    json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        assert 0, 'bgp_ipv4_routes.json file not found'
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp vrf vrf1 ipv4 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vpnv4_gre/__init__.py b/tests/topotests/bgp_vpnv4_gre/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..0e2d3a8
--- /dev/null
@@ -0,0 +1,26 @@
+router bgp 65500
+ bgp router-id 192.0.2.1
+ neighbor 192.0.2.2 remote-as 65500
+ neighbor 192.0.2.2 update-source 192.0.2.1
+ address-family ipv4 unicast
+  no neighbor 192.0.2.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 192.0.2.2 activate
+  neighbor 192.0.2.2 route-map rmap in
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 101
+  rd vpn export 444:1
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
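+! "rmap" is applied inbound on the 192.0.2.2 VPN peering above; it accepts
+! L3VPN traffic over GRE encapsulation rather than an MPLS LSP.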
+route-map rmap permit 1
+ set l3vpn next-hop encapsulation gre
+!
diff --git a/tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json b/tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json
new file mode 100644 (file)
index 0000000..5f2732a
--- /dev/null
@@ -0,0 +1,50 @@
+{
+    "10.200.0.0/24": [
+        {
+            "prefix": "10.200.0.0/24",
+            "prefixLen": 24,
+            "protocol": "bgp",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 20,
+            "metric": 0,
+            "nexthops": [
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "ip": "192.168.0.2",
+                    "afi": "ipv4",
+                    "interfaceName": "r1-gre0",
+                    "vrf": "default",
+                    "active": true,
+                    "labels": [
+                        102
+                    ]
+                }
+            ]
+        }
+    ],
+    "10.201.0.0/24": [
+        {
+            "prefix": "10.201.0.0/24",
+            "prefixLen": 24,
+            "protocol": "connected",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 0,
+            "metric": 0,
+            "installed": true,
+            "nexthops": [
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "directlyConnected": true,
+                    "interfaceName": "r1-eth1",
+                    "active": true
+                }
+            ]
+        }
+    ]
+}
diff --git a/tests/topotests/bgp_vpnv4_gre/r1/zebra.conf b/tests/topotests/bgp_vpnv4_gre/r1/zebra.conf
new file mode 100644 (file)
index 0000000..11780a8
--- /dev/null
@@ -0,0 +1,14 @@
+log stdout
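+! reach the peer loopback (the BGP session endpoint) via the GRE tunnel subnet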
+ip route 192.0.2.2/32 192.168.0.2
+interface lo
+ ip address 192.0.2.1/32
+!
+interface r1-gre0
+ ip address 192.168.0.1/24
+!
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
diff --git a/tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json
new file mode 100644 (file)
index 0000000..e50d5dd
--- /dev/null
@@ -0,0 +1,38 @@
+{
+     "vrfName": "vrf1",
+     "localAS": 65500,
+     "routes":
+         {
+            "10.201.0.0/24": [
+                {
+                    "prefix": "10.201.0.0",
+                    "prefixLen": 24,
+                    "network": "10.201.0.0\/24",
+                    "nhVrfName": "default",
+                    "nexthops": [
+                        {
+                            "ip": "192.0.2.1",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ],
+            "10.200.0.0/24": [
+                {
+                    "valid": true,
+                    "bestpath": true,
+                    "prefix": "10.200.0.0",
+                    "prefixLen": 24,
+                    "network": "10.200.0.0\/24",
+                    "nexthops": [
+                        {
+                            "ip": "0.0.0.0",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ]
+         }
+}
diff --git a/tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..bf05866
--- /dev/null
@@ -0,0 +1,22 @@
+router bgp 65500
+ bgp router-id 192.0.2.2
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.1 update-source 192.0.2.2
+ address-family ipv4 unicast
+  no neighbor 192.0.2.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 192.0.2.1 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:2
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_gre/r2/zebra.conf b/tests/topotests/bgp_vpnv4_gre/r2/zebra.conf
new file mode 100644 (file)
index 0000000..de88a4b
--- /dev/null
@@ -0,0 +1,14 @@
+log stdout
+ip route 192.0.2.1/32 192.168.0.1
+interface lo
+ ip address 192.0.2.2/32
+!
+interface r2-gre0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
diff --git a/tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py b/tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py
new file mode 100644 (file)
index 0000000..f562f44
--- /dev/null
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_gre.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2021 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_gre.py: Test the FRR BGP daemon exchanging IPv4 VPN routes
+ between two routers, with the VPN next hop reachable over a GRE tunnel.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    "Build function"
+
+    # Create 2 routers.
+    tgen.add_router("r1")
+    tgen.add_router("r2")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r2"])
+
+def _populate_iface():
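+    # Kernel-side setup run on both routers: create VRF vrf1, enable MPLS,
+    # bind {0}-eth1 into the VRF, and build a GRE tunnel {0}-gre0 between
+    # 10.125.0.{1} and 10.125.0.{2} with MPLS input enabled on both the
+    # tunnel and the {0}-eth0 core interface ({0} = router name).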
+    tgen = get_topogen()
+    cmds_list = [
+        'ip link add vrf1 type vrf table 10',
+        'echo 10 > /proc/sys/net/mpls/platform_labels',
+        'ip link set dev vrf1 up',
+        'ip link set dev {0}-eth1 master vrf1',
+        'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input',
+        'ip tunnel add {0}-gre0 mode gre ttl 64 dev {0}-eth0 local 10.125.0.{1} remote 10.125.0.{2}',
+        'ip link set dev {0}-gre0 up',
+        'echo 1 > /proc/sys/net/mpls/conf/{0}-gre0/input',
+    ]
+
+    for cmd in cmds_list:
+        cmd_fmt = cmd.format('r1', '1', '2')
+        logger.info('input: ' + cmd_fmt)
+        output = tgen.net['r1'].cmd(cmd_fmt)
+        logger.info('output: ' + output)
+
+    for cmd in cmds_list:
+        cmd_fmt = cmd.format('r2', '2', '1')
+        logger.info('input: ' + cmd_fmt)
+        output = tgen.net['r2'].cmd(cmd_fmt)
+        logger.info('output: ' + output)
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    _populate_iface()
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    tgen.stop_topology()
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged by comparing the r1 and r2
+    routing tables against the expected JSON output.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears['r1']
+    logger.info("Dump some context for r1")
+    router.vtysh_cmd("show bgp ipv4 vpn")
+    router.vtysh_cmd("show bgp summary")
+    router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+    router.vtysh_cmd("show running-config")
+    router = tgen.gears['r2']
+    logger.info("Dump some context for r2")
+    router.vtysh_cmd("show bgp ipv4 vpn")
+    router.vtysh_cmd("show bgp summary")
+    router.vtysh_cmd("show bgp vrf vrf1 ipv4")
+    router.vtysh_cmd("show running-config")
+
+    # Check IPv4 routing tables on r1
+    logger.info("Checking IPv4 routes for convergence on r1")
+    router = tgen.gears['r1']
+    json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        logger.info("missing file {}".format(json_file))
+        assert 0, 'ipv4_routes.json file not found'
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show ip route vrf vrf1 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check the BGP IPv4 routing table on r2
+    logger.info("Checking BGP IPv4 routes for convergence on r2")
+    router = tgen.gears['r2']
+    json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        assert 0, 'bgp_ipv4_routes.json file not found'
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp vrf vrf1 ipv4 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index f79ca71a6482077fdbb3671d4eba2a49f7dc9e66..2a57f6c26e404cabae8b260b60df3c5994767331 100755 (executable)
@@ -232,6 +232,9 @@ def pytest_configure(config):
     Assert that the environment is correctly configured, and get extra config.
     """
 
+    if config.getoption("--collect-only"):
+        return
+
     if "PYTEST_XDIST_WORKER" not in os.environ:
         os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
         os.environ["PYTEST_TOPOTEST_WORKER"] = ""
index 1f02efee20db28d98afe487a2b28f621b7c847a7..68841e2d3883cf1d7515f3a5dc19d01a2b8a9483 100644 (file)
@@ -60,8 +60,13 @@ extern struct thread_master *master;
 #define VTYSH_ALL        VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD
 #define VTYSH_ACL         VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA
 #define VTYSH_RMAP       VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_EIGRPD|VTYSH_FABRICD
-#define VTYSH_INTERFACE          VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_PBRD|VTYSH_FABRICD|VTYSH_VRRPD
-#define VTYSH_VRF        VTYSH_INTERFACE|VTYSH_STATICD
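+/* Daemons shared by the VTYSH_INTERFACE and VTYSH_VRF definitions below. */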
+#define VTYSH_INTERFACE_SUBSET                                                 \
+       VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \
+               VTYSH_ISISD | VTYSH_PIMD | VTYSH_PIM6D | VTYSH_NHRPD |         \
+               VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | VTYSH_FABRICD |     \
+               VTYSH_VRRPD
+#define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD
+#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD
 #define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D
 /* Daemons who can process nexthop-group configs */
 #define VTYSH_NH_GROUP    VTYSH_PBRD|VTYSH_SHARPD
index 84d5b74ddce9b997155d207d3af9fea8ced75a99..3f3d8292117c5ae00bdc513f9b6023b93291e783 100644 (file)
@@ -336,6 +336,12 @@ module frr-bgp-route-map {
       "Set EVPN gateway IP overlay index IPv6";
   }
 
+  identity set-l3vpn-nexthop-encapsulation {
+    base frr-route-map:rmap-set-type;
+    description
+      "Accept L3VPN traffic over an encapsulation other than LSP";
+  }
+
   grouping extcommunity-non-transitive-types {
     leaf two-octet-as-specific {
       type boolean;
@@ -931,5 +937,21 @@ module frr-bgp-route-map {
         type inet:ipv6-address;
       }
     }
+    case l3vpn-nexthop-encapsulation {
+      when
+      "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action,
+      'frr-bgp-route-map:set-l3vpn-nexthop-encapsulation')";
+      description
+        "Accept L3VPN traffic over an encapsulation other than LSP";
+      leaf l3vpn-nexthop-encapsulation {
+        type enumeration {
+          enum "gre" {
+            value 0;
+            description
+              "GRE protocol";
+          }
+        }
+      }
+    }
   }
 }
index 45a372f88c9e5e78ec5a4177e64a251009079948..a8b56bb8f2aaec18b7d8cca85804b42d00294c9c 100644 (file)
@@ -1033,12 +1033,18 @@ static int netlink_parse_error(const struct nlsock *nl, struct nlmsghdr *h,
                return 1;
        }
 
-       /* Deal with errors that occur because of races in link handling. */
-       if (is_cmd
-           && ((msg_type == RTM_DELROUTE
-                && (-errnum == ENODEV || -errnum == ESRCH))
-               || (msg_type == RTM_NEWROUTE
-                   && (-errnum == ENETDOWN || -errnum == EEXIST)))) {
+       /*
+        * Deal with errors that occur because of races in link handling
+        * or because the message type is not supported by the kernel.
+        */
+       if (is_cmd &&
+           ((msg_type == RTM_DELROUTE &&
+             (-errnum == ENODEV || -errnum == ESRCH)) ||
+            (msg_type == RTM_NEWROUTE &&
+             (-errnum == ENETDOWN || -errnum == EEXIST)) ||
+            ((msg_type == RTM_NEWTUNNEL || msg_type == RTM_DELTUNNEL ||
+              msg_type == RTM_GETTUNNEL) &&
+             (-errnum == EOPNOTSUPP)))) {
                if (IS_ZEBRA_DEBUG_KERNEL)
                        zlog_debug("%s: error: %s type=%s(%u), seq=%u, pid=%u",
                                   nl->name, safe_strerror(-errnum),
index 6624f0beb993913543ed8645fd47e73e9ac571b7..a2844ca9568f69fb775083c6d36a204ffc8bf4c6 100644 (file)
@@ -595,7 +595,7 @@ int zebra_vrf_netns_handler_create(struct vty *vty, struct vrf *vrf,
                                zlog_info(
                                        "VRF %u already configured with NETNS %s",
                                        vrf->vrf_id, ns->name);
-                       return CMD_WARNING_CONFIG_FAILED;
+                       return CMD_WARNING;
                }
        }
        ns = ns_lookup_name(pathname);