IndentCaseLabels: false
AlignEscapedNewlinesLeft: false
AlignTrailingComments: true
+AlignConsecutiveMacros: AcrossComments
AllowAllParametersOfDeclarationOnNextLine: false
AlignAfterOpenBracket: true
SpaceAfterCStyleCast: false
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_rd.h"
#define BGP_LABELPOOL_ENABLE_TESTS 0
lcb->label);
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n", "nexthop",
+ lcb->label);
+ break;
}
}
if (uj)
vty_out(vty, "%-18s %u\n", "VRF",
label);
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ json_object_int_add(json_elem, "label", label);
+ } else
+ vty_out(vty, "%-18s %u\n", "nexthop",
+ label);
+ break;
}
}
if (uj)
else
vty_out(vty, "VRF\n");
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj)
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ else
+ vty_out(vty, "Nexthop\n");
+ break;
}
}
if (uj)
return CMD_SUCCESS;
}
+/* Dump the label-per-nexthop cache of one address-family of a BGP
+ * instance to the vty. With 'detail', also list every path bound to
+ * each cache entry.
+ */
+static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
+				       struct bgp *bgp, bool detail)
+{
+	struct bgp_label_per_nexthop_cache_head *tree;
+	struct bgp_label_per_nexthop_cache *iter;
+	safi_t safi;
+	void *src;
+	char buf[PREFIX2STR_BUFFER];
+	char labelstr[MPLS_LABEL_STRLEN];
+	struct bgp_dest *dest;
+	struct bgp_path_info *path;
+	struct bgp *bgp_path;
+	struct bgp_table *table;
+	time_t tbuf;
+	afi_t path_afi;
+
+	vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
+		afi2str(afi), bgp->name_pretty);
+
+	tree = &bgp->mpls_labels_per_nexthop[afi];
+	frr_each (bgp_label_per_nexthop_cache, tree, iter) {
+		if (afi2family(afi) == AF_INET)
+			src = (void *)&iter->nexthop.u.prefix4;
+		else
+			src = (void *)&iter->nexthop.u.prefix6;
+
+		vty_out(vty, " %s, label %s #paths %u\n",
+			inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
+			mpls_label2str(1, &iter->label, labelstr,
+				       sizeof(labelstr), 0, true),
+			iter->path_count);
+		if (iter->nh)
+			vty_out(vty, " if %s\n",
+				ifindex2ifname(iter->nh->ifindex,
+					       iter->nh->vrf_id));
+		/* last_update is monotonic time; convert to wall clock */
+		tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
+		vty_out(vty, " Last update: %s", ctime(&tbuf));
+		if (!detail)
+			continue;
+		vty_out(vty, " Paths:\n");
+		LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
+			dest = path->net;
+			table = bgp_dest_table(dest);
+			assert(dest && table);
+			/* use a local afi for the path: overwriting the
+			 * function parameter would corrupt the family used
+			 * when printing the remaining cache entries
+			 */
+			path_afi = family2afi(
+				bgp_dest_get_prefix(dest)->family);
+			safi = table->safi;
+			bgp_path = table->bgp;
+
+			if (dest->pdest) {
+				vty_out(vty, " %d/%d %pBD RD ", path_afi,
+					safi, dest);
+
+				vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
+					(struct prefix_rd *)bgp_dest_get_prefix(
+						dest->pdest));
+				vty_out(vty, " %s flags 0x%x\n",
+					bgp_path->name_pretty, path->flags);
+			} else
+				vty_out(vty, " %d/%d %pBD %s flags 0x%x\n",
+					path_afi, safi, dest,
+					bgp_path->name_pretty, path->flags);
+		}
+	}
+}
+
+/* "show bgp [<view|vrf> NAME] label-nexthop [detail]": dump the
+ * label-per-nexthop caches of one BGP instance for IPv4 and IPv6.
+ */
+DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
+      "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
+      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
+      "BGP label per-nexthop table\n"
+      "Show detailed information\n")
+{
+	int idx = 0;
+	char *vrf = NULL;
+	struct bgp *bgp;
+	bool detail = false;
+	int afi;
+
+	/* the grammar accepts both the 'view' and the 'vrf' keyword;
+	 * look for either of them before the instance name
+	 */
+	if (argv_find(argv, argc, "vrf", &idx) ||
+	    argv_find(argv, argc, "view", &idx)) {
+		vrf = argv[++idx]->arg;
+		bgp = bgp_lookup_by_name(vrf);
+	} else
+		bgp = bgp_get_default();
+
+	if (!bgp)
+		return CMD_SUCCESS;
+
+	if (argv_find(argv, argc, "detail", &idx))
+		detail = true;
+
+	for (afi = AFI_IP; afi <= AFI_IP6; afi++)
+		show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
+	return CMD_SUCCESS;
+}
+
#if BGP_LABELPOOL_ENABLE_TESTS
/*------------------------------------------------------------------------
* Testing code start
install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
+
+DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
+ "BGP Label Per Nexthop entry");
+
+/* The nexthops values are compared to
+ * find in the tree the appropriate cache entry
+ */
+int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+				    const struct bgp_label_per_nexthop_cache *b)
+{
+	/* ordering is fully determined by the nexthop prefix key */
+	const struct prefix *key_a = &a->nexthop;
+	const struct prefix *key_b = &b->nexthop;
+
+	return prefix_cmp(key_a, key_b);
+}
+
+/* Allocate a new label-per-nexthop cache entry keyed on 'nexthop',
+ * insert it into 'tree' and return it. The label starts out invalid
+ * until zebra answers the allocation request.
+ */
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+			  struct prefix *nexthop)
+{
+	struct bgp_label_per_nexthop_cache *entry;
+
+	entry = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE, sizeof(*entry));
+	prefix_copy(&entry->nexthop, nexthop);
+	entry->label = MPLS_INVALID_LABEL; /* not yet allocated by zebra */
+	entry->tree = tree;
+	LIST_INIT(&(entry->paths));
+	bgp_label_per_nexthop_cache_add(tree, entry);
+
+	return entry;
+}
+
+/* Look up the cache entry for 'nexthop' in 'tree'; NULL when absent. */
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+			   struct prefix *nexthop)
+{
+	struct bgp_label_per_nexthop_cache lookup = {};
+
+	if (!tree)
+		return NULL;
+
+	/* build a stack key carrying only the nexthop prefix */
+	memcpy(&lookup.nexthop, nexthop, sizeof(struct prefix));
+	return bgp_label_per_nexthop_cache_find(tree, &lookup);
+}
+
+/* Withdraw the label from zebra, release it to the label pool and
+ * destroy the cache entry. Must only be called once no path
+ * references the entry anymore.
+ */
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
+{
+	if (blnc->label != MPLS_INVALID_LABEL) {
+		/* blnc->nh may still be unset (the 'if (blnc->nh)' below
+		 * shows it is optional): only tell zebra to delete the
+		 * label binding when a nexthop resolution was recorded
+		 */
+		if (blnc->nh)
+			bgp_zebra_send_nexthop_label(
+				ZEBRA_MPLS_LABELS_DELETE, blnc->label,
+				blnc->nh->ifindex, blnc->nh->vrf_id,
+				ZEBRA_LSP_BGP, &blnc->nexthop);
+		bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
+	}
+	bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
+	if (blnc->nh)
+		nexthop_free(blnc->nh);
+	blnc->nh = NULL;
+	XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
+}
+
+/* Register the label-per-nexthop show command with the CLI. */
+void bgp_label_per_nexthop_init(void)
+{
+	install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
+}
*/
#define LP_TYPE_VRF 0x00000001
#define LP_TYPE_BGP_LU 0x00000002
+#define LP_TYPE_NEXTHOP 0x00000003
PREDECL_LIST(lp_fifo);
extern void bgp_lp_event_zebra_up(void);
extern void bgp_lp_vty_init(void);
+struct bgp_label_per_nexthop_cache;
+PREDECL_RBTREE_UNIQ(bgp_label_per_nexthop_cache);
+
+extern int
+bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+ const struct bgp_label_per_nexthop_cache *b);
+
+struct bgp_label_per_nexthop_cache {
+
+	/* RB-tree entry. */
+	struct bgp_label_per_nexthop_cache_item entry;
+
+	/* the nexthop prefix is the key of the RB-tree entry */
+	struct prefix nexthop;
+
+	/* calculated label (MPLS_INVALID_LABEL until zebra answers) */
+	mpls_label_t label;
+
+	/* number of paths currently linked on 'paths' */
+	unsigned int path_count;
+
+	/* back pointer to bgp instance */
+	struct bgp *to_bgp;
+
+	/* copy a nexthop resolution from bgp nexthop tracking
+	 * used to extract the interface nexthop
+	 */
+	struct nexthop *nh;
+
+	/* list of path_vrfs using it */
+	LIST_HEAD(path_lists, bgp_path_info) paths;
+
+	/* last time a path was (re)linked, in monotonic time */
+	time_t last_update;
+
+	/* Back pointer to the cache tree this entry belongs to. */
+	struct bgp_label_per_nexthop_cache_head *tree;
+};
+
+DECLARE_RBTREE_UNIQ(bgp_label_per_nexthop_cache,
+ struct bgp_label_per_nexthop_cache, entry,
+ bgp_label_per_nexthop_cache_cmp);
+
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc);
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop);
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop);
+void bgp_label_per_nexthop_init(void);
#endif /* _FRR_BGP_LABELPOOL_H */
/*
* Routes that are redistributed into BGP from zebra do not get
- * nexthop tracking. However, if those routes are subsequently
- * imported to other RIBs within BGP, the leaked routes do not
- * carry the original BGP_ROUTE_REDISTRIBUTE sub_type. Therefore,
- * in order to determine if the route we are currently leaking
- * should have nexthop tracking, we must find the ultimate
- * parent so we can check its sub_type.
+ * nexthop tracking, unless MPLS allocation per nexthop is
+ * performed. In the default case nexthop tracking does not apply,
+ * if those routes are subsequently imported to other RIBs within
+ * BGP, the leaked routes do not carry the original
+ * BGP_ROUTE_REDISTRIBUTE sub_type. Therefore, in order to determine
+ * if the route we are currently leaking should have nexthop
+ * tracking, we must find the ultimate parent so we can check its
+ * sub_type.
*
* As of now, source_bpi may at most be a second-generation route
* (only one hop back to ultimate parent for vrf-vpn-vrf scheme).
return new;
}
+/* Detach a path from its label-per-nexthop cache entry; once no path
+ * references the entry anymore, free it (which also releases the
+ * label).
+ */
+void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi)
+{
+	struct bgp_label_per_nexthop_cache *blnc;
+
+	if (!pi || !pi->label_nexthop_cache)
+		return;
+
+	blnc = pi->label_nexthop_cache;
+	LIST_REMOVE(pi, label_nh_thread);
+	blnc->path_count--;
+	pi->label_nexthop_cache = NULL;
+
+	if (LIST_EMPTY(&(blnc->paths)))
+		bgp_label_per_nexthop_free(blnc);
+}
+
+/* Called upon reception of a ZAPI Message from zebra, about
+ * a new available label.
+ */
+static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
+						void *context, bool allocated)
+{
+	struct bgp_label_per_nexthop_cache *blnc = context;
+	mpls_label_t old_label;
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+	struct bgp_path_info *pi;
+	struct bgp_table *table;
+
+	old_label = blnc->label;
+
+	if (debug)
+		zlog_debug("%s: label=%u, allocated=%d, nexthop=%pFX", __func__,
+			   label, allocated, &blnc->nexthop);
+	if (allocated)
+		/* update the entry with the new label */
+		blnc->label = label;
+	else
+		/*
+		 * previously-allocated label is now invalid
+		 * eg: zebra deallocated the labels and notifies it
+		 */
+		blnc->label = MPLS_INVALID_LABEL;
+
+	if (old_label == blnc->label)
+		return 0; /* no change */
+
+	/* update paths */
+	if (blnc->label != MPLS_INVALID_LABEL)
+		/* NOTE(review): dereferences blnc->nh unconditionally -
+		 * assumes the nexthop copy was installed before zebra
+		 * answered the label request; confirm with the caller
+		 */
+		bgp_zebra_send_nexthop_label(
+			ZEBRA_MPLS_LABELS_ADD, blnc->label, blnc->nh->ifindex,
+			blnc->nh->vrf_id, ZEBRA_LSP_BGP, &blnc->nexthop);
+
+	/* re-advertise every path bound to this nexthop so the exported
+	 * VPN routes pick up the new label value
+	 */
+	LIST_FOREACH (pi, &(blnc->paths), label_nh_thread) {
+		if (!pi->net)
+			continue;
+		table = bgp_dest_table(pi->net);
+		if (!table)
+			continue;
+		vpn_leak_from_vrf_update(blnc->to_bgp, table->bgp, pi);
+	}
+
+	return 0;
+}
+
+/* Get a per label nexthop value:
+ * - Find and return a per label nexthop from the cache
+ * - else allocate a new per label nexthop cache entry and request a
+ *   label to zebra. Return MPLS_INVALID_LABEL
+ */
+static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
+	struct bgp_path_info *pi, struct bgp *to_bgp, struct bgp *from_bgp,
+	afi_t afi, safi_t safi)
+{
+	struct bgp_nexthop_cache *bnc = pi->nexthop;
+	struct bgp_label_per_nexthop_cache *blnc;
+	struct bgp_label_per_nexthop_cache_head *tree;
+	struct prefix *nh_pfx = NULL;
+	struct prefix nh_gate = {0};
+
+	/* extract the nexthop from the BNC nexthop cache */
+	switch (bnc->nexthop->type) {
+	case NEXTHOP_TYPE_IPV4:
+	case NEXTHOP_TYPE_IPV4_IFINDEX:
+		/* the nexthop is recursive */
+		nh_gate.family = AF_INET;
+		nh_gate.prefixlen = IPV4_MAX_BITLEN;
+		IPV4_ADDR_COPY(&nh_gate.u.prefix4, &bnc->nexthop->gate.ipv4);
+		nh_pfx = &nh_gate;
+		break;
+	case NEXTHOP_TYPE_IPV6:
+	case NEXTHOP_TYPE_IPV6_IFINDEX:
+		/* the nexthop is recursive */
+		nh_gate.family = AF_INET6;
+		nh_gate.prefixlen = IPV6_MAX_BITLEN;
+		IPV6_ADDR_COPY(&nh_gate.u.prefix6, &bnc->nexthop->gate.ipv6);
+		nh_pfx = &nh_gate;
+		break;
+	case NEXTHOP_TYPE_IFINDEX:
+		/* the nexthop is directly connected */
+		nh_pfx = &bnc->prefix;
+		break;
+	case NEXTHOP_TYPE_BLACKHOLE:
+		assert(!"Blackhole nexthop. Already checked by the caller.");
+	}
+
+	/* find or allocate a nexthop label cache entry */
+	tree = &from_bgp->mpls_labels_per_nexthop[family2afi(nh_pfx->family)];
+	blnc = bgp_label_per_nexthop_find(tree, nh_pfx);
+	if (!blnc) {
+		blnc = bgp_label_per_nexthop_new(tree, nh_pfx);
+		blnc->to_bgp = to_bgp;
+		/* request a label to zebra for this nexthop
+		 * the response from zebra will trigger the callback
+		 */
+		bgp_lp_get(LP_TYPE_NEXTHOP, blnc,
+			   bgp_mplsvpn_get_label_per_nexthop_cb);
+	}
+
+	if (pi->label_nexthop_cache == blnc)
+		/* no change */
+		return blnc->label;
+
+	/* Unlink from any existing nexthop cache. Free the entry if unused.
+	 */
+	bgp_mplsvpn_path_nh_label_unlink(pi);
+	if (blnc) {
+		/* updates NHT pi list reference */
+		LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
+		pi->label_nexthop_cache = blnc;
+		pi->label_nexthop_cache->path_count++;
+		blnc->last_update = monotime(NULL);
+	}
+
+	/* then add or update the selected nexthop */
+	if (!blnc->nh)
+		blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+	else if (!nexthop_same(bnc->nexthop, blnc->nh)) {
+		/* the nexthop resolution changed: refresh the stored copy
+		 * and move an already-allocated label onto the new nexthop
+		 */
+		nexthop_free(blnc->nh);
+		blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+		if (blnc->label != MPLS_INVALID_LABEL) {
+			bgp_zebra_send_nexthop_label(
+				ZEBRA_MPLS_LABELS_REPLACE, blnc->label,
+				bnc->nexthop->ifindex, bnc->nexthop->vrf_id,
+				ZEBRA_LSP_BGP, &blnc->nexthop);
+		}
+	}
+
+	return blnc->label;
+}
+
+/* Filter out all the cases where a per nexthop label is not possible:
+ * - return an invalid label when the nexthop is invalid
+ * - return the per VRF label when the per nexthop label is not supported
+ * Otherwise, find or request a per label nexthop.
+ */
+static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
+	afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp *from_bgp,
+	struct bgp *to_bgp)
+{
+	struct bgp_path_info *bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
+	struct bgp *bgp_nexthop = NULL;
+	bool nh_valid;
+	afi_t nh_afi;
+	bool is_bgp_static_route;
+
+	/* 'network' routes announced by BGP itself are special: their
+	 * nexthop may legitimately not resolve, so most of the sanity
+	 * checks below are skipped for them
+	 */
+	is_bgp_static_route = bpi_ultimate->sub_type == BGP_ROUTE_STATIC &&
+			      bpi_ultimate->type == ZEBRA_ROUTE_BGP;
+
+	if (is_bgp_static_route == false && afi == AFI_IP &&
+	    CHECK_FLAG(pi->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) &&
+	    (pi->attr->nexthop.s_addr == INADDR_ANY ||
+	     !ipv4_unicast_valid(&pi->attr->nexthop))) {
+		/* IPv4 nexthop in standard BGP encoding format.
+		 * Format of address is not valid (not any, not unicast).
+		 * Fallback to the per VRF label.
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return from_bgp->vpn_policy[afi].tovpn_label;
+	}
+
+	if (is_bgp_static_route == false && afi == AFI_IP &&
+	    pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4 &&
+	    (pi->attr->mp_nexthop_global_in.s_addr == INADDR_ANY ||
+	     !ipv4_unicast_valid(&pi->attr->mp_nexthop_global_in))) {
+		/* IPv4 nexthop is in MP-BGP encoding format.
+		 * Format of address is not valid (not any, not unicast).
+		 * Fallback to the per VRF label.
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return from_bgp->vpn_policy[afi].tovpn_label;
+	}
+
+	if (is_bgp_static_route == false && afi == AFI_IP6 &&
+	    (pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL ||
+	     pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) &&
+	    (IN6_IS_ADDR_UNSPECIFIED(&pi->attr->mp_nexthop_global) ||
+	     IN6_IS_ADDR_LOOPBACK(&pi->attr->mp_nexthop_global) ||
+	     IN6_IS_ADDR_MULTICAST(&pi->attr->mp_nexthop_global))) {
+		/* IPv6 nexthop is in MP-BGP encoding format.
+		 * Format of address is not valid
+		 * Fallback to the per VRF label.
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return from_bgp->vpn_policy[afi].tovpn_label;
+	}
+
+	/* Check the next-hop reachability.
+	 * Get the bgp instance where the bgp_path_info originates.
+	 */
+	if (pi->extra && pi->extra->bgp_orig)
+		bgp_nexthop = pi->extra->bgp_orig;
+	else
+		bgp_nexthop = from_bgp;
+
+	nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
+	nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, safi,
+					   pi, NULL, 0, NULL);
+
+	if (!nh_valid && is_bgp_static_route &&
+	    !CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
+		/* "network" prefixes not routable, but since 'no bgp network
+		 * import-check' is configured, they are always valid in the BGP
+		 * table. Fallback to the per-vrf label
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return from_bgp->vpn_policy[afi].tovpn_label;
+	}
+
+	if (!nh_valid || !pi->nexthop || pi->nexthop->nexthop_num == 0 ||
+	    !pi->nexthop->nexthop) {
+		/* invalid next-hop:
+		 * do not send the per-vrf label
+		 * otherwise, when the next-hop becomes valid,
+		 * we will have 2 BGP updates:
+		 * - one with the per-vrf label
+		 * - the second with the per-nexthop label
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return MPLS_INVALID_LABEL;
+	}
+
+	if (pi->nexthop->nexthop_num > 1 ||
+	    pi->nexthop->nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
+		/* Blackhole or ECMP routes
+		 * is not compatible with per-nexthop label.
+		 * Fallback to per-vrf label.
+		 */
+		bgp_mplsvpn_path_nh_label_unlink(pi);
+		return from_bgp->vpn_policy[afi].tovpn_label;
+	}
+
+	return _vpn_leak_from_vrf_get_per_nexthop_label(pi, to_bgp, from_bgp,
+							afi, safi);
+}
+
/* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
struct bgp *from_bgp, /* from */
nexthop_self_flag = 1;
}
- label_val = from_bgp->vpn_policy[afi].tovpn_label;
- if (label_val == MPLS_LABEL_NONE) {
+ if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ /* per nexthop label mode */
+ label_val = vpn_leak_from_vrf_get_per_nexthop_label(
+ afi, safi, path_vrf, from_bgp, to_bgp);
+ else
+ /* per VRF label mode */
+ label_val = from_bgp->vpn_policy[afi].tovpn_label;
+
+ if (label_val == MPLS_INVALID_LABEL &&
+ CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP)) {
+ /* no valid label for the moment
+ * when the 'bgp_mplsvpn_get_label_per_nexthop_cb' callback gets
+ * a valid label value, it will call the current function again.
+ */
+ if (debug)
+ zlog_debug(
+ "%s: %s skipping: waiting for a valid per-label nexthop.",
+ __func__, from_bgp->name_pretty);
+ return;
+ }
+ if (label_val == MPLS_LABEL_NONE)
encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
- } else {
+ else
encode_label(label_val, &label);
- }
/* Set originator ID to "me" */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
bgp_process(to_bgp, bn, afi, safi);
+ bgp_mplsvpn_path_nh_label_unlink(
+ bpi->extra->parent);
}
}
}
#define BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH 20
extern void bgp_mplsvpn_init(void);
+extern void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi);
extern int bgp_nlri_parse_vpn(struct peer *, struct attr *, struct bgp_nlri *);
extern uint32_t decode_label(mpls_label_t *);
extern void encode_label(mpls_label_t, mpls_label_t *);
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String");
while (!LIST_EMPTY(&(bnc->paths))) {
struct bgp_path_info *path = LIST_FIRST(&(bnc->paths));
+ bgp_mplsvpn_path_nh_label_unlink(path);
+
path_nh_map(path, bnc, false);
}
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
extern struct zclient *zclient;
{
struct bgp_nexthop_cache *bnc = path->nexthop;
+ bgp_mplsvpn_path_nh_label_unlink(path);
+
if (!bnc)
return;
}
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
- if (!(path->type == ZEBRA_ROUTE_BGP
- && ((path->sub_type == BGP_ROUTE_NORMAL)
- || (path->sub_type == BGP_ROUTE_STATIC)
- || (path->sub_type == BGP_ROUTE_IMPORTED))))
+ if (path->type == ZEBRA_ROUTE_BGP &&
+ (path->sub_type == BGP_ROUTE_NORMAL ||
+ path->sub_type == BGP_ROUTE_STATIC ||
+ path->sub_type == BGP_ROUTE_IMPORTED))
+ /* evaluate the path */
+ ;
+ else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE) {
+ /* evaluate the path for redistributed routes
+ * except those from VNC
+ */
+ if ((path->type == ZEBRA_ROUTE_VNC) ||
+ (path->type == ZEBRA_ROUTE_VNC_DIRECT))
+ continue;
+ } else
+ /* don't evaluate the path */
continue;
dest = path->net;
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
- if (path_valid != bnc_is_valid_nexthop) {
+ if (path->type == ZEBRA_ROUTE_BGP &&
+ path->sub_type == BGP_ROUTE_STATIC &&
+ !CHECK_FLAG(bgp_path->flags, BGP_FLAG_IMPORT_CHECK))
+ /* static routes with 'no bgp network import-check' are
+ * always valid. if nht is called with static routes,
+ * the vpn exportation needs to be triggered
+ */
+ vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+ path);
+ else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE &&
+ safi == SAFI_UNICAST &&
+ (bgp_path->inst_type == BGP_INSTANCE_TYPE_VRF ||
+ bgp_path->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
+ /* redistribute routes are always valid
+ * if nht is called with redistribute routes, the vpn
+ * exportation needs to be triggered
+ */
+ vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+ path);
+ else if (path_valid != bnc_is_valid_nexthop) {
if (path_valid) {
/* No longer valid, clear flag; also for EVPN
* routes, unimport from VRFs if needed.
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_unimport_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
+ if (safi == SAFI_UNICAST &&
+ (bgp_path->inst_type !=
+ BGP_INSTANCE_TYPE_VIEW))
+ vpn_leak_from_vrf_withdraw(
+ bgp_get_default(), bgp_path,
+ path);
} else {
/* Path becomes valid, set flag; also for EVPN
* routes, import from VRFs if needed.
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_import_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
+ if (safi == SAFI_UNICAST &&
+ (bgp_path->inst_type !=
+ BGP_INSTANCE_TYPE_VIEW))
+ vpn_leak_from_vrf_update(
+ bgp_get_default(), bgp_path,
+ path);
}
}
*/
assert(attr.aspath);
+ if (p->family == AF_INET6)
+ UNSET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP));
+
switch (nhtype) {
case NEXTHOP_TYPE_IFINDEX:
switch (p->family) {
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,
case NEXTHOP_TYPE_IPV4_IFINDEX:
attr.nexthop = nexthop->ipv4;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in = nexthop->ipv4;
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,
/* Addpath identifiers */
uint32_t addpath_rx_id;
struct bgp_addpath_info_data tx_addpath;
+
+ /* For nexthop per label linked list */
+ LIST_ENTRY(bgp_path_info) label_nh_thread;
+
+ /* Back pointer to the bgp label per nexthop structure */
+ struct bgp_label_per_nexthop_cache *label_nexthop_cache;
};
/* Structure used in BGP path selection */
route_match_ip_next_hop_type_free
};
+/* `match source-protocol` */
+static enum route_map_cmd_result_t
+route_match_source_protocol(void *rule, const struct prefix *prefix,
+			    void *object)
+{
+	int *protocol = rule;
+	struct bgp_path_info *path = object;
+
+	/* match when the path was learnt via the configured protocol */
+	if (path && path->type == *protocol)
+		return RMAP_MATCH;
+
+	return RMAP_NOMATCH;
+}
+
+/* Compile the protocol-name argument into its zebra route type. */
+static void *route_match_source_protocol_compile(const char *arg)
+{
+	int *protocol = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(*protocol));
+
+	*protocol = proto_name2num(arg);
+
+	return protocol;
+}
+
+/* Free a compiled source-protocol match rule. */
+static void route_match_source_protocol_free(void *rule)
+{
+	XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+/* Route-map rule descriptor for 'match source-protocol'. */
+static const struct route_map_rule_cmd route_match_source_protocol_cmd = {
+	"source-protocol",
+	route_match_source_protocol,
+	route_match_source_protocol_compile,
+	route_match_source_protocol_free
+};
+
+
/* `match ip route-source prefix-list PREFIX_LIST' */
static enum route_map_cmd_result_t
return nb_cli_apply_changes(vty, NULL);
}
+/* CLI: 'match source-protocol PROTOCOL' - enqueue the match condition
+ * into the candidate northbound configuration.
+ */
+DEFPY_YANG (match_source_protocol,
+	    match_source_protocol_cmd,
+	    "match source-protocol " FRR_REDIST_STR_ZEBRA "$proto",
+	    MATCH_STR
+	    "Match protocol via which the route was learnt\n"
+	    FRR_REDIST_HELP_STR_ZEBRA)
+{
+	const char *xpath =
+		"./match-condition[condition='frr-bgp-route-map:source-protocol']";
+	char xpath_value[XPATH_MAXLEN];
+
+	/* create the condition node, then set its protocol value */
+	nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+	snprintf(xpath_value, sizeof(xpath_value),
+		 "%s/rmap-match-condition/frr-bgp-route-map:source-protocol",
+		 xpath);
+	nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, proto);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI: 'no match source-protocol [PROTOCOL]' - remove the condition
+ * from the candidate northbound configuration.
+ */
+DEFPY_YANG (no_match_source_protocol,
+	    no_match_source_protocol_cmd,
+	    "no match source-protocol [" FRR_REDIST_STR_ZEBRA "]",
+	    NO_STR
+	    MATCH_STR
+	    "Match protocol via which the route was learnt\n"
+	    FRR_REDIST_HELP_STR_ZEBRA)
+{
+	const char *xpath =
+		"./match-condition[condition='frr-bgp-route-map:source-protocol']";
+
+	nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
/* Initialization of route map. */
void bgp_route_map_init(void)
{
route_map_install_match(&route_match_ip_address_prefix_list_cmd);
route_map_install_match(&route_match_ip_next_hop_prefix_list_cmd);
route_map_install_match(&route_match_ip_next_hop_type_cmd);
+ route_map_install_match(&route_match_source_protocol_cmd);
route_map_install_match(&route_match_ip_route_source_prefix_list_cmd);
route_map_install_match(&route_match_aspath_cmd);
route_map_install_match(&route_match_community_cmd);
install_element(RMAP_NODE, &set_ipv6_nexthop_peer_cmd);
install_element(RMAP_NODE, &no_set_ipv6_nexthop_peer_cmd);
install_element(RMAP_NODE, &match_rpki_extcommunity_cmd);
+ install_element(RMAP_NODE, &match_source_protocol_cmd);
+ install_element(RMAP_NODE, &no_match_source_protocol_cmd);
#ifdef HAVE_SCRIPTING
install_element(RMAP_NODE, &match_script_cmd);
#endif
.destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_destroy,
}
},
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol",
+ .cbs = {
+ .modify = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify,
+ .destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy,
+ }
+ },
{
.xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:peer-ipv4-address",
.cbs = {
struct nb_cb_modify_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_rpki_extcommunity_destroy(
struct nb_cb_destroy_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+ struct nb_cb_destroy_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_probability_modify(struct nb_cb_modify_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_probability_destroy(struct nb_cb_destroy_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_modify(struct nb_cb_modify_args *args);
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol
+ */
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct routemap_hook_context *rhc;
+	enum rmap_compile_rets ret;
+	const char *proto;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		/* nothing to do outside of the apply phase */
+		break;
+	case NB_EV_APPLY:
+		/* Add configuration. */
+		rhc = nb_running_get_entry(args->dnode, NULL, true);
+		proto = yang_dnode_get_string(args->dnode, NULL);
+
+		/* Set destroy information. */
+		rhc->rhc_mhook = bgp_route_match_delete;
+		rhc->rhc_rule = "source-protocol";
+		rhc->rhc_event = RMAP_EVENT_MATCH_DELETED;
+
+		ret = bgp_route_match_add(rhc->rhc_rmi, "source-protocol",
+					  proto, RMAP_EVENT_MATCH_ADDED,
+					  args->errmsg, args->errmsg_len);
+
+		if (ret != RMAP_COMPILE_SUCCESS) {
+			/* rule did not compile: undo the destroy hook */
+			rhc->rhc_mhook = NULL;
+			return NB_ERR_INCONSISTENCY;
+		}
+	}
+
+	return NB_OK;
+}
+
+/* Northbound destroy callback for the source-protocol match condition. */
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return lib_route_map_entry_match_destroy(args);
+	}
+
+	return NB_OK;
+}
+
/*
* XPath:
* /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:rpki-extcommunity
"Between current address-family and vpn\n"
"For routes leaked from current address-family to vpn\n")
+/* CLI: '[no] label vpn export allocation-mode <per-vrf|per-nexthop>'
+ * Toggles BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP and re-exports the
+ * VPN routes when the mode actually changes.
+ */
+DEFPY(af_label_vpn_export_allocation_mode,
+      af_label_vpn_export_allocation_mode_cmd,
+      "[no$no] label vpn export allocation-mode <per-vrf$label_per_vrf|per-nexthop$label_per_nh>",
+      NO_STR
+      "label value for VRF\n"
+      "Between current address-family and vpn\n"
+      "For routes leaked from current address-family to vpn\n"
+      "Label allocation mode\n"
+      "Allocate one label for all BGP updates of the VRF\n"
+      "Allocate a label per connected next-hop in the VRF\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	afi_t afi;
+	bool old_per_nexthop, new_per_nexthop;
+
+	afi = vpn_policy_getafi(vty, bgp, false);
+	/* vpn_policy_getafi() returns AFI_MAX on error: do not use it
+	 * to index bgp->vpn_policy[]
+	 */
+	if (afi == AFI_MAX)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags,
+				       BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+	if (no) {
+		/* 'no ... <mode>' must name the currently configured mode */
+		if (old_per_nexthop == false && label_per_nh)
+			return CMD_ERR_NO_MATCH;
+		if (old_per_nexthop == true && label_per_vrf)
+			return CMD_ERR_NO_MATCH;
+		new_per_nexthop = false;
+	} else {
+		if (label_per_nh)
+			new_per_nexthop = true;
+		else
+			new_per_nexthop = false;
+	}
+
+	/* no change */
+	if (old_per_nexthop == new_per_nexthop)
+		return CMD_SUCCESS;
+
+	/*
+	 * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+	 */
+	vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+			   bgp);
+
+	if (new_per_nexthop)
+		SET_FLAG(bgp->vpn_policy[afi].flags,
+			 BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+	else
+		UNSET_FLAG(bgp->vpn_policy[afi].flags,
+			   BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+
+	/* post-change: re-export vpn routes */
+	vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+			    bgp);
+
+	hook_call(bgp_snmp_update_last_changed, bgp);
+	return CMD_SUCCESS;
+}
+
DEFPY (af_label_vpn_export,
af_label_vpn_export_cmd,
"[no] label vpn export <(0-1048575)$label_val|auto$label_auto>",
}
}
+ if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ vty_out(vty,
+ "%*slabel vpn export allocation-mode per-nexthop\n",
+ indent, "");
+
tovpn_sid_index = bgp->vpn_policy[afi].tovpn_sid_index;
if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO)) {
install_element(BGP_IPV6_NODE, &af_rd_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_label_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_label_vpn_export_cmd);
+ install_element(BGP_IPV4_NODE,
+ &af_label_vpn_export_allocation_mode_cmd);
+ install_element(BGP_IPV6_NODE,
+ &af_label_vpn_export_allocation_mode_cmd);
install_element(BGP_IPV4_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_rt_vpn_imexport_cmd);
{
return srv6_manager_release_locator_chunk(zclient, name);
}
+
+/* Build a single-nexthop zapi_labels message for prefix 'p' and send
+ * it to zebra ('cmd' selects add/replace/delete).
+ */
+void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+				  ifindex_t ifindex, vrf_id_t vrf_id,
+				  enum lsp_types_t ltype, struct prefix *p)
+{
+	struct zapi_labels zl = {};
+	struct zapi_nexthop *znh;
+	bool is_v4 = (p->family == AF_INET);
+
+	zl.type = ltype;
+	zl.local_label = label;
+	zl.nexthop_num = 1;
+
+	znh = &zl.nexthops[0];
+	if (is_v4)
+		IPV4_ADDR_COPY(&znh->gate.ipv4, &p->u.prefix4);
+	else
+		IPV6_ADDR_COPY(&znh->gate.ipv6, &p->u.prefix6);
+
+	if (ifindex == IFINDEX_INTERNAL)
+		znh->type = is_v4 ? NEXTHOP_TYPE_IPV4 : NEXTHOP_TYPE_IPV6;
+	else
+		znh->type = is_v4 ? NEXTHOP_TYPE_IPV4_IFINDEX
+				  : NEXTHOP_TYPE_IPV6_IFINDEX;
+	znh->ifindex = ifindex;
+	znh->vrf_id = vrf_id;
+	znh->label_num = 0;
+
+	/* vrf_id is DEFAULT_VRF */
+	zebra_send_mpls_labels(zclient, cmd, &zl);
+}
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
+extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+ ifindex_t index, vrf_id_t vrfid,
+ enum lsp_types_t ltype,
+ struct prefix *p);
#endif /* _QUAGGA_BGP_ZEBRA_H */
SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
}
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++)
+ bgp_label_per_nexthop_cache_init(
+ &bgp->mpls_labels_per_nexthop[afi]);
+
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
bgp_lp_vty_init();
+ bgp_label_per_nexthop_init();
+
cmd_variable_handler_register(bgp_viewvrf_var_handlers);
}
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
+#define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
/*
* If we are importing another vrf into us keep a list of
/* Allocate MPLS labels */
uint8_t allocate_mpls_labels[AFI_MAX][SAFI_MAX];
+ /* Tree for next-hop lookup cache. */
+ struct bgp_label_per_nexthop_cache_head
+ mpls_labels_per_nexthop[AFI_MAX];
+
/* Allocate hash entries to store policy routing information
* The hash are used to host pbr rules somewhere.
* Actually, pbr will only be used by flowspec
extended community values as described in
:ref:`bgp-extended-communities-attribute`.
+.. clicmd:: label vpn export allocation-mode per-vrf|per-nexthop
+
+ Select how labels are allocated in the given VRF. By default, the `per-vrf`
+ mode is selected, and one label is used for all prefixes from the VRF. The
+   `per-nexthop` mode will use a unique label for all prefixes that are reachable
+ via the same nexthop.
+
.. clicmd:: label vpn export (0..1048575)|auto
Enables an MPLS label to be attached to a route exported from the current
If `poisoned-reverse` is also set, the router sends the poisoned routes
with highest metric back to the sending router.
+.. clicmd:: allow-ecmp [1-MULTIPATH_NUM]
+
+ Control how many ECMP paths RIP can inject for the same prefix. If specified
+   without a number, the compile-time maximum (set with ``--enable-multipath``) is used.
+
.. _rip-version-control:
RIP Version Control
.. clicmd:: match source-protocol PROTOCOL_NAME
- This is a ZEBRA specific match command. Matches the
+ This is a ZEBRA and BGP specific match command. Matches the
originating protocol specified.
.. clicmd:: match source-instance NUMBER
(strmatch(C, "frr-zebra-route-map:ipv4-next-hop-prefix-length"))
#define IS_MATCH_SRC_PROTO(C) \
(strmatch(C, "frr-zebra-route-map:source-protocol"))
+#define IS_MATCH_BGP_SRC_PROTO(C) \
+ (strmatch(C, "frr-bgp-route-map:source-protocol"))
#define IS_MATCH_SRC_INSTANCE(C) \
(strmatch(C, "frr-zebra-route-map:source-instance"))
/* BGP route-map match conditions */
yang_dnode_get_string(
dnode,
"./rmap-match-condition/frr-zebra-route-map:ipv4-prefix-length"));
- } else if (IS_MATCH_SRC_PROTO(condition)) {
+ } else if (IS_MATCH_SRC_PROTO(condition) ||
+ IS_MATCH_BGP_SRC_PROTO(condition)) {
vty_out(vty, " match source-protocol %s\n",
yang_dnode_get_string(
dnode,
- "./rmap-match-condition/frr-zebra-route-map:source-protocol"));
+ IS_MATCH_SRC_PROTO(condition)
+ ? "./rmap-match-condition/frr-zebra-route-map:source-protocol"
+ : "./rmap-match-condition/frr-bgp-route-map:source-protocol"));
} else if (IS_MATCH_SRC_INSTANCE(condition)) {
vty_out(vty, " match source-instance %s\n",
yang_dnode_get_string(
* session. Dump it, but increment past it's seqnum.
*/
assert(!ospf_opaque_is_owned(old));
+ if (IS_DEBUG_OSPF_CLIENT_API)
+ zlog_debug(
+ "LSA[Type%d:%pI4]: OSPF API Server Originate LSA Old Seq: 0x%x Age: %d",
+ old->data->type, &old->data->id,
+ ntohl(old->data->ls_seqnum),
+ ntohl(old->data->ls_age));
if (IS_LSA_MAX_SEQ(old)) {
flog_warn(EC_OSPF_LSA_INSTALL_FAILURE,
"%s: old LSA at maxseq", __func__);
lsa->data->ls_seqnum = lsa_seqnum_increment(old);
ospf_discard_from_db(ospf, old->lsdb, old);
}
+ if (IS_DEBUG_OSPF_CLIENT_API)
+ zlog_debug(
+ "LSA[Type%d:%pI4]: OSPF API Server Originate LSA New Seq: 0x%x Age: %d",
+ lsa->data->type, &lsa->data->id,
+ ntohl(lsa->data->ls_seqnum), ntohl(lsa->data->ls_age));
/* Install this LSA into LSDB. */
if (ospf_lsa_install(ospf, lsa->oi, lsa) == NULL) {
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
assert(ospf);
+ if (IS_DEBUG_OSPF(lsa, LSA_GENERATE)) {
+ zlog_debug("LSA[Type%d:%pI4]: OSPF API Server LSA Refresher",
+ lsa->data->type, &lsa->data->id);
+ }
+
apiserv = lookup_apiserver_by_lsa(lsa);
if (!apiserv) {
zlog_warn("%s: LSA[%s]: No apiserver?", __func__,
goto out;
}
- if (IS_LSA_MAXAGE(lsa)) {
- ospf_opaque_lsa_flush_schedule(lsa);
- goto out;
- }
-
/* Check if updated version of LSA instance has already prepared. */
new = ospf_lsdb_lookup(&apiserv->reserve, lsa);
if (!new) {
+ if (IS_LSA_MAXAGE(lsa)) {
+ ospf_opaque_lsa_flush_schedule(lsa);
+ goto out;
+ }
+
/* This is a periodic refresh, driven by core OSPF mechanism. */
new = ospf_apiserver_opaque_lsa_new(lsa->area, lsa->oi,
lsa->data);
cost = 1;
else if (cost > 65535)
cost = 65535;
+
+ if (if_is_loopback(oi->ifp))
+ cost = 0;
}
return cost;
mask.s_addr = 0xffffffff;
id.s_addr = oi->address->u.prefix4.s_addr;
- return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0, 0);
+ return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0,
+ oi->output_cost);
}
/* Describe Virtual Link. */
lsa->data->type, &lsa->data->id);
/*
- * Since these LSA entries are not yet installed into corresponding
- * LSDB, just flush them without calling ospf_ls_maxage() afterward.
+ * Install the stale LSA into the Link State Database, add it to the
+ * MaxAge list, and flush it from the OSPF routing domain. For other
+ * LSA types, the installation is done in the refresh function. It is
+ * done inline here since the opaque refresh function is dynamically
+ * registered when opaque LSAs are originated (which is not the case
+ * for stale LSAs).
*/
lsa->data->ls_age = htons(OSPF_LSA_MAXAGE);
+ ospf_lsa_install(
+ top, (lsa->data->type == OSPF_OPAQUE_LINK_LSA) ? nbr->oi : NULL,
+ lsa);
+ ospf_lsa_maxage(top, lsa);
+
switch (lsa->data->type) {
case OSPF_OPAQUE_LINK_LSA:
- ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
- break;
case OSPF_OPAQUE_AREA_LSA:
ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
break;
__func__, lsa->data->type);
return;
}
- ospf_lsa_discard(lsa); /* List "lsas" will be deleted by caller. */
}
/*------------------------------------------------------------------------*
if (current == NULL) {
if (IS_DEBUG_OSPF_EVENT)
zlog_debug(
- "LSA[%s]: Previously originated Opaque-LSA,not found in the LSDB.",
+ "LSA[%s]: Previously originated Opaque-LSA, not found in the LSDB.",
dump_lsa_key(lsa));
SET_FLAG(lsa->flags, OSPF_LSA_SELF);
q_spaces_fini(p_space->q_spaces);
XFREE(MTYPE_OSPF_Q_SPACE, p_space->q_spaces);
+ XFREE(MTYPE_OSPF_P_SPACE, p_space);
}
p_spaces_fini(area->p_spaces);
/*
* XPath: /frr-ripd:ripd/instance/allow-ecmp
*/
-DEFPY_YANG (rip_allow_ecmp,
+DEFUN_YANG (rip_allow_ecmp,
rip_allow_ecmp_cmd,
- "[no] allow-ecmp",
+ "allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
+ "Allow Equal Cost MultiPath\n"
+ "Number of paths\n")
+{
+	int idx_number = 1;
+	char mpaths[4] = {};
+	uint32_t paths = MULTIPATH_NUM;
+
+	/* The path-count argument is optional; argv only holds argc entries,
+	 * so guard with argc instead of dereferencing past the array. */
+	if (argc > idx_number)
+		paths = strtol(argv[idx_number]->arg, NULL, 10);
+	snprintf(mpaths, sizeof(mpaths), "%u", paths);
+
+	nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, mpaths);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN_YANG (no_rip_allow_ecmp,
+ no_rip_allow_ecmp_cmd,
+ "no allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
NO_STR
- "Allow Equal Cost MultiPath\n")
+ "Allow Equal Cost MultiPath\n"
+ "Number of paths\n")
{
-	nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY,
-			      no ? "false" : "true");
+	/* NOTE(review): a NULL value presumably restores the YANG default
+	 * (ECMP disabled, i.e. 0) — confirm against the frr-ripd model. */
+	nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, 0);
	return nb_cli_apply_changes(vty, NULL);
}
void cli_show_rip_allow_ecmp(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
- if (!yang_dnode_get_bool(dnode, NULL))
- vty_out(vty, " no");
+ uint8_t paths;
+
+ paths = yang_dnode_get_uint8(dnode, NULL);
- vty_out(vty, " allow-ecmp\n");
+ if (!paths)
+ vty_out(vty, " no allow-ecmp\n");
+ else
+ vty_out(vty, " allow-ecmp %d\n", paths);
}
/*
install_element(RIP_NODE, &rip_no_distribute_list_cmd);
install_element(RIP_NODE, &rip_allow_ecmp_cmd);
+ install_element(RIP_NODE, &no_rip_allow_ecmp_cmd);
install_element(RIP_NODE, &rip_default_information_originate_cmd);
install_element(RIP_NODE, &rip_default_metric_cmd);
install_element(RIP_NODE, &no_rip_default_metric_cmd);
return NB_OK;
rip = nb_running_get_entry(args->dnode, NULL, true);
- rip->ecmp = yang_dnode_get_bool(args->dnode, NULL);
- if (!rip->ecmp)
+ rip->ecmp = yang_dnode_get_uint8(args->dnode, NULL);
+ if (!rip->ecmp) {
rip_ecmp_disable(rip);
+ return NB_OK;
+ }
+
+ rip_ecmp_change(rip);
return NB_OK;
}
/* All information about zebra. */
struct zclient *zclient = NULL;
+uint32_t zebra_ecmp_count = MULTIPATH_NUM;
/* Send ECMP routes to zebra. */
static void rip_zebra_ipv4_send(struct rip *rip, struct route_node *rp,
struct zapi_nexthop *api_nh;
struct listnode *listnode = NULL;
struct rip_info *rinfo = NULL;
- int count = 0;
+ uint32_t count = 0;
memset(&api, 0, sizeof(api));
api.vrf_id = rip->vrf->vrf_id;
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- if (count >= MULTIPATH_NUM)
+ if (count >= zebra_ecmp_count)
break;
api_nh = &api.nexthops[count];
api_nh->vrf_id = rip->vrf->vrf_id;
[ZEBRA_REDISTRIBUTE_ROUTE_DEL] = rip_zebra_read_route,
};
+/* Zebra capabilities callback: clamp the ECMP path count ripd will use to
+ * what zebra reports it supports (never raises the current limit). */
+static void rip_zebra_capabilities(struct zclient_capabilities *cap)
+{
+	zebra_ecmp_count = MIN(cap->ecmp, zebra_ecmp_count);
+}
+
void rip_zclient_init(struct event_loop *master)
{
/* Set default value to the zebra client structure. */
array_size(rip_handlers));
zclient_init(zclient, ZEBRA_ROUTE_RIP, 0, &ripd_privs);
zclient->zebra_connected = rip_zebra_connected;
+ zclient->zebra_capabilities = rip_zebra_capabilities;
}
void rip_zclient_stop(void)
{
struct route_node *rp = rinfo_new->rp;
struct rip_info *rinfo = NULL;
+ struct rip_info *rinfo_exist = NULL;
struct list *list = NULL;
+ struct listnode *node = NULL;
+ struct listnode *nnode = NULL;
if (rp->info == NULL)
rp->info = list_new();
if (listcount(list) && !rip->ecmp)
return NULL;
+ /* Add or replace an existing ECMP path with lower neighbor IP */
+ if (listcount(list) && listcount(list) >= rip->ecmp) {
+ struct rip_info *from_highest = NULL;
+
+ /* Find the rip_info struct that has the highest nexthop IP */
+ for (ALL_LIST_ELEMENTS(list, node, nnode, rinfo_exist))
+ if (!from_highest ||
+ (from_highest &&
+ IPV4_ADDR_CMP(&rinfo_exist->from,
+ &from_highest->from) > 0)) {
+ from_highest = rinfo_exist;
+ }
+
+ /* If we have a route in ECMP group, delete the old
+ * one that has a higher next-hop address. Lower IP is
+ * preferred.
+ */
+ if (rip->ecmp > 1 && from_highest &&
+ IPV4_ADDR_CMP(&from_highest->from, &rinfo_new->from) > 0) {
+ rip_ecmp_delete(rip, from_highest);
+ goto add_or_replace;
+ }
+
+ return NULL;
+ }
+
+add_or_replace:
rinfo = rip_info_new();
memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
listnode_add(list, rinfo);
return RB_FIND(rip_instance_head, &rip_instances, &rip);
}
+/* Update ECMP routes to zebra when `allow-ecmp` changed.
+ *
+ * Walk the whole RIP table; for every prefix holding more ECMP paths than
+ * the new limit, repeatedly evict the path learned from the numerically
+ * highest neighbor ("from") address until the group fits. Lower source IP
+ * is preferred, matching the preference applied when paths are added.
+ */
+void rip_ecmp_change(struct rip *rip)
+{
+	struct route_node *rp;
+	struct rip_info *rinfo;
+	struct list *list;
+	struct listnode *node, *nextnode;
+
+	for (rp = route_top(rip->table); rp; rp = route_next(rp)) {
+		list = rp->info;
+		if (list && listcount(list) > 1) {
+			while (listcount(list) > rip->ecmp) {
+				struct rip_info *from_highest = NULL;
+
+				/* Pick the entry with the highest neighbor
+				 * address as the eviction victim. */
+				for (ALL_LIST_ELEMENTS(list, node, nextnode,
+						       rinfo)) {
+					if (!from_highest ||
+					    (from_highest &&
+					     IPV4_ADDR_CMP(
+						     &rinfo->from,
+						     &from_highest->from) > 0))
+						from_highest = rinfo;
+				}
+
+				rip_ecmp_delete(rip, from_highest);
+			}
+		}
+	}
+}
+
/* Create new RIP instance and set it to global variable. */
struct rip *rip_create(const char *vrf_name, struct vrf *vrf, int socket)
{
rip->vrf_name = XSTRDUP(MTYPE_RIP_VRF_NAME, vrf_name);
/* Set initial value. */
- rip->ecmp = yang_get_default_bool("%s/allow-ecmp", RIP_INSTANCE);
+ rip->ecmp = yang_get_default_uint8("%s/allow-ecmp", RIP_INSTANCE);
rip->default_metric =
yang_get_default_uint8("%s/default-metric", RIP_INSTANCE);
rip->distance =
struct route_table *distance_table;
/* RIP ECMP flag */
- bool ecmp;
+ uint8_t ecmp;
/* Are we in passive-interface default mode? */
bool passive_default;
DECLARE_HOOK(rip_ifaddr_add, (struct connected * ifc), (ifc));
DECLARE_HOOK(rip_ifaddr_del, (struct connected * ifc), (ifc));
+extern void rip_ecmp_change(struct rip *rip);
+
#endif /* _ZEBRA_RIP_H */
# tgen.mininet_cli()
+def get_shut_msg_count(tgen):
+    """Count 'Cease/Administrative Shutdown' NOTIFICATIONs in bgpd.log.
+
+    Returns a dict keyed by router number (r2 and r4) with the number of
+    shutdown notifications each router has logged so far; 0 when grep
+    produces no parseable count (e.g. log file missing).
+    """
+    shuts = {}
+    for rtrNum in [2, 4]:
+        shutmsg = tgen.net["r{}".format(rtrNum)].cmd_nostatus(
+            'grep -c "NOTIFICATION.*Cease/Administrative Shutdown" bgpd.log', warn=False
+        )
+        try:
+            shuts[rtrNum] = int(shutmsg.strip())
+        except ValueError:
+            # Non-numeric grep output -> treat as zero notifications seen.
+            shuts[rtrNum] = 0
+    return shuts
+
+
def test_bgp_shutdown():
"Test BGP instance shutdown"
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ shuts_before = get_shut_msg_count(tgen)
+
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" -c "bgp shutdown message ABCDabcd"'
)
)
assert res is None, assertmsg
+ shuts_after = get_shut_msg_count(tgen)
+
+ for k in shuts_before:
+ assert shuts_before[k] + 1 == shuts_after[k]
+
def test_bgp_shutdown_message():
"Test BGP Peer Shutdown Message"
logger.info("Checking BGP shutdown received on router r{}".format(rtrNum))
shut_message = tgen.net["r{}".format(rtrNum)].cmd(
- 'tail bgpd.log | grep "NOTIFICATION.*Cease/Administrative Shutdown"'
+ 'grep -e "NOTIFICATION.*Cease/Administrative Shutdown.*ABCDabcd" bgpd.log'
)
assertmsg = "BGP shutdown message not received on router R{}".format(rtrNum)
assert shut_message != "", assertmsg
- assertmsg = "Incorrect BGP shutdown message received on router R{}".format(
- rtrNum
- )
- assert "ABCDabcd" in shut_message, assertmsg
-
- # tgen.mininet_cli()
-
def test_bgp_no_shutdown():
"Test BGP instance no shutdown"
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
+ip route 10.10.10.10/32 192.168.2.2
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ address-family ipv4
+ redistribute connected
+ redistribute static
+ neighbor 192.168.1.2 route-map r2 out
+ neighbor 192.168.2.2 route-map r3 out
+ exit-address-family
+!
+route-map r2 permit 10
+ match source-protocol static
+route-map r3 permit 10
+ match source-protocol connected
+!
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if r1 can announce only static routes to r2, and only connected
+routes to r3 using `match source-protocol` with route-maps.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    """Three routers: r1--r2 over switch s1, r1--r3 over switch s2."""
+    for routern in range(1, 4):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_route_map_match_source_protocol():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_check_advertised_routes_r2():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp ipv4 unicast neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.10/32": {
+ "valid": True,
+ }
+ },
+ "totalPrefixCounter": 1,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_advertised_routes_r2)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to filter routes by source-protocol for r2"
+
+ def _bgp_check_advertised_routes_r3():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp ipv4 unicast neighbors 192.168.2.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "192.168.1.0/24": {
+ "valid": True,
+ },
+ "192.168.2.0/24": {
+ "valid": True,
+ },
+ "172.16.255.1/32": {
+ "valid": True,
+ },
+ },
+ "totalPrefixCounter": 3,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_advertised_routes_r3)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to filter routes by source-protocol for r3"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "192.168.0.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+
+ ],
+ "172.31.0.11/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.11",
+ "prefixLen":32,
+ "network":"172.31.0.11/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.12/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.12",
+ "prefixLen":32,
+ "network":"172.31.0.12/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.12",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.13/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.13",
+ "prefixLen":32,
+ "network":"172.31.0.13/32",
+ "peerId":"192.168.255.13",
+ "nexthops":[
+ {
+ "ip":"192.168.255.13",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.14/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.14",
+ "prefixLen":32,
+ "network":"172.31.0.14/32",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192.0.2.14",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.15/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.15",
+ "prefixLen":32,
+ "network":"172.31.0.15/32",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192.0.2.12",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.20/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.20",
+ "prefixLen":32,
+ "network":"172.31.0.20/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.111/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.111",
+ "prefixLen":32,
+ "network":"172.31.0.111/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192.168.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.2 activate
+ neighbor 192.168.0.2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192.0.2.100 remote-as 65500
+ neighbor 192.168.255.13 remote-as 65500
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute static
+ label vpn export allocation-mode per-nexthop
+ label vpn export auto
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+{
+ "10.200.0.0/24": [
+ {
+ "prefix": "10.200.0.0/24",
+ "prefixLen": 24,
+ "protocol": "bgp",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "10.125.0.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "vrf": "default",
+ "active": true,
+ "labels":[
+ 102
+ ]
+ }
+ ]
+ }
+ ],
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0/24",
+ "prefixLen": 24,
+ "protocol": "connected",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "nexthops":[
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "r1-eth1",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ip address 192.0.2.1/24
+!
+interface r1-eth2 vrf vrf1
+ ip address 192.168.255.1/24
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+vrf vrf1
+ ip route 172.31.0.14/32 192.0.2.14
+ ip route 172.31.0.15/32 192.0.2.12
+exit-vrf
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.11
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+ network 172.31.0.11/32
+ network 172.31.0.111/32
+ network 172.31.0.20/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r11-eth0
+ ip address 192.0.2.11/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.12
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+ network 172.31.0.12/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r12-eth0
+ ip address 192.0.2.12/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 192.168.255.13
+ no bgp network import-check
+ address-family ipv4 unicast
+ neighbor 192.168.255.1 remote-as 65500
+ network 172.31.0.13/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r13-eth0
+ ip address 192.168.255.13/24
+!
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65501,
+ "routes":
+ {
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0",
+ "prefixLen": 24,
+ "network": "10.201.0.0\/24",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "vrfName": "default",
+ "localAS": 65501,
+ "routes":
+ {
+ "routeDistinguishers":
+ {
+ "444:1":
+ {
+ "172.31.0.11/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.11",
+ "prefixLen": 32,
+ "network": "172.31.0.11\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.12/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.12",
+ "prefixLen": 32,
+ "network": "172.31.0.12\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.13/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.13",
+ "prefixLen": 32,
+ "network": "172.31.0.13\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.14/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.14",
+ "prefixLen": 32,
+ "network": "172.31.0.14\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.15/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.15",
+ "prefixLen": 32,
+ "network": "172.31.0.15\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.20/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.20",
+ "prefixLen": 32,
+ "network": "172.31.0.20\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.111/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.111",
+ "prefixLen": 32,
+ "network": "172.31.0.111\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192.0.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192.0.2.0",
+ "prefixLen": 24,
+ "network": "192.0.2.0\/24",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192.168.255.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192.168.255.0",
+ "prefixLen": 24,
+ "network": "192.168.255.0\/24",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":
+ {
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "peerId": "(unspec)",
+ "nhVrfName": "vrf1",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+log stdout
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.11 remote-as 65500
+ neighbor 192.0.2.12 remote-as 65500
+ address-family ipv4 unicast
+ neighbor 192.0.2.1 route-reflector-client
+ neighbor 192.0.2.11 route-reflector-client
+ neighbor 192.0.2.12 route-reflector-client
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface rr-eth0
+ ip address 192.0.2.100/24
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv4_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv4_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
+ Let us exchange VPNv4 updates between both devices
+ Updates from r1 will originate from the same RD, but will have separate
+ label values.
+
+ +----------+
+ | r11 |
+ |192.0.2.11+---+
+ | | | +----+--------+ +----------+
+ +----------+ | 192.0.2.1 |vrf | r1 |192.168.0.0/24| r2 |
+ +-------------------+ | 1+--------------+ |
+ +----------+ | |VRF1|AS65500 | | AS65501 |
+ | r12 | | +-------------+ | VPNV4| |VPNV4 |
+ |192.0.2.12+---+ |192.168.255.1+-+--+--------+ +----------+
+ | | |
+ +----------+ |
+ |
+ +----------+ |
+ | r13 | |
+ |192.168. +---------+
+ | 255.13 |
+ +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172.31.0.11/32", "172.31.0.20/32", "172.31.0.111/32"]
+PREFIXES_R12 = ["172.31.0.12/32", "172.31.0.15/32"]
+PREFIXES_R13 = ["172.31.0.13/32"]
+PREFIXES_REDIST = ["172.31.0.14/32"]
+PREFIXES_CONNECTED = ["192.168.255.0/24", "192.0.2.0/24"]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r11")
+ tgen.add_router("r12")
+ tgen.add_router("r13")
+ tgen.add_router("r14")
+ tgen.add_router("rr")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r11"])
+ switch.add_link(tgen.gears["r12"])
+ switch.add_link(tgen.gears["rr"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r13"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf1 type vrf table 10",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+ ]
+ cmds_list_plus = [
+ "ip link set dev {0}-eth2 master vrf1",
+ ]
+
+ for cmd in cmds_list:
+ input = cmd.format("r1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1"))
+ logger.info("output: " + output)
+
+ for cmd in cmds_list_plus:
+ input = cmd.format("r1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1"))
+ logger.info("output: " + output)
+
+ for cmd in cmds_list:
+ input = cmd.format("r2")
+ logger.info("input: " + cmd)
+ output = tgen.net["r2"].cmd(cmd.format("r2"))
+ logger.info("output: " + output)
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None):
+ """
+ Dump and check that vpnv4 entries have the same MPLS label value
+ * 'router': the router to check
+ * 'group': the list of prefixes to check. a single label value for the group has to be found
+ * 'label_list': check that the label values are not present in the vpnv4 entries
+ * that list is updated with the present label value
+ * 'label_value_expected': check that the mpls label read is the same as that value
+ """
+
+ stored_label_inited = False
+ for prefix in group:
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ assert dump, "{0}, {1}, route distinguisher not present".format(
+ router.name, prefix
+ )
+ for rd, pathes in dump.items():
+ for path in pathes["paths"]:
+ assert (
+ "remoteLabel" in path.keys()
+ ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+ logger.info(
+ "{0}, {1}, label value is {2}".format(
+ router.name, prefix, path["remoteLabel"]
+ )
+ )
+ if stored_label_inited:
+ assert (
+ path["remoteLabel"] == stored_label
+ ), "{0}, {1}, label value not expected one (expected {2}, observed {3}".format(
+ router.name, prefix, stored_label, path["remoteLabel"]
+ )
+ else:
+ stored_label = path["remoteLabel"]
+ stored_label_inited = True
+ if label_list is not None:
+ assert (
+ stored_label not in label_list
+ ), "{0}, {1}, label already detected in a previous prefix".format(
+ router.name, prefix
+ )
+ label_list.add(stored_label)
+
+ if label_value_expected:
+ assert (
+ path["remoteLabel"] == label_value_expected
+ ), "{0}, {1}, label value not expected (expected {2}, observed {3}".format(
+ router.name, prefix, label_value_expected, path["remoteLabel"]
+ )
+
+
+def bgp_vpnv4_table_check_all(router, label_list=None, same=False):
+ """
+ Dump and check that vpnv4 entries are correctly configured with specific label values
+ * 'router': the router to check
+ * 'label_list': check that the label values are not present in the vpnv4 entries
+ * that list is updated with the present label value found.
+ * 'same': by default, set to False. Addresses groups are classified by addresses.
+ * if set to True, all entries of all groups should have a unique label value
+ """
+ if same:
+ bgp_vpnv4_table_check(
+ router,
+ group=PREFIXES_R11
+ + PREFIXES_R12
+ + PREFIXES_R13
+ + PREFIXES_REDIST
+ + PREFIXES_CONNECTED,
+ label_list=label_list,
+ )
+ else:
+ for group in (
+ PREFIXES_R11,
+ PREFIXES_R12,
+ PREFIXES_R13,
+ PREFIXES_REDIST,
+ PREFIXES_CONNECTED,
+ ):
+ bgp_vpnv4_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+ """
+ Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+ * 'label_list': the list of labels that should be in inLabel value
+ * 'whitelist': the list of nexthops (IP or interface) that should be on output
+ """
+ nexthop_list = []
+ if blacklist:
+ nexthop_list.append(blacklist)
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table json", isjson=True)
+ for in_label, label_info in dump.items():
+ if label_list is not None:
+ label_list.add(in_label)
+ for nh in label_info["nexthops"]:
+ assert (
+ nh["installed"] == True and nh["type"] == "BGP"
+ ), "{}, show mpls table, nexthop is not installed".format(router.name)
+ if "nexthop" in nh.keys():
+ assert (
+ nh["nexthop"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ router.name
+ )
+ nexthop_list.append(nh["nexthop"])
+ elif "interface" in nh.keys():
+ assert (
+ nh["interface"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+ router.name
+ )
+ nexthop_list.append(nh["interface"])
+ else:
+ assert (
+ 0
+ ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+ router.name
+ )
+
+ if whitelist:
+ for entry in whitelist:
+ assert (
+ entry in nexthop_list
+ ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+ router.name, entry
+ )
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ expected = {rd: {"prefix": prefix}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ expected = {rd: {"prefix": prefix}}
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {
+ "inLabel": inlabel,
+ "installed": True,
+ "nexthops": [{"interface": interface}],
+ }
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {"inlabel": inlabel, "installed": True}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def mpls_entry_get_interface(router, label):
+ """
+ Assert that the label is in MPLS table
+ Assert an outgoing interface is programmed
+ return the outgoing interface
+ """
+ outgoing_interface = None
+
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+ assert dump, "{0}, label {1} not present".format(router.name, label)
+
+ for nh in dump["nexthops"]:
+ assert (
+ "interface" in nh.keys()
+ ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+ router.name, label
+ )
+
+ outgoing_interface = nh["interface"]
+
+ return outgoing_interface
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged
+    statuses as they depend on it.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Check BGP IPv4 routing tables on VRF1 of r1
+    logger.info("Checking BGP IPv4 routes for convergence on r1 VRF1")
+    router = tgen.gears["r1"]
+    json_file = "{}/{}/bgp_ipv4_routes_vrf1.json".format(CWD, router.name)
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp vrf vrf1 ipv4 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    logger.info("Checking BGP VPNv4 routes for convergence on r2")
+    router = tgen.gears["r2"]
+    json_file = "{}/{}/bgp_vpnv4_routes.json".format(CWD, router.name)
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp ipv4 vpn json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check BGP labels received on r2
+    logger.info("Checking BGP VPNv4 labels on r2")
+    label_list = set()
+    bgp_vpnv4_table_check_all(tgen.gears["r2"], label_list)
+
+    # Check MPLS labels received on r1
+    # label_list now holds the labels seen in the VPNv4 updates on r2; r1's
+    # MPLS table is checked against the same set of inLabel values.
+    mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+    """
+    Turn down a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Unpeering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nno neighbor 192.0.2.100\n",
+        isjson=False,
+    )
+
+    def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+        # Helper for run_and_expect(): returns None once the prefix is gone.
+        output = json.loads(
+            router.vtysh_cmd(
+                "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+            )
+        )
+        expected = {"prefix": prefix}
+        ret = topotest.json_cmp(output, expected)
+        if ret is None:
+            return "not good"
+        return None
+
+    # Check prefix from r11 is not present
+    test_func = functools.partial(
+        _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv4", "172.31.0.11/32"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r1, prefix 172.31.0.11/32 from r11 did not disappear. r11 still connected to rr ?"
+
+    # Check BGP updated received on r2 are not from r11
+    logger.info("Checking BGP VPNv4 labels on r2")
+    for entry in PREFIXES_R11:
+        dump = tgen.gears["r2"].vtysh_cmd(
+            "show bgp ipv4 vpn {} json".format(entry), isjson=True
+        )
+        for rd in dump:
+            assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+    # r11's nexthop address must no longer appear in r1's MPLS table
+    mpls_table_check(tgen.gears["r1"], blacklist=["192.0.2.11"])
+
+
+def test_flapping_bgp_vrf_up():
+    """
+    Turn up a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Peering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nneighbor 192.0.2.100 remote-as 65500\n",
+        isjson=False,
+    )
+
+    # Check r2 gets prefix 172.31.0.11/128
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.11/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, prefix 172.31.0.11/32 from r11 not present. r11 still disconnected from rr ?"
+    # per-group label assignment must be back for every address group
+    bgp_vpnv4_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+    """
+    Test static recursive route redistributed over BGP
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nip route 172.31.0.30/32 172.31.0.20\n",
+        isjson=False,
+    )
+    logger.info("Checking BGP VPNv4 labels on r2")
+
+    # Check r2 received vpnv4 update with 172.31.0.30
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.30/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.30 not found"
+
+    # the recursive prefix is checked as one group together with the r11
+    # prefixes: they must all share one single label value
+    bgp_vpnv4_table_check(tgen.gears["r2"], group=PREFIXES_R11 + ["172.31.0.30/32"])
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+    logger.info("Dumping nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+    logger.info("Disabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nno ip route 172.31.0.30/32 172.31.0.20\n",
+        isjson=False,
+    )
+    logger.info("Checking BGP VPNv4 labels on r2")
+
+    # Check r2 removed 172.31.0.30 vpnv4 update
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.30/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.30 still present"
+
+
+def test_prefix_changes_interface():
+    """
+    Test BGP update for a given prefix learnt on different interface
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling a 172.31.0.50/32 prefix for r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+        isjson=False,
+    )
+
+    # Check r2 received vpnv4 update with 172.31.0.50
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    # the new prefix must share the single label of the other r11 prefixes
+    label_list = set()
+    bgp_vpnv4_table_check(
+        tgen.gears["r2"],
+        group=["172.31.0.11/32", "172.31.0.111/32", "172.31.0.50/32"],
+        label_list=label_list,
+    )
+
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r11 found"
+
+    oldlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+    old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            oldlabel, old_outgoing_interface
+        )
+    )
+
+    logger.info("Moving the 172.31.0.50/32 prefix from r11 to r13")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+        isjson=False,
+    )
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+        isjson=False,
+    )
+
+    # Check r2 removed 172.31.0.50 vpnv4 update with old label
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+        label=oldlabel,
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, vpnv4 update 172.31.0.50 with old label {0} still present".format(oldlabel)
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    # Check r2 received new 172.31.0.50 vpnv4 update
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+    # after the move, the prefix must share the label used by r13's prefixes
+    label_list = set()
+    bgp_vpnv4_table_check(
+        tgen.gears["r2"],
+        group=PREFIXES_R13 + ["172.31.0.50/32"],
+        label_list=label_list,
+    )
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r13 found"
+
+    newlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+    new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            newlabel, new_outgoing_interface
+        )
+    )
+    # r11 and r13 sit behind different interfaces of r1, so the MPLS entry
+    # must now point at a different outgoing interface
+    if old_outgoing_interface == new_outgoing_interface:
+        assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+    logger.info("Restoring state by removing the 172.31.0.50/32 prefix from r13")
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+        isjson=False,
+    )
+ )
+
+
+def test_changing_default_label_value():
+    """
+    Change the MPLS default value
+    Check that r1 VPNv4 entries have the 222 label value
+    Check that MPLS entry with old label value is no more present
+    Check that MPLS entry for local traffic has inLabel set to 222
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears["r1"]
+
+    # counting the number of labels used in the VPNv4 table
+    label_list = set()
+    logger.info("r1, vpnv4 table, check the number of labels used before modification")
+    bgp_vpnv4_table_check_all(router, label_list)
+    old_len = len(label_list)
+    assert (
+        old_len != 1
+    ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)
+
+    logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export 222\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 222 not found"
+
+    # check label repartition is ok
+    logger.info("r1, vpnv4 table, check the number of labels used after modification")
+    label_list = set()
+    bgp_vpnv4_table_check_all(router, label_list)
+    new_len = len(label_list)
+    assert (
+        old_len == new_len
+    ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+        new_len, old_len
+    )
+
+    logger.info(
+        "r1, vpnv4 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+    )
+    bgp_vpnv4_table_check(
+        router, group=["192.168.255.0/24", "192.0.2.0/24"], label_value_expected=222
+    )
+
+
+def test_unconfigure_allocation_mode_nexthop():
+    """
+    Test unconfiguring allocation mode per nexthop
+    Check that show mpls table has no entry with label 17 (previously used)
+    Check that all VPN updates on r1 should have label value moved to 222
+    Check that show mpls table will only have 222 label value
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Unconfiguring allocation mode per nexthop")
+    router = tgen.gears["r1"]
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nno label vpn export allocation-mode per-nexthop\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_not_found, router, 17
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 17 still present"
+
+    # Check vpnv4 routes from r1
+    # same=True: with per-nexthop allocation disabled, every group must now
+    # share one single label value
+    logger.info("Checking vpnv4 routes on r1")
+    label_list = set()
+    bgp_vpnv4_table_check_all(router, label_list=label_list, same=True)
+    assert len(label_list) == 1, "r1, multiple Label values found for vpnv4 updates"
+
+    new_label = label_list.pop()
+    assert (
+        new_label == 222
+    ), "r1, wrong label value in VPNv4 table, expected 222, observed {}".format(
+        new_label
+    )
+
+    # Check mpls table with 222 value
+    logger.info("Checking MPLS values on show mpls table of r1")
+    label_list = set()
+    label_list.add(222)
+    mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+ """
+ Test re-configuring allocation mode per nexthop
+ Check that show mpls table has no entry with label 17
+ Check that all VPN updates on r1 should have multiple label values and not only 222
+ Check that show mpls table will have multiple label values and not only 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Reconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check that show mpls table has no entry with label 17
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv4 routes from r1
+ logger.info("Checking vpnv4 routes on r1")
+ label_list = set()
+ bgp_vpnv4_table_check_all(router, label_list=label_list)
+ assert len(label_list) != 1, "r1, only 1 label values found for vpnv4 updates"
+
+ # Check mpls table with all values
+ logger.info("Checking MPLS values on show mpls table of r1")
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    # Only meaningful when topotests run with memleak detection enabled.
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "10:200::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10:200::",
+ "prefixLen": 64,
+ "network": "10:200::/64",
+ "nexthops": [
+ {
+ "ip": "192:168::2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::11/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::11",
+ "prefixLen":128,
+ "network":"172:31::11/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global"
+ }
+ ]
+ }
+ ],
+ "172:31::12/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::12",
+ "prefixLen":128,
+ "network":"172:31::12/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::12",
+ "afi":"ipv6",
+ "scope":"global"
+ },
+ {
+ "scope": "link-local",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::13/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::13",
+ "prefixLen":128,
+ "network":"172:31::13/128",
+ "peerId":"192:168::255:13",
+ "nexthops":[
+ {
+ "ip":"192:168::255:13",
+ "afi":"ipv6",
+ "scope": "global"
+ },
+ {
+ "scope": "link-local"
+ }
+ ]
+ }
+ ],
+ "172:31::14/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::14",
+ "prefixLen":128,
+ "network":"172:31::14/128",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192:2::14",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::15/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::15",
+ "prefixLen":128,
+ "network":"172:31::15/128",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192:2::12",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::20/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::20",
+ "prefixLen":128,
+ "network":"172:31::20/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global"
+ }
+ ]
+ }
+ ],
+ "172:31::111/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::111",
+ "prefixLen":128,
+ "network":"172:31::111/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global"
+ }
+ ]
+ }
+ ],
+ "192:2::/64": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"192:2::",
+ "prefixLen":64,
+ "network":"192:2::/64",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"::",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "192:168::255:0/112": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"192:168::255:0",
+ "prefixLen":112,
+ "network":"192:168::255:0/112",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"::",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+debug bgp vpn leak-from-vrf
+debug bgp vpn label
+debug bgp nht
+debug bgp updates out
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192:168::2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192:168::2 activate
+ exit-address-family
+ address-family ipv6 vpn
+ neighbor 192:168::2 activate
+ neighbor 192:168::2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192:2::100 remote-as 65500
+ neighbor 192:168::255:13 remote-as 65500
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ neighbor 192:2::100 route-map rmap in
+ neighbor 192:168::255:13 activate
+ neighbor 192:168::255:13 route-map rmap in
+ redistribute connected
+ redistribute static
+ label vpn export allocation-mode per-nexthop
+ label vpn export auto
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
+bgp community-list 1 seq 5 permit 10:10
+!
+route-map rmap permit 1
+ match community 1
+ set ipv6 next-hop prefer-global
+!
+route-map rmap permit 2
+!
--- /dev/null
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ipv6 address 192:2::1/64
+!
+interface r1-eth2 vrf vrf1
+ ipv6 address 192:168::255:1/112
+!
+interface r1-eth0
+ ip address 192:168::1/112
+!
+vrf vrf1
+ ipv6 route 172:31::14/128 192:2::14
+ ipv6 route 172:31::15/128 192:2::12
+exit-vrf
--- /dev/null
+router bgp 65500
+ bgp router-id 11.11.11.11
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ neighbor 192:2::100 route-map rmap out
+ network 172:31::11/128
+ network 172:31::111/128
+ network 172:31::20/128
+ exit-address-family
+!
+route-map rmap permit 1
+ set community 10:10
+!
--- /dev/null
+log stdout
+interface r11-eth0
+ ipv6 address 192:2::11/64
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 12.12.12.12
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ network 172:31::12/128
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r12-eth0
+ ipv6 address 192:2::12/64
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 13.13.13.13
+ no bgp network import-check
+ neighbor 192:168::255:1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:168::255:1 activate
+ exit-address-family
+ address-family ipv6 unicast
+ neighbor 192:168::255:1 activate
+ neighbor 192:168::255:1 route-map rmap out
+ network 172:31::0:13/128
+ exit-address-family
+!
+route-map rmap permit 1
+ set community 10:10
+!
--- /dev/null
+log stdout
+interface r13-eth0
+ ipv6 address 192:168::255:13/112
+!
--- /dev/null
+{
+ "vrfName": "default",
+ "localAS": 65501,
+ "routes":
+ {
+ "routeDistinguishers":
+ {
+ "444:1":
+ {
+ "172:31::11/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::11",
+ "prefixLen": 128,
+ "network": "172:31::11/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::12/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::12",
+ "prefixLen": 128,
+ "network": "172:31::12/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::13/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::13",
+ "prefixLen": 128,
+ "network": "172:31::13/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::14/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::14",
+ "prefixLen": 128,
+ "network": "172:31::14/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::15/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::15",
+ "prefixLen": 128,
+ "network": "172:31::15/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::20/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::20",
+ "prefixLen": 128,
+ "network": "172:31::20/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::111/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::111",
+ "prefixLen": 128,
+ "network": "172:31::111/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192:2::",
+ "prefixLen": 64,
+ "network": "192:2::/64",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192:168::255:0/112": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192:168::255:0",
+ "prefixLen": 112,
+ "network": "192:168::255:0/112",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":
+ {
+ "10:200::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10:200::",
+ "prefixLen": 64,
+ "network": "10:200::/64",
+ "peerId": "(unspec)",
+ "nhVrfName": "vrf1",
+ "nexthops": [
+ {
+ "ip": "::",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192:168::1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:168::1 activate
+ exit-address-family
+ address-family ipv6 vpn
+ neighbor 192:168::1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv6 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+log stdout
+interface r2-eth1 vrf vrf1
+ ipv6 address 10:200::2/64
+!
+interface r2-eth0
+ ipv6 address 192:168::2/112
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192:2::1 remote-as 65500
+ neighbor 192:2::11 remote-as 65500
+ neighbor 192:2::12 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::1 activate
+ no neighbor 192:2::11 activate
+ no neighbor 192:2::12 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::1 activate
+ neighbor 192:2::1 route-reflector-client
+ neighbor 192:2::1 nexthop-local unchanged
+ neighbor 192:2::11 activate
+ neighbor 192:2::11 route-reflector-client
+ neighbor 192:2::11 nexthop-local unchanged
+ neighbor 192:2::12 activate
+ neighbor 192:2::12 route-reflector-client
+ neighbor 192:2::12 nexthop-local unchanged
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface rr-eth0
+ ipv6 address 192:2::100/64
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv6_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv6_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
+ Let us exchange VPNv6 updates between both devices
+ Updates from r1 will originate from the same RD, but will have separate
+ label values.
+
+ +----------+
+ | r11 |
+ |192::2:11 +---+
+ | | | +----+--------+ +----------+
+ +----------+ | 192::2::1 |vrf | r1 |192:168::/112 | r2 |
+ +-------------------+ | 1+--------------+ |
+ +----------+ | |VRF1|AS65500 | | AS65501 |
+ | r12 | | +--------------+ | VPNV6| |VPNV6 |
+ |192::2:12 +---+ |192:168::255:1+-+--+--------+ +----------+
+ | | |
+ +----------+ |
+ |
+ +----------+ |
+ | r13 | |
+ |192:168:: +--------+
+ | 255:13 |
+ +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"]
+PREFIXES_R12 = ["172:31::12/128"]
+PREFIXES_REDIST_R12 = ["172:31::15/128"]
+PREFIXES_R13 = ["172:31::13/128"]
+PREFIXES_REDIST_R14 = ["172:31::14/128"]
+PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r11")
+ tgen.add_router("r12")
+ tgen.add_router("r13")
+ tgen.add_router("r14")
+ tgen.add_router("rr")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r11"])
+ switch.add_link(tgen.gears["r12"])
+ switch.add_link(tgen.gears["rr"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r13"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf1 type vrf table 10",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+ ]
+ cmds_list_plus = [
+ "ip link set dev {0}-eth2 master vrf1",
+ ]
+
+ for cmd in cmds_list:
+ input = cmd.format("r1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1"))
+ logger.info("output: " + output)
+
+ for cmd in cmds_list_plus:
+ input = cmd.format("r1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1"))
+ logger.info("output: " + output)
+
+ for cmd in cmds_list:
+ input = cmd.format("r2")
+ logger.info("input: " + cmd)
+ output = tgen.net["r2"].cmd(cmd.format("r2"))
+ logger.info("output: " + output)
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None):
+ """
+ Dump and check that vpnv6 entries have the same MPLS label value
+ * 'router': the router to check
+ * 'group': the list of prefixes to check. a single label value for the group has to be found
+ * 'label_list': check that the label values are not present in the vpnv6 entries
+ * that list is updated with the present label value
+ * 'label_value_expected': check that the mpls label read is the same as that value
+ """
+
+ stored_label_inited = False
+ for prefix in group:
+ dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
+ for rd, pathes in dump.items():
+ for path in pathes["paths"]:
+ assert (
+ "remoteLabel" in path.keys()
+ ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+ logger.info(
+ "{0}, {1}, label value is {2}".format(
+ router.name, prefix, path["remoteLabel"]
+ )
+ )
+ if stored_label_inited:
+ assert (
+ path["remoteLabel"] == stored_label
+                    ), "{0}, {1}, label value not expected one (expected {2}, observed {3})".format(
+ router.name, prefix, stored_label, path["remoteLabel"]
+ )
+ else:
+ stored_label = path["remoteLabel"]
+ stored_label_inited = True
+ if label_list is not None:
+ assert (
+ stored_label not in label_list
+ ), "{0}, {1}, label already detected in a previous prefix".format(
+ router.name, prefix
+ )
+ label_list.add(stored_label)
+
+ if label_value_expected:
+ assert (
+ path["remoteLabel"] == label_value_expected
+                    ), "{0}, {1}, label value not expected (expected {2}, observed {3})".format(
+ router.name, prefix, label_value_expected, path["remoteLabel"]
+ )
+
+
+def bgp_vpnv6_table_check_all(router, label_list=None, same=False):
+ """
+ Dump and check that vpnv6 entries are correctly configured with specific label values
+ * 'router': the router to check
+ * 'label_list': check that the label values are not present in the vpnv6 entries
+ * that list is updated with the present label value found.
+ * 'same': by default, set to False. Addresses groups are classified by addresses.
+ * if set to True, all entries of all groups should have a unique label value
+ """
+ if same:
+ bgp_vpnv6_table_check(
+ router,
+ group=PREFIXES_R11
+ + PREFIXES_R12
+ + PREFIXES_REDIST_R12
+ + PREFIXES_R13
+ + PREFIXES_REDIST_R14
+ + PREFIXES_CONNECTED,
+ label_list=label_list,
+ )
+ else:
+ for group in (
+ PREFIXES_R11,
+ PREFIXES_R12,
+ PREFIXES_REDIST_R12,
+ PREFIXES_R13,
+ PREFIXES_REDIST_R14,
+ PREFIXES_CONNECTED,
+ ):
+ bgp_vpnv6_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+ """
+ Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+ * 'label_list': the list of labels that should be in inLabel value
+ * 'whitelist': the list of nexthops (IP or interface) that should be on output
+ """
+ nexthop_list = []
+ if blacklist:
+ nexthop_list.append(blacklist)
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table json", isjson=True)
+ for in_label, label_info in dump.items():
+ if label_list is not None:
+ label_list.add(in_label)
+ for nh in label_info["nexthops"]:
+ assert (
+ nh["installed"] == True and nh["type"] == "BGP"
+ ), "{}, show mpls table, nexthop is not installed".format(router.name)
+ if "nexthop" in nh.keys():
+ assert (
+ nh["nexthop"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ router.name
+ )
+ nexthop_list.append(nh["nexthop"])
+ elif "interface" in nh.keys():
+ assert (
+ nh["interface"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+ router.name
+ )
+ nexthop_list.append(nh["interface"])
+ else:
+ assert (
+ 0
+ ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+ router.name
+ )
+
+ if whitelist:
+ for entry in whitelist:
+ assert (
+ entry in nexthop_list
+ ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+ router.name, entry
+ )
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ expected = {rd: {"prefix": prefix}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ expected = {rd: {"prefix": prefix}}
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {
+ "inLabel": inlabel,
+ "installed": True,
+ "nexthops": [{"interface": interface}],
+ }
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+    expected = {"inLabel": inlabel, "installed": True}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def mpls_entry_get_interface(router, label):
+ """
+ Assert that the label is in MPLS table
+ Assert an outgoing interface is programmed
+ return the outgoing interface
+ """
+ outgoing_interface = None
+
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+ assert dump, "{}, show mpls table, inLabel {} not found".format(router.name, label)
+
+ for nh in dump["nexthops"]:
+ assert (
+ "interface" in nh.keys()
+ ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+ router.name, label
+ )
+
+ outgoing_interface = nh["interface"]
+
+ return outgoing_interface
+
+
+def test_protocols_convergence():
+ """
+ Assert that all protocols have converged
+ statuses as they depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Check BGP IPv6 routing tables on VRF1 of r1
+ logger.info("Checking BGP IPv6 routes for convergence on r1 VRF1")
+ router = tgen.gears["r1"]
+ json_file = "{}/{}/bgp_ipv6_routes_vrf1.json".format(CWD, router.name)
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf vrf1 ipv6 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ logger.info("Checking BGP VPNv6 routes for convergence on r2")
+ router = tgen.gears["r2"]
+ json_file = "{}/{}/bgp_vpnv6_routes.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv6 vpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ # Check BGP labels received on r2
+ logger.info("Checking BGP VPNv6 labels on r2")
+ label_list = set()
+ bgp_vpnv6_table_check_all(tgen.gears["r2"], label_list)
+
+ # Check MPLS labels received on r1
+ mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+ """
+ Turn down a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Unpeering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nno neighbor 192:2::100\n",
+ isjson=False,
+ )
+
+ def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+ output = json.loads(
+ router.vtysh_cmd(
+ "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+ )
+ )
+ expected = {"prefix": prefix}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+ # Check prefix from r11 is not present
+ test_func = functools.partial(
+ _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv6", "172:31::11/128"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r1, prefix 172:31::11/128 from r11 did not disappear. r11 still connected to rr ?"
+
+ # Check BGP updated received on r2 are not from r11
+ logger.info("Checking BGP VPNv6 labels on r2")
+ for entry in PREFIXES_R11:
+ dump = tgen.gears["r2"].vtysh_cmd(
+ "show bgp ipv6 vpn {} json".format(entry), isjson=True
+ )
+ for rd in dump:
+ assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+ mpls_table_check(tgen.gears["r1"], blacklist=["192:2::11"])
+
+
+def test_flapping_bgp_vrf_up():
+ """
+ Turn up a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Peering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nneighbor 192:2::100 remote-as 65500\n",
+ isjson=False,
+ )
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv6 unicast\nneighbor 192:2::100 activate\n",
+ isjson=False,
+ )
+
+ # Check r2 gets prefix 172:31::11/128
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::11/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, prefix 172:31::11/128 from r11 not present. r11 still disconnected from rr ?"
+ bgp_vpnv6_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+ """
+ Test static recursive route redistributed over BGP
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nipv6 route 172:31::30/128 172:31::20\n",
+ isjson=False,
+ )
+ logger.info("Checking BGP VPNv6 labels on r2")
+ # that route should be sent along with label for 192.0.2.11
+
+ def _prefix30_not_found(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+ expected = {"444:1": {"prefix": "172:31::30/128"}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+ def _prefix30_found(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+ expected = {"444:1": {"prefix": "172:31::30/128"}}
+ return topotest.json_cmp(output, expected)
+
+ # Check r2 received vpnv6 update with 172:31::30
+ test_func = functools.partial(_prefix30_found, tgen.gears["r2"])
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::30 not found"
+
+ # that route should be sent along with label for 192::2:11
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=PREFIXES_R11 + ["172:31::30/128"],
+ )
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+ logger.info("Dumping nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+ logger.info("Disabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nno ipv6 route 172:31::30/128 172:31::20\n",
+ isjson=False,
+ )
+
+ # Check r2 removed 172:31::30 vpnv6 update
+ test_func = functools.partial(_prefix30_not_found, tgen.gears["r2"])
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::30 still present"
+
+
+def test_prefix_changes_interface():
+ """
+ Test BGP update for a given prefix learnt on different interface
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling a 172:31::50/128 prefix for r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+ isjson=False,
+ )
+
+ # Check r2 received vpnv6 update with 172:31::50
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::50 not found"
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ label_list = set()
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=PREFIXES_R11 + ["172:31::50/128"],
+ label_list=label_list,
+ )
+
+    assert (
+        len(label_list) == 1
+    ), "Multiple label values found for updates from r11"
+
+ oldlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+ old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ oldlabel, old_outgoing_interface
+ )
+ )
+
+ logger.info("Moving the 172:31::50/128 prefix from r11 to r13")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+ isjson=False,
+ )
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+ isjson=False,
+ )
+
+ # Check r2 removed 172:31::50 vpnv6 update with old label
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ label=oldlabel,
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, vpnv6 update 172:31::50 with old label {0} still present".format(oldlabel)
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ # Check r2 received new 172:31::50 vpnv6 update
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv6 update 172:31::50 not found"
+
+ label_list = set()
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=["172:31::13/128", "172:31::50/128"],
+ label_list=label_list,
+ )
+    assert (
+        len(label_list) == 1
+    ), "Multiple label values found for updates from r13"
+
+ newlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+ new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ newlabel, new_outgoing_interface
+ )
+ )
+ if old_outgoing_interface == new_outgoing_interface:
+ assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+ logger.info("Restoring state by removing the 172:31::50/128 prefix from r13")
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+ isjson=False,
+ )
+
+
+def test_changing_default_label_value():
+ """
+ Change the MPLS default value
+ Check that r1 VPNv6 entries have the 222 label value
+ Check that MPLS entry with old label value is no more present
+ Check that MPLS entry for local traffic has inLabel set to 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+
+ # counting the number of labels used in the VPNv6 table
+ label_list = set()
+ logger.info("r1, VPNv6 table, check the number of labels used before modification")
+ bgp_vpnv6_table_check_all(router, label_list)
+ old_len = len(label_list)
+ assert (
+ old_len != 1
+    ), "r1, number of labels used should be greater than 1, observed {} ".format(old_len)
+
+ logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export 222\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 222 not found"
+
+ # check label repartition is ok
+ logger.info("r1, VPNv6 table, check the number of labels used after modification")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list)
+ new_len = len(label_list)
+ assert (
+ old_len == new_len
+ ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+ new_len, old_len
+ )
+
+ logger.info(
+ "r1, VPNv6 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+ )
+ bgp_vpnv6_table_check(router, group=PREFIXES_CONNECTED, label_value_expected=222)
+
+
+def test_unconfigure_allocation_mode_nexthop():
+ """
+ Test unconfiguring allocation mode per nexthop
+ Check on r2 that new MPLS label values have been propagated
+ Check that show mpls table has no entry with label 17 (previously used)
+ Check that all VPN updates on r1 should have label value moved to 222
+ Check that show mpls table will only have 222 label value
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Unconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ dump = router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nno label vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv6 routes from r1
+ logger.info("Checking VPNv6 routes on r1")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list=label_list, same=True)
+ assert len(label_list) == 1, "r1, multiple Label values found for VPNv6 updates"
+
+ new_label = label_list.pop()
+ assert (
+ new_label == 222
+ ), "r1, wrong label value in VPNv6 table, expected 222, observed {}".format(
+ new_label
+ )
+
+ # Check mpls table with 222 value
+ logger.info("Checking MPLS values on show mpls table of r1")
+ label_list = set()
+ label_list.add(222)
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+ """
+ Test re-configuring allocation mode per nexthop
+ Check that show mpls table has no entry with label 17
+ Check that all VPN updates on r1 should have multiple label values and not only 222
+ Check that show mpls table will have multiple label values and not only 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Reconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ dump = router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check that show mpls table has no entry with label 17
+ logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv6 routes from r1
+ logger.info("Checking VPNv6 routes on r1")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list=label_list)
+ assert len(label_list) != 1, "r1, only 1 label values found for VPNv6 updates"
+
+ # Check mpls table with all values
+ logger.info("Checking MPLS values on show mpls table of r1")
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
shellopt = self.cfgopt.get_option_list("--shell")
if "all" in shellopt or "." in shellopt:
- self.run_in_window("bash")
+ self.run_in_window("bash", title="munet")
# This is expected by newer munet CLI code
self.config_dirname = ""
# ospf gr information
gr_data = ospf_data.setdefault("graceful-restart", {})
if gr_data:
-
if "opaque" in gr_data and gr_data["opaque"]:
cmd = "capability opaque"
if gr_data.setdefault("delete", False):
else:
data_ip = topo["routers"][ospf_nbr]["links"]
data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"]
+ logger.info("ospf neighbor %s: router-id: %s", router, data_rid)
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
try:
nh_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[0]
except KeyError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
- router, nbr_rid, ospf_nbr
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
)
return errormsg
return errormsg
for ospf_nbr, nbr_data in ospf_nbr_list.items():
-
try:
data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
except KeyError:
return errormsg
continue
else:
-
for router, rnode in tgen.routers().items():
if "ospf6" not in topo["routers"][router]:
continue
data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
"router_id"
]
-
+ logger.info("ospf neighbor %s: router-id: %s", ospf_nbr, data_rid)
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
nh_state = get_index_val.get(neighbor_ip)["state"]
intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
- router, nbr_rid, ospf_nbr
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
)
return errormsg
continue
if st_rt in ospf_rib_json:
-
st_found = True
found_routes.append(st_rt)
# XXX not appropriate for ssh
cmd = ["sudo", "-Eu", os.environ["SUDO_USER"]] + cmd
- if not isinstance(nscmd, str):
- nscmd = shlex.join(nscmd)
- cmd.append(nscmd)
+ if title:
+ cmd.append("-t")
+ cmd.append(title)
+
+ if isinstance(nscmd, str):
+ nscmd = shlex.split(nscmd)
+ cmd.extend(nscmd)
elif "DISPLAY" in os.environ:
cmd = [get_exec_path_host("xterm")]
if "SUDO_USER" in os.environ:
"""
TOPOOLOGY =
Please view in a fixed-width font such as Courier.
- +---+ A0 +---+
- +R1 +------------+R2 |
+ +---+ A0 +---+
+ |R1 +------------+R2 |
+-+-+- +--++
| -- -- |
| -- A0 -- |
| -- -- |
| -- -- |
+-+-+- +-+-+
- +R0 +-------------+R3 |
- +---+ A0 +---+
+ |R0 +-------------+R3 |
+ +---+ A0 +---+
TESTCASES =
1. OSPF summarisation functionality.
ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"]
- ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf": {"summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]}
step("Repeat steps 1 to 10 of summarisation in non Back bone area.")
reset_config_on_routers(tgen)
- step("Change the area id on the interface on R0")
+ step("Change the area id on the interface on R0 to R1 from 0.0.0.0 to 0.0.0.1")
input_dict = {
"r0": {
"links": {
result = create_interfaces_cfg(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
- step("Change the area id on the interface ")
+ step("Change the area id on the interface on R1 to R0 from 0.0.0.0 to 0.0.0.1")
input_dict = {
"r1": {
"links": {
result = create_interfaces_cfg(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ # clear neighbor state on both routers to avoid stale state
+ tgen.net["r0"].cmd("clear ip ospf neighbor")
+ tgen.net["r1"].cmd("clear ip ospf neighbor")
+
ospf_covergence = verify_ospf_neighbor(tgen, topo)
assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
import pytest
-from lib.common_config import retry, run_frr_cmd, step
+from lib.common_config import (
+ retry,
+ run_frr_cmd,
+ step,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+)
+
from lib.micronet import Timeout, comm_error
from lib.topogen import Topogen, TopoRouter
from lib.topotest import interface_set_status, json_cmp
_test_opaque_add_del(tgen, apibin)
+def _test_opaque_add_restart_add(tgen, apibin):
+ "Test adding an opaque LSA and then restarting ospfd"
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ p = None
+ pread = None
+ # Log to our stdin, stderr
+ pout = open(os.path.join(r1.net.logdir, "r1/add-del.log"), "a+")
+ try:
+ step("reachable: check for add notification")
+ pread = r2.popen(
+ ["/usr/bin/timeout", "120", apibin, "-v", "--logtag=READER", "wait,120"],
+ encoding=None, # don't buffer
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ p = r1.popen(
+ [
+ apibin,
+ "-v",
+ "add,10,1.2.3.4,231,1",
+ "add,10,1.2.3.4,231,1,feedaceebeef",
+ "wait, 5",
+ "add,10,1.2.3.4,231,1,feedaceedeadbeef",
+ "wait, 5",
+ "add,10,1.2.3.4,231,1,feedaceebaddbeef",
+ "wait, 5",
+ ]
+ )
+ add_input_dict = {
+ "areas": {
+ "1.2.3.4": {
+ "areaLocalOpaqueLsa": [
+ {
+ "lsId": "231.0.0.1",
+ "advertisedRouter": "1.0.0.0",
+ "sequenceNumber": "80000004",
+ "checksum": "3128",
+ },
+ ],
+ "areaLocalOpaqueLsaCount": 1,
+ },
+ },
+ }
+ step("Check for add LSAs")
+ json_cmd = "show ip ospf da json"
+ assert verify_ospf_database(tgen, r1, add_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
+
+ step("Shutdown the interface on r1 to isolate it for r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+ time.sleep(2)
+ step("Reset the client")
+ p.send_signal(signal.SIGINT)
+ time.sleep(2)
+ p.wait()
+ p = None
+
+ step("Kill ospfd on R1")
+ kill_router_daemons(tgen, "r1", ["ospfd"])
+ time.sleep(2)
+
+ step("Bring ospfd on R1 back up")
+ start_router_daemons(tgen, "r1", ["ospfd"])
+
+ p = r1.popen(
+ [
+ apibin,
+ "-v",
+ "add,10,1.2.3.4,231,1",
+ "add,10,1.2.3.4,231,1,feedaceecafebeef",
+ "wait, 5",
+ ]
+ )
+
+ step("Bring the interface on r1 back up for connection to r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+ step("Verify area opaque LSA refresh")
+ json_cmd = "show ip ospf da opaque-area json"
+ add_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [
+ {
+ "linkStateId": "231.0.0.1",
+ "advertisingRouter": "1.0.0.0",
+ "lsaSeqNumber": "80000005",
+ "checksum": "a87e",
+ "length": 28,
+ "opaqueDataLength": 8,
+ },
+ ],
+ },
+ },
+ }
+ assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+
+ step("Shutdown the interface on r1 to isolate it for r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+ time.sleep(2)
+ step("Reset the client")
+ p.send_signal(signal.SIGINT)
+ time.sleep(2)
+ p.wait()
+ p = None
+
+ step("Kill ospfd on R1")
+ kill_router_daemons(tgen, "r1", ["ospfd"])
+ time.sleep(2)
+
+ step("Bring ospfd on R1 back up")
+ start_router_daemons(tgen, "r1", ["ospfd"])
+
+ step("Bring the interface on r1 back up for connection to r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+ step("Verify area opaque LSA Purging")
+ json_cmd = "show ip ospf da opaque-area json"
+ add_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [
+ {
+ "lsaAge": 3600,
+ "linkStateId": "231.0.0.1",
+ "advertisingRouter": "1.0.0.0",
+ "lsaSeqNumber": "80000005",
+ "checksum": "a87e",
+ "length": 28,
+ "opaqueDataLength": 8,
+ },
+ ],
+ },
+ },
+ }
+ assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+ step("Verify Area Opaque LSA removal after timeout (60 seconds)")
+ time.sleep(60)
+ json_cmd = "show ip ospf da opaque-area json"
+ timeout_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [],
+ },
+ },
+ }
+ assert (
+ verify_ospf_database(tgen, r1, timeout_detail_input_dict, json_cmd) is None
+ )
+ assert (
+ verify_ospf_database(tgen, r2, timeout_detail_input_dict, json_cmd) is None
+ )
+
+ except Exception:
+ if p:
+ p.terminate()
+ if p.wait():
+ comm_error(p)
+ p = None
+ raise
+ finally:
+ if pread:
+ pread.terminate()
+ pread.wait()
+ if p:
+ p.terminate()
+ p.wait()
+
+
+@pytest.mark.parametrize("tgen", [2], indirect=True)
+def test_ospf_opaque_restart(tgen):
+ apibin = os.path.join(CLIENTDIR, "ospfclient.py")
+ rc, o, e = tgen.gears["r2"].net.cmd_status([apibin, "--help"])
+ logging.debug("%s --help: rc: %s stdout: '%s' stderr: '%s'", apibin, rc, o, e)
+ _test_opaque_add_restart_add(tgen, apibin)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
- ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf6": {
result = create_interfaces_cfg(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ # restart interface state machine on both routers to avoid stale state
+ tgen.net["r0"].cmd("clear ipv6 ospf6 interface")
+ tgen.net["r1"].cmd("clear ipv6 ospf6 interface")
+
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
assert ospf_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf_covergence
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r4-eth0
+ ip address 192.168.1.4/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r5-eth0
+ ip address 192.168.1.5/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
pytestmark = [pytest.mark.ripd]
def setup_module(mod):
- topodef = {"s1": ("r1", "r2", "r3")}
+ topodef = {"s1": ("r1", "r2", "r3", "r4", "r5")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
_, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip rip`"
- def _show_routes():
+ def _show_routes(nh_num):
output = json.loads(r1.vtysh_cmd("show ip route json"))
expected = {
"10.10.10.1/32": [
{
+ "internalNextHopNum": nh_num,
+ "internalNextHopActiveNum": nh_num,
"nexthops": [
{
"ip": "192.168.1.2",
"ip": "192.168.1.3",
"active": True,
},
- ]
+ ],
}
]
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_show_routes)
+ test_func = functools.partial(_show_routes, 4)
_, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
- assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip route`"
+ assert result is None, "Can't see 10.10.10.1/32 as multipath (4) in `show ip route`"
+
+ step(
+ "Configure allow-ecmp 2, ECMP group routes SHOULD have next-hops with the lowest IPs"
+ )
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router rip
+ allow-ecmp 2
+ """
+ )
+
+ test_func = functools.partial(_show_rip_routes)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert (
+ result is None
+ ), "Can't see 10.10.10.1/32 as ECMP with the lowest next-hop IPs"
+
+ test_func = functools.partial(_show_routes, 2)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Can't see 10.10.10.1/32 as multipath (2) in `show ip route`"
if __name__ == "__main__":
"instance": [
{
"vrf": "default",
- "allow-ecmp": "true",
+ "allow-ecmp": 1,
"distance": {
"source": [
{
<ripd xmlns="http://frrouting.org/yang/ripd">
<instance>
<vrf>default</vrf>
- <allow-ecmp>true</allow-ecmp>
+ <allow-ecmp>1</allow-ecmp>
<static-route>10.0.1.0/24</static-route>
<distance>
<source>
prefix rt-types;
}
+ import frr-route-types {
+ prefix frr-route-types;
+ }
+
organization
"Free Range Routing";
contact
"Match IPv6 next hop address";
}
+ identity source-protocol {
+ base frr-route-map:rmap-match-type;
+ description
+ "Match protocol via which the route was learnt";
+ }
+
identity distance {
base frr-route-map:rmap-set-type;
description
"IPv6 address";
}
}
+
+ case source-protocol {
+ when "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:source-protocol')";
+ leaf source-protocol {
+ type frr-route-types:frr-route-types;
+ }
+ }
}
augment "/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:rmap-set-action/frr-route-map:set-action" {
"VRF name.";
}
leaf allow-ecmp {
- type boolean;
- default "false";
+ type uint8;
+ default 0;
description
"Allow equal-cost multi-path.";
}
memory_order_relaxed);
}
+/*
+ * Pop one context from a provider's outbound queue, keeping the atomic
+ * dp_out_queued counter in sync with the list contents.
+ *
+ * Returns the dequeued ctx, or NULL if the queue is empty (in which case
+ * the counter is left untouched). The visible caller pops under
+ * dplane_provider_lock(); relaxed memory ordering is used here because
+ * the counter is bookkeeping, not a synchronization point.
+ */
+static struct zebra_dplane_ctx *
+dplane_provider_dequeue_out_ctx(struct zebra_dplane_provider *prov)
+{
+ struct zebra_dplane_ctx *ctx;
+
+ ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+ if (!ctx)
+ return NULL;
+
+ /* Decrement only after a successful pop so count matches list. */
+ atomic_fetch_sub_explicit(&(prov->dp_out_queued), 1,
+ memory_order_relaxed);
+
+ return ctx;
+}
+
+
/*
* Accessor for provider object
*/
dplane_provider_lock(prov);
while (counter < limit) {
- ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+ ctx = dplane_provider_dequeue_out_ctx(prov);
if (ctx) {
dplane_ctx_list_add_tail(&work_list, ctx);
counter++;
static struct zebra_nhlfe *
nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels,
- bool is_backup);
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels, bool is_backup);
static int nhlfe_del(struct zebra_nhlfe *nhlfe);
static void nhlfe_free(struct zebra_nhlfe *nhlfe);
static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
changed++;
} else {
/* Add LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, lsp_type, nexthop->type,
- &nexthop->gate, nexthop->ifindex,
- nexthop->nh_label->num_labels,
- nexthop->nh_label->label,
- false /*backup*/);
+ nhlfe = nhlfe_add(
+ lsp, lsp_type, nexthop->type, &nexthop->gate,
+ nexthop->ifindex, nexthop->vrf_id,
+ nexthop->nh_label->num_labels,
+ nexthop->nh_label->label, false /*backup*/);
if (!nhlfe)
return -1;
/*
* Locate NHLFE that matches with passed info.
+ * TODO: handle vrf_id if vrf backend is netns based
*/
static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
enum lsp_types_t lsp_type,
static struct zebra_nhlfe *
nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels)
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels)
{
struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
nexthop_add_labels(nexthop, lsp_type, num_labels, labels);
- nexthop->vrf_id = VRF_DEFAULT;
+ nexthop->vrf_id = vrf_id;
nexthop->type = gtype;
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
* Add primary or backup NHLFE. Base entry must have been created and
* duplicate check done.
*/
-static struct zebra_nhlfe *nhlfe_add(struct zebra_lsp *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels,
- const mpls_label_t *labels, bool is_backup)
+static struct zebra_nhlfe *
+nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
if (!lsp)
return NULL;
- /* Must have labels */
- if (num_labels == 0 || labels == NULL) {
- if (IS_ZEBRA_DEBUG_MPLS)
- zlog_debug("%s: invalid nexthop: no labels", __func__);
-
- return NULL;
- }
-
/* Allocate new object */
- nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- labels);
+ nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, vrf_id,
+ num_labels, labels);
if (!nhlfe)
return NULL;
json_nhlfe = json_object_new_object();
json_object_string_add(json_nhlfe, "type", nhlfe_type2str(nhlfe->type));
- json_object_int_add(json_nhlfe, "outLabel",
- nexthop->nh_label->label[0]);
-
- json_label_stack = json_object_new_array();
- json_object_object_add(json_nhlfe, "outLabelStack", json_label_stack);
- for (i = 0; i < nexthop->nh_label->num_labels; i++)
- json_object_array_add(
- json_label_stack,
- json_object_new_int(nexthop->nh_label->label[i]));
-
+ if (nexthop->nh_label) {
+ json_object_int_add(json_nhlfe, "outLabel",
+ nexthop->nh_label->label[0]);
+ json_label_stack = json_object_new_array();
+ json_object_object_add(json_nhlfe, "outLabelStack",
+ json_label_stack);
+ for (i = 0; i < nexthop->nh_label->num_labels; i++)
+ json_object_array_add(
+ json_label_stack,
+ json_object_new_int(
+ nexthop->nh_label->label[i]));
+ }
json_object_int_add(json_nhlfe, "distance", nhlfe->distance);
if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED))
case NEXTHOP_TYPE_IPV4_IFINDEX:
json_object_string_addf(json_nhlfe, "nexthop", "%pI4",
&nexthop->gate.ipv4);
+ if (nexthop->ifindex)
+ json_object_string_add(json_nhlfe, "interface",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
- return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- out_labels, false /*backup*/);
+ return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+ num_labels, out_labels, false /*backup*/);
}
/*
uint8_t num_labels, const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
- return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- out_labels, true);
+ return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+ num_labels, out_labels, true);
}
/*
{
struct zebra_nhlfe *nhlfe;
- if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
- return NULL;
-
- nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
- nh->nh_label->num_labels, nh->nh_label->label,
- false /*backup*/);
+ nhlfe = nhlfe_add(
+ lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, nh->vrf_id,
+ nh->nh_label ? nh->nh_label->num_labels : 0,
+ nh->nh_label ? nh->nh_label->label : NULL, false /*backup*/);
return nhlfe;
}
{
struct zebra_nhlfe *nhlfe;
- if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
- return NULL;
-
- nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate,
- nh->ifindex, nh->nh_label->num_labels,
- nh->nh_label->label, true);
+ nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
+ nh->vrf_id,
+ nh->nh_label ? nh->nh_label->num_labels : 0,
+ nh->nh_label ? nh->nh_label->label : NULL, true);
return nhlfe;
}
lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
uint8_t num_out_labels, const mpls_label_t *out_labels,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, bool is_backup)
+ ifindex_t ifindex, vrf_id_t vrf_id, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
char buf[MPLS_LABEL_STRLEN];
struct nexthop *nh = nhlfe->nexthop;
assert(nh);
- assert(nh->nh_label);
/* Clear deleted flag (in case it was set) */
UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
- if (nh->nh_label->num_labels == num_out_labels
- && !memcmp(nh->nh_label->label, out_labels,
- sizeof(mpls_label_t) * num_out_labels))
+
+ if (!nh->nh_label || num_out_labels == 0)
+ /* No change */
+ return nhlfe;
+
+ if (nh->nh_label &&
+ nh->nh_label->num_labels == num_out_labels &&
+ !memcmp(nh->nh_label->label, out_labels,
+ sizeof(mpls_label_t) * num_out_labels))
/* No change */
return nhlfe;
}
/* Update out label(s), trigger processing. */
- if (nh->nh_label->num_labels == num_out_labels)
+ if (nh->nh_label && nh->nh_label->num_labels == num_out_labels)
memcpy(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels);
else {
}
} else {
/* Add LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
+ nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, vrf_id,
num_out_labels, out_labels, is_backup);
if (!nhlfe)
return NULL;
char buf2[MPLS_LABEL_STRLEN];
nhlfe2str(nhlfe, buf, sizeof(buf));
- mpls_label2str(num_out_labels, out_labels, buf2,
- sizeof(buf2), 0, 0);
+ if (num_out_labels)
+ mpls_label2str(num_out_labels, out_labels, buf2,
+ sizeof(buf2), 0, 0);
+ else
+ snprintf(buf2, sizeof(buf2), "-");
zlog_debug("Add LSP in-label %u type %d %snexthop %s out-label(s) %s",
lsp->ile.in_label, type, backup_str, buf,
/*
* Install an LSP and forwarding entry; used primarily
* from vrf zapi message processing.
+ * TODO: handle vrf_id parameter when mpls API extends to interface or SRTE
+ * changes
*/
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
- gate, ifindex, false /*backup*/);
+ gate, ifindex, VRF_DEFAULT, false /*backup*/);
if (nhlfe == NULL)
return -1;
{
struct zebra_nhlfe *nhlfe;
- nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
- znh->type, &znh->gate, znh->ifindex,
+ nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+ &znh->gate, znh->ifindex, znh->vrf_id,
false /*backup*/);
if (nhlfe == NULL)
return -1;
{
struct zebra_nhlfe *nhlfe;
- nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num,
- znh->labels, znh->type, &znh->gate,
- znh->ifindex, true /*backup*/);
+ nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+ &znh->gate, znh->ifindex, znh->vrf_id,
+ true /*backup*/);
if (nhlfe == NULL) {
if (IS_ZEBRA_DEBUG_MPLS)
zlog_debug("%s: unable to add backup nhlfe, label: %u",
} else {
/* Add static LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate,
- ifindex, 1, &out_label, false /*backup*/);
+ nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate, ifindex,
+ VRF_DEFAULT, 1, &out_label, false /*backup*/);
if (!nhlfe)
return -1;
break;
}
- if (nexthop->type != NEXTHOP_TYPE_IFINDEX)
+ if (nexthop->type != NEXTHOP_TYPE_IFINDEX &&
+ nexthop->nh_label)
out_label_str = mpls_label2str(
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0],