GRTAGS
GPATH
compile_commands.json
+.ccls
.ccls-cache
.dirstamp
refix
attr.aspath = aspath_empty(bgp->asnotation);
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
- /* Next hop attribute. */
- attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
-
if (community) {
uint32_t gshut = COMMUNITY_GSHUT;
attr.aggregator_as = bgp->as;
attr.aggregator_addr = bgp->router_id;
+ /* Aggregates are computed for both IPv4 and IPv6, so check the
+ * family here. The NEXT_HOP attribute should only be set for the
+ * IPv4 AFI, based on RFC 4760:
+ * "An UPDATE message that carries no NLRI,
+ * other than the one encoded in
+ * the MP_REACH_NLRI attribute,
+ * SHOULD NOT carry the NEXT_HOP
+ * attribute"
+ */
+ if (p->family == AF_INET) {
+ /* Next hop attribute. */
+ attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+ attr.mp_nexthop_len = IPV4_MAX_BYTELEN;
+ }
+
/* Apply route-map */
if (aggregate->rmap.name) {
struct attr attr_tmp = attr;
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_rd.h"
#define BGP_LABELPOOL_ENABLE_TESTS 0
lcb->label);
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n", "nexthop",
+ lcb->label);
+ break;
}
}
if (uj)
vty_out(vty, "%-18s %u\n", "VRF",
label);
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ json_object_int_add(json_elem, "label", label);
+ } else
+ vty_out(vty, "%-18s %u\n", "nexthop",
+ label);
+ break;
}
}
if (uj)
else
vty_out(vty, "VRF\n");
break;
+ case LP_TYPE_NEXTHOP:
+ if (uj)
+ json_object_string_add(json_elem, "prefix",
+ "nexthop");
+ else
+ vty_out(vty, "Nexthop\n");
+ break;
}
}
if (uj)
return CMD_SUCCESS;
}
+static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
+ struct bgp *bgp, bool detail)
+{
+ struct bgp_label_per_nexthop_cache_head *tree;
+ struct bgp_label_per_nexthop_cache *iter;
+ safi_t safi;
+ void *src;
+ char buf[PREFIX2STR_BUFFER];
+ char labelstr[MPLS_LABEL_STRLEN];
+ struct bgp_dest *dest;
+ struct bgp_path_info *path;
+ struct bgp *bgp_path;
+ struct bgp_table *table;
+ time_t tbuf;
+
+ vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
+ afi2str(afi), bgp->name_pretty);
+
+ tree = &bgp->mpls_labels_per_nexthop[afi];
+ frr_each (bgp_label_per_nexthop_cache, tree, iter) {
+ if (afi2family(afi) == AF_INET)
+ src = (void *)&iter->nexthop.u.prefix4;
+ else
+ src = (void *)&iter->nexthop.u.prefix6;
+
+ vty_out(vty, " %s, label %s #paths %u\n",
+ inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
+ mpls_label2str(1, &iter->label, labelstr,
+ sizeof(labelstr), 0, true),
+ iter->path_count);
+ if (iter->nh)
+ vty_out(vty, " if %s\n",
+ ifindex2ifname(iter->nh->ifindex,
+ iter->nh->vrf_id));
+ tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
+ vty_out(vty, " Last update: %s", ctime(&tbuf));
+ if (!detail)
+ continue;
+ vty_out(vty, " Paths:\n");
+ LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
+ dest = path->net;
+ assert(dest);
+ table = bgp_dest_table(dest);
+ assert(table);
+ afi = family2afi(bgp_dest_get_prefix(dest)->family);
+ safi = table->safi;
+ bgp_path = table->bgp;
+
+ if (dest->pdest) {
+ vty_out(vty, " %d/%d %pBD RD ", afi, safi,
+ dest);
+
+ vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
+ (struct prefix_rd *)bgp_dest_get_prefix(
+ dest->pdest));
+ vty_out(vty, " %s flags 0x%x\n",
+ bgp_path->name_pretty, path->flags);
+ } else
+ vty_out(vty, " %d/%d %pBD %s flags 0x%x\n",
+ afi, safi, dest, bgp_path->name_pretty,
+ path->flags);
+ }
+ }
+}
+
+DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
+ "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
+ SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
+ "BGP label per-nexthop table\n"
+ "Show detailed information\n")
+{
+ int idx = 0;
+ char *vrf = NULL;
+ struct bgp *bgp;
+ bool detail = false;
+ int afi;
+
+ if (argv_find(argv, argc, "vrf", &idx)) {
+ vrf = argv[++idx]->arg;
+ bgp = bgp_lookup_by_name(vrf);
+ } else
+ bgp = bgp_get_default();
+
+ if (!bgp)
+ return CMD_SUCCESS;
+
+ if (argv_find(argv, argc, "detail", &idx))
+ detail = true;
+
+ for (afi = AFI_IP; afi <= AFI_IP6; afi++)
+ show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
+ return CMD_SUCCESS;
+}
+
#if BGP_LABELPOOL_ENABLE_TESTS
/*------------------------------------------------------------------------
* Testing code start
install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
+
+DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
+ "BGP Label Per Nexthop entry");
+
+/* Nexthop values are compared to find
+ * the appropriate cache entry in the tree
+ */
+int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+ const struct bgp_label_per_nexthop_cache *b)
+{
+ return prefix_cmp(&a->nexthop, &b->nexthop);
+}
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop)
+{
+ struct bgp_label_per_nexthop_cache *blnc;
+
+ blnc = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE,
+ sizeof(struct bgp_label_per_nexthop_cache));
+ blnc->tree = tree;
+ blnc->label = MPLS_INVALID_LABEL;
+ prefix_copy(&blnc->nexthop, nexthop);
+ LIST_INIT(&(blnc->paths));
+ bgp_label_per_nexthop_cache_add(tree, blnc);
+
+ return blnc;
+}
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop)
+{
+ struct bgp_label_per_nexthop_cache blnc = {};
+
+ if (!tree)
+ return NULL;
+
+ memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
+ return bgp_label_per_nexthop_cache_find(tree, &blnc);
+}
+
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
+{
+ if (blnc->label != MPLS_INVALID_LABEL) {
+ bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
+ blnc->label, blnc->nh->ifindex,
+ blnc->nh->vrf_id, ZEBRA_LSP_BGP,
+ &blnc->nexthop);
+ bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
+ }
+ bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
+ if (blnc->nh)
+ nexthop_free(blnc->nh);
+ blnc->nh = NULL;
+ XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
+}
+
+void bgp_label_per_nexthop_init(void)
+{
+ install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
+}
*/
#define LP_TYPE_VRF 0x00000001
#define LP_TYPE_BGP_LU 0x00000002
+#define LP_TYPE_NEXTHOP 0x00000003
PREDECL_LIST(lp_fifo);
extern void bgp_lp_event_zebra_up(void);
extern void bgp_lp_vty_init(void);
+struct bgp_label_per_nexthop_cache;
+PREDECL_RBTREE_UNIQ(bgp_label_per_nexthop_cache);
+
+extern int
+bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+ const struct bgp_label_per_nexthop_cache *b);
+
+struct bgp_label_per_nexthop_cache {
+
+ /* RB-tree entry. */
+ struct bgp_label_per_nexthop_cache_item entry;
+
+ /* the nexthop is the key of the tree */
+ struct prefix nexthop;
+
+ /* calculated label */
+ mpls_label_t label;
+
+ /* number of path_vrfs */
+ unsigned int path_count;
+
+ /* back pointer to bgp instance */
+ struct bgp *to_bgp;
+
+ /* copy a nexthop resolution from bgp nexthop tracking
+ * used to extract the interface nexthop
+ */
+ struct nexthop *nh;
+
+ /* list of path_vrfs using it */
+ LIST_HEAD(path_lists, bgp_path_info) paths;
+
+ time_t last_update;
+
+ /* Back pointer to the cache tree this entry belongs to. */
+ struct bgp_label_per_nexthop_cache_head *tree;
+};
+
+DECLARE_RBTREE_UNIQ(bgp_label_per_nexthop_cache,
+ struct bgp_label_per_nexthop_cache, entry,
+ bgp_label_per_nexthop_cache_cmp);
+
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc);
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop);
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+ struct prefix *nexthop);
+void bgp_label_per_nexthop_init(void);
#endif /* _FRR_BGP_LABELPOOL_H */
/*
* Routes that are redistributed into BGP from zebra do not get
- * nexthop tracking. However, if those routes are subsequently
- * imported to other RIBs within BGP, the leaked routes do not
- * carry the original BGP_ROUTE_REDISTRIBUTE sub_type. Therefore,
- * in order to determine if the route we are currently leaking
- * should have nexthop tracking, we must find the ultimate
- * parent so we can check its sub_type.
+ * nexthop tracking, unless MPLS allocation per nexthop is
+ * performed. In the default case, nexthop tracking does not apply.
+ * If those routes are subsequently imported to other RIBs within
+ * BGP, the leaked routes do not carry the original
+ * BGP_ROUTE_REDISTRIBUTE sub_type. Therefore, in order to determine
+ * if the route we are currently leaking should have nexthop
+ * tracking, we must find the ultimate parent so we can check its
+ * sub_type.
*
* As of now, source_bpi may at most be a second-generation route
* (only one hop back to ultimate parent for vrf-vpn-vrf scheme).
return new;
}
+void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi)
+{
+ struct bgp_label_per_nexthop_cache *blnc;
+
+ if (!pi)
+ return;
+
+ blnc = pi->label_nexthop_cache;
+
+ if (!blnc)
+ return;
+
+ LIST_REMOVE(pi, label_nh_thread);
+ pi->label_nexthop_cache->path_count--;
+ pi->label_nexthop_cache = NULL;
+
+ if (LIST_EMPTY(&(blnc->paths)))
+ bgp_label_per_nexthop_free(blnc);
+}
+
+/* Called upon reception of a ZAPI message from zebra, about
+ * a newly available label.
+ */
+static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
+ void *context, bool allocated)
+{
+ struct bgp_label_per_nexthop_cache *blnc = context;
+ mpls_label_t old_label;
+ int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+ struct bgp_path_info *pi;
+ struct bgp_table *table;
+
+ old_label = blnc->label;
+
+ if (debug)
+ zlog_debug("%s: label=%u, allocated=%d, nexthop=%pFX", __func__,
+ label, allocated, &blnc->nexthop);
+ if (allocated)
+ /* update the entry with the new label */
+ blnc->label = label;
+ else
+ /*
+ * previously-allocated label is now invalid,
+ * e.g. zebra deallocated the labels and notified us
+ */
+ blnc->label = MPLS_INVALID_LABEL;
+
+ if (old_label == blnc->label)
+ return 0; /* no change */
+
+ /* update paths */
+ if (blnc->label != MPLS_INVALID_LABEL)
+ bgp_zebra_send_nexthop_label(
+ ZEBRA_MPLS_LABELS_ADD, blnc->label, blnc->nh->ifindex,
+ blnc->nh->vrf_id, ZEBRA_LSP_BGP, &blnc->nexthop);
+
+ LIST_FOREACH (pi, &(blnc->paths), label_nh_thread) {
+ if (!pi->net)
+ continue;
+ table = bgp_dest_table(pi->net);
+ if (!table)
+ continue;
+ vpn_leak_from_vrf_update(blnc->to_bgp, table->bgp, pi);
+ }
+
+ return 0;
+}
+
+/* Get a per-nexthop label value:
+ * - Find and return a per-nexthop label from the cache,
+ * - else allocate a new per-nexthop label cache entry and request a
+ * label from zebra. Return MPLS_INVALID_LABEL in that case.
+ */
+static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
+ struct bgp_path_info *pi, struct bgp *to_bgp, struct bgp *from_bgp,
+ afi_t afi, safi_t safi)
+{
+ struct bgp_nexthop_cache *bnc = pi->nexthop;
+ struct bgp_label_per_nexthop_cache *blnc;
+ struct bgp_label_per_nexthop_cache_head *tree;
+ struct prefix *nh_pfx = NULL;
+ struct prefix nh_gate = {0};
+
+ /* extract the nexthop from the BNC nexthop cache */
+ switch (bnc->nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ /* the nexthop is recursive */
+ nh_gate.family = AF_INET;
+ nh_gate.prefixlen = IPV4_MAX_BITLEN;
+ IPV4_ADDR_COPY(&nh_gate.u.prefix4, &bnc->nexthop->gate.ipv4);
+ nh_pfx = &nh_gate;
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ /* the nexthop is recursive */
+ nh_gate.family = AF_INET6;
+ nh_gate.prefixlen = IPV6_MAX_BITLEN;
+ IPV6_ADDR_COPY(&nh_gate.u.prefix6, &bnc->nexthop->gate.ipv6);
+ nh_pfx = &nh_gate;
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /* the nexthop is directly connected */
+ nh_pfx = &bnc->prefix;
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ assert(!"Blackhole nexthop. Already checked by the caller.");
+ }
+
+ /* find or allocate a nexthop label cache entry */
+ tree = &from_bgp->mpls_labels_per_nexthop[family2afi(nh_pfx->family)];
+ blnc = bgp_label_per_nexthop_find(tree, nh_pfx);
+ if (!blnc) {
+ blnc = bgp_label_per_nexthop_new(tree, nh_pfx);
+ blnc->to_bgp = to_bgp;
+ /* request a label from zebra for this nexthop;
+ * the response from zebra will trigger the callback
+ */
+ bgp_lp_get(LP_TYPE_NEXTHOP, blnc,
+ bgp_mplsvpn_get_label_per_nexthop_cb);
+ }
+
+ if (pi->label_nexthop_cache == blnc)
+ /* no change */
+ return blnc->label;
+
+ /* Unlink from any existing nexthop cache. Free the entry if unused.
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ if (blnc) {
+ /* updates NHT pi list reference */
+ LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
+ pi->label_nexthop_cache = blnc;
+ pi->label_nexthop_cache->path_count++;
+ blnc->last_update = monotime(NULL);
+ }
+
+ /* then add or update the selected nexthop */
+ if (!blnc->nh)
+ blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+ else if (!nexthop_same(bnc->nexthop, blnc->nh)) {
+ nexthop_free(blnc->nh);
+ blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+ if (blnc->label != MPLS_INVALID_LABEL) {
+ bgp_zebra_send_nexthop_label(
+ ZEBRA_MPLS_LABELS_REPLACE, blnc->label,
+ bnc->nexthop->ifindex, bnc->nexthop->vrf_id,
+ ZEBRA_LSP_BGP, &blnc->nexthop);
+ }
+ }
+
+ return blnc->label;
+}
+
+/* Filter out all the cases where a per-nexthop label is not possible:
+ * - return an invalid label when the nexthop is invalid
+ * - return the per-VRF label when the per-nexthop label is not supported
+ * Otherwise, find or request a per-nexthop label.
+ */
+static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
+ afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp *from_bgp,
+ struct bgp *to_bgp)
+{
+ struct bgp_path_info *bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
+ struct bgp *bgp_nexthop = NULL;
+ bool nh_valid;
+ afi_t nh_afi;
+ bool is_bgp_static_route;
+
+ is_bgp_static_route = bpi_ultimate->sub_type == BGP_ROUTE_STATIC &&
+ bpi_ultimate->type == ZEBRA_ROUTE_BGP;
+
+ if (is_bgp_static_route == false && afi == AFI_IP &&
+ CHECK_FLAG(pi->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) &&
+ (pi->attr->nexthop.s_addr == INADDR_ANY ||
+ !ipv4_unicast_valid(&pi->attr->nexthop))) {
+ /* IPv4 nexthop in standard BGP encoding format.
+ * The address is either unspecified or not a valid
+ * unicast address: fall back to the per-VRF label.
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return from_bgp->vpn_policy[afi].tovpn_label;
+ }
+
+ if (is_bgp_static_route == false && afi == AFI_IP &&
+ pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4 &&
+ (pi->attr->mp_nexthop_global_in.s_addr == INADDR_ANY ||
+ !ipv4_unicast_valid(&pi->attr->mp_nexthop_global_in))) {
+ /* IPv4 nexthop is in MP-BGP encoding format.
+ * The address is either unspecified or not a valid
+ * unicast address: fall back to the per-VRF label.
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return from_bgp->vpn_policy[afi].tovpn_label;
+ }
+
+ if (is_bgp_static_route == false && afi == AFI_IP6 &&
+ (pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL ||
+ pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) &&
+ (IN6_IS_ADDR_UNSPECIFIED(&pi->attr->mp_nexthop_global) ||
+ IN6_IS_ADDR_LOOPBACK(&pi->attr->mp_nexthop_global) ||
+ IN6_IS_ADDR_MULTICAST(&pi->attr->mp_nexthop_global))) {
+ /* IPv6 nexthop is in MP-BGP encoding format.
+ * The address is unspecified, loopback or multicast:
+ * fall back to the per-VRF label.
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return from_bgp->vpn_policy[afi].tovpn_label;
+ }
+
+ /* Check the next-hop reachability.
+ * Get the bgp instance where the bgp_path_info originates.
+ */
+ if (pi->extra && pi->extra->bgp_orig)
+ bgp_nexthop = pi->extra->bgp_orig;
+ else
+ bgp_nexthop = from_bgp;
+
+ nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
+ nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, safi,
+ pi, NULL, 0, NULL);
+
+ if (!nh_valid && is_bgp_static_route &&
+ !CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
+ /* "network" prefixes not routable, but since 'no bgp network
+ * import-check' is configured, they are always valid in the BGP
+ * table. Fallback to the per-vrf label
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return from_bgp->vpn_policy[afi].tovpn_label;
+ }
+
+ if (!nh_valid || !pi->nexthop || pi->nexthop->nexthop_num == 0 ||
+ !pi->nexthop->nexthop) {
+ /* invalid next-hop:
+ * do not send the per-VRF label;
+ * otherwise, when the next-hop becomes valid,
+ * we would send 2 BGP updates:
+ * - one with the per-VRF label
+ * - a second one with the per-nexthop label
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return MPLS_INVALID_LABEL;
+ }
+
+ if (pi->nexthop->nexthop_num > 1 ||
+ pi->nexthop->nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
+ /* Blackhole and ECMP routes
+ * are not compatible with the per-nexthop label.
+ * Fall back to the per-VRF label.
+ */
+ bgp_mplsvpn_path_nh_label_unlink(pi);
+ return from_bgp->vpn_policy[afi].tovpn_label;
+ }
+
+ return _vpn_leak_from_vrf_get_per_nexthop_label(pi, to_bgp, from_bgp,
+ afi, safi);
+}
+
/* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
struct bgp *from_bgp, /* from */
nexthop_self_flag = 1;
}
- label_val = from_bgp->vpn_policy[afi].tovpn_label;
- if (label_val == MPLS_LABEL_NONE) {
+ if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ /* per nexthop label mode */
+ label_val = vpn_leak_from_vrf_get_per_nexthop_label(
+ afi, safi, path_vrf, from_bgp, to_bgp);
+ else
+ /* per VRF label mode */
+ label_val = from_bgp->vpn_policy[afi].tovpn_label;
+
+ if (label_val == MPLS_INVALID_LABEL &&
+ CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP)) {
+ /* No valid label for the moment. When the
+ * 'bgp_mplsvpn_get_label_per_nexthop_cb' callback gets
+ * a valid label value, it will call the current function again.
+ */
+ if (debug)
+ zlog_debug(
+ "%s: %s skipping: waiting for a valid per-label nexthop.",
+ __func__, from_bgp->name_pretty);
+ return;
+ }
+ if (label_val == MPLS_LABEL_NONE)
encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
- } else {
+ else
encode_label(label_val, &label);
- }
/* Set originator ID to "me" */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
bgp_process(to_bgp, bn, afi, safi);
+ bgp_mplsvpn_path_nh_label_unlink(
+ bpi->extra->parent);
}
}
}
#define BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH 20
extern void bgp_mplsvpn_init(void);
+extern void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi);
extern int bgp_nlri_parse_vpn(struct peer *, struct attr *, struct bgp_nlri *);
extern uint32_t decode_label(mpls_label_t *);
extern void encode_label(mpls_label_t, mpls_label_t *);
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String");
while (!LIST_EMPTY(&(bnc->paths))) {
struct bgp_path_info *path = LIST_FIRST(&(bnc->paths));
+ bgp_mplsvpn_path_nh_label_unlink(path);
+
path_nh_map(path, bnc, false);
}
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
extern struct zclient *zclient;
{
struct bgp_nexthop_cache *bnc = path->nexthop;
+ bgp_mplsvpn_path_nh_label_unlink(path);
+
if (!bnc)
return;
}
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
- if (!(path->type == ZEBRA_ROUTE_BGP
- && ((path->sub_type == BGP_ROUTE_NORMAL)
- || (path->sub_type == BGP_ROUTE_STATIC)
- || (path->sub_type == BGP_ROUTE_IMPORTED))))
+ if (path->type == ZEBRA_ROUTE_BGP &&
+ (path->sub_type == BGP_ROUTE_NORMAL ||
+ path->sub_type == BGP_ROUTE_STATIC ||
+ path->sub_type == BGP_ROUTE_IMPORTED))
+ /* evaluate the path */
+ ;
+ else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE) {
+ /* evaluate the path for redistributed routes
+ * except those from VNC
+ */
+ if ((path->type == ZEBRA_ROUTE_VNC) ||
+ (path->type == ZEBRA_ROUTE_VNC_DIRECT))
+ continue;
+ } else
+ /* don't evaluate the path */
continue;
dest = path->net;
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
- if (path_valid != bnc_is_valid_nexthop) {
+ if (path->type == ZEBRA_ROUTE_BGP &&
+ path->sub_type == BGP_ROUTE_STATIC &&
+ !CHECK_FLAG(bgp_path->flags, BGP_FLAG_IMPORT_CHECK))
+ /* static routes with 'no bgp network import-check' are
+ * always valid. If NHT is called with static routes,
+ * the VPN export needs to be triggered.
+ */
+ vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+ path);
+ else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE &&
+ safi == SAFI_UNICAST &&
+ (bgp_path->inst_type == BGP_INSTANCE_TYPE_VRF ||
+ bgp_path->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
+ /* redistributed routes are always valid.
+ * If NHT is called with redistributed routes, the VPN
+ * export needs to be triggered.
+ */
+ vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+ path);
+ else if (path_valid != bnc_is_valid_nexthop) {
if (path_valid) {
/* No longer valid, clear flag; also for EVPN
* routes, unimport from VRFs if needed.
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_unimport_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
+ if (safi == SAFI_UNICAST &&
+ (bgp_path->inst_type !=
+ BGP_INSTANCE_TYPE_VIEW))
+ vpn_leak_from_vrf_withdraw(
+ bgp_get_default(), bgp_path,
+ path);
} else {
/* Path becomes valid, set flag; also for EVPN
* routes, import from VRFs if needed.
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_import_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
+ if (safi == SAFI_UNICAST &&
+ (bgp_path->inst_type !=
+ BGP_INSTANCE_TYPE_VIEW))
+ vpn_leak_from_vrf_update(
+ bgp_get_default(), bgp_path,
+ path);
}
}
struct bgp_path_info *exist, int *paths_eq)
{
enum bgp_path_selection_reason reason;
- char pfx_buf[PREFIX2STR_BUFFER];
+ char pfx_buf[PREFIX2STR_BUFFER] = {};
return bgp_path_info_cmp(bgp, new, exist, paths_eq, NULL, 0, pfx_buf,
AFI_L2VPN, SAFI_EVPN, &reason);
struct bgp_path_info *nextpi = NULL;
int paths_eq, do_mpath, debug;
struct list mp_list;
- char pfx_buf[PREFIX2STR_BUFFER];
+ char pfx_buf[PREFIX2STR_BUFFER] = {};
char path_buf[PATH_ADDPATH_STR_BUFFER];
bgp_mp_list_init(&mp_list);
asnotation = bgp_get_asnotation(NULL);
- if (!ae)
+ if (!aspath)
ae = aspath_empty(asnotation);
if (!pi)
* If the aggregate information has not changed
* no need to re-install it again.
*/
- if (bgp_aggregate_info_same(orig, origin, aspath, community,
- ecommunity, lcommunity)) {
+ if (pi && bgp_aggregate_info_same(pi, origin, aspath, community,
+ ecommunity, lcommunity)) {
bgp_dest_unlock_node(dest);
if (aspath)
*/
assert(attr.aspath);
+ if (p->family == AF_INET6)
+ UNSET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP));
+
switch (nhtype) {
case NEXTHOP_TYPE_IFINDEX:
switch (p->family) {
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,
case NEXTHOP_TYPE_IPV4_IFINDEX:
attr.nexthop = nexthop->ipv4;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in = nexthop->ipv4;
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+ attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,
/* Addpath identifiers */
uint32_t addpath_rx_id;
struct bgp_addpath_info_data tx_addpath;
+
+ /* For nexthop per label linked list */
+ LIST_ENTRY(bgp_path_info) label_nh_thread;
+
+ /* Back pointer to the bgp label per nexthop structure */
+ struct bgp_label_per_nexthop_cache *label_nexthop_cache;
};
/* Structure used in BGP path selection */
route_match_ip_next_hop_type_free
};
+/* `match source-protocol' */
+static enum route_map_cmd_result_t
+route_match_source_protocol(void *rule, const struct prefix *prefix,
+ void *object)
+{
+ struct bgp_path_info *path = object;
+ int *protocol = rule;
+
+ if (!path)
+ return RMAP_NOMATCH;
+
+ if (path->type == *protocol)
+ return RMAP_MATCH;
+
+ return RMAP_NOMATCH;
+}
+
+static void *route_match_source_protocol_compile(const char *arg)
+{
+ int *protocol;
+
+ protocol = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(*protocol));
+ *protocol = proto_name2num(arg);
+
+ return protocol;
+}
+
+static void route_match_source_protocol_free(void *rule)
+{
+ XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+static const struct route_map_rule_cmd route_match_source_protocol_cmd = {
+ "source-protocol",
+ route_match_source_protocol,
+ route_match_source_protocol_compile,
+ route_match_source_protocol_free
+};
+
+
/* `match ip route-source prefix-list PREFIX_LIST' */
static enum route_map_cmd_result_t
return nb_cli_apply_changes(vty, NULL);
}
+DEFPY_YANG (match_source_protocol,
+ match_source_protocol_cmd,
+ "match source-protocol " FRR_REDIST_STR_ZEBRA "$proto",
+ MATCH_STR
+ "Match protocol via which the route was learnt\n"
+ FRR_REDIST_HELP_STR_ZEBRA)
+{
+ const char *xpath =
+ "./match-condition[condition='frr-bgp-route-map:source-protocol']";
+ char xpath_value[XPATH_MAXLEN];
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+ snprintf(xpath_value, sizeof(xpath_value),
+ "%s/rmap-match-condition/frr-bgp-route-map:source-protocol",
+ xpath);
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, proto);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY_YANG (no_match_source_protocol,
+ no_match_source_protocol_cmd,
+ "no match source-protocol [" FRR_REDIST_STR_ZEBRA "]",
+ NO_STR
+ MATCH_STR
+ "Match protocol via which the route was learnt\n"
+ FRR_REDIST_HELP_STR_ZEBRA)
+{
+ const char *xpath =
+ "./match-condition[condition='frr-bgp-route-map:source-protocol']";
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
/* Initialization of route map. */
void bgp_route_map_init(void)
{
route_map_install_match(&route_match_ip_address_prefix_list_cmd);
route_map_install_match(&route_match_ip_next_hop_prefix_list_cmd);
route_map_install_match(&route_match_ip_next_hop_type_cmd);
+ route_map_install_match(&route_match_source_protocol_cmd);
route_map_install_match(&route_match_ip_route_source_prefix_list_cmd);
route_map_install_match(&route_match_aspath_cmd);
route_map_install_match(&route_match_community_cmd);
install_element(RMAP_NODE, &set_ipv6_nexthop_peer_cmd);
install_element(RMAP_NODE, &no_set_ipv6_nexthop_peer_cmd);
install_element(RMAP_NODE, &match_rpki_extcommunity_cmd);
+ install_element(RMAP_NODE, &match_source_protocol_cmd);
+ install_element(RMAP_NODE, &no_match_source_protocol_cmd);
#ifdef HAVE_SCRIPTING
install_element(RMAP_NODE, &match_script_cmd);
#endif
.destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_destroy,
}
},
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol",
+ .cbs = {
+ .modify = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify,
+ .destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy,
+ }
+ },
{
.xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:peer-ipv4-address",
.cbs = {
struct nb_cb_modify_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_rpki_extcommunity_destroy(
struct nb_cb_destroy_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+ struct nb_cb_destroy_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_probability_modify(struct nb_cb_modify_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_probability_destroy(struct nb_cb_destroy_args *args);
int lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_modify(struct nb_cb_modify_args *args);
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol
+ */
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct routemap_hook_context *rhc;
+ enum rmap_compile_rets ret;
+ const char *proto;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ /* Add configuration. */
+ rhc = nb_running_get_entry(args->dnode, NULL, true);
+ proto = yang_dnode_get_string(args->dnode, NULL);
+
+ /* Set destroy information. */
+ rhc->rhc_mhook = bgp_route_match_delete;
+ rhc->rhc_rule = "source-protocol";
+ rhc->rhc_event = RMAP_EVENT_MATCH_DELETED;
+
+ ret = bgp_route_match_add(rhc->rhc_rmi, "source-protocol",
+ proto, RMAP_EVENT_MATCH_ADDED,
+ args->errmsg, args->errmsg_len);
+
+ if (ret != RMAP_COMPILE_SUCCESS) {
+ rhc->rhc_mhook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+ }
+
+ return NB_OK;
+}
+
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return lib_route_map_entry_match_destroy(args);
+ }
+
+ return NB_OK;
+}
+
/*
* XPath:
* /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:rpki-extcommunity
"Between current address-family and vpn\n"
"For routes leaked from current address-family to vpn\n")
+DEFPY(af_label_vpn_export_allocation_mode,
+ af_label_vpn_export_allocation_mode_cmd,
+ "[no$no] label vpn export allocation-mode <per-vrf$label_per_vrf|per-nexthop$label_per_nh>",
+ NO_STR
+ "label value for VRF\n"
+ "Between current address-family and vpn\n"
+ "For routes leaked from current address-family to vpn\n"
+ "Label allocation mode\n"
+ "Allocate one label for all BGP updates of the VRF\n"
+ "Allocate a label per connected next-hop in the VRF\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ afi_t afi;
+ bool old_per_nexthop, new_per_nexthop;
+
+ afi = vpn_policy_getafi(vty, bgp, false);
+
+ old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+ if (no) {
+ if (old_per_nexthop == false && label_per_nh)
+ return CMD_ERR_NO_MATCH;
+ if (old_per_nexthop == true && label_per_vrf)
+ return CMD_ERR_NO_MATCH;
+ new_per_nexthop = false;
+ } else {
+ if (label_per_nh)
+ new_per_nexthop = true;
+ else
+ new_per_nexthop = false;
+ }
+
+ /* no change */
+ if (old_per_nexthop == new_per_nexthop)
+ return CMD_SUCCESS;
+
+ /*
+ * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+ */
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+ bgp);
+
+ if (new_per_nexthop)
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+ else
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+
+ /* post-change: re-export vpn routes */
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+ bgp);
+
+ hook_call(bgp_snmp_update_last_changed, bgp);
+ return CMD_SUCCESS;
+}
+
DEFPY (af_label_vpn_export,
af_label_vpn_export_cmd,
"[no] label vpn export <(0-1048575)$label_val|auto$label_auto>",
&peer->ibuf->count,
memory_order_relaxed);
- json_object_int_add(json_peer, "tableVersion",
- peer->version[afi][safi]);
+ json_object_int_add(
+ json_peer, "tableVersion",
+ (paf && PAF_SUBGRP(paf))
+ ? paf->subgroup->version
+ : 0);
json_object_int_add(json_peer, "outq",
outq_count);
json_object_int_add(json_peer, "inq",
" %9u %9u %8" PRIu64 " %4zu %4zu %8s",
PEER_TOTAL_RX(peer),
PEER_TOTAL_TX(peer),
- peer->version[afi][safi], inq_count,
- outq_count,
+ (paf && PAF_SUBGRP(paf))
+ ? paf->subgroup->version
+ : 0,
+ inq_count, outq_count,
peer_uptime(peer->uptime, timebuf,
BGP_UPTIME_LEN, 0, NULL));
}
}
+ if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ vty_out(vty,
+ "%*slabel vpn export allocation-mode per-nexthop\n",
+ indent, "");
+
tovpn_sid_index = bgp->vpn_policy[afi].tovpn_sid_index;
if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO)) {
install_element(BGP_IPV6_NODE, &af_rd_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_label_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_label_vpn_export_cmd);
+ install_element(BGP_IPV4_NODE,
+ &af_label_vpn_export_allocation_mode_cmd);
+ install_element(BGP_IPV6_NODE,
+ &af_label_vpn_export_allocation_mode_cmd);
install_element(BGP_IPV4_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_rt_vpn_imexport_cmd);
{
return srv6_manager_release_locator_chunk(zclient, name);
}
+
+void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+ ifindex_t ifindex, vrf_id_t vrf_id,
+ enum lsp_types_t ltype, struct prefix *p)
+{
+ struct zapi_labels zl = {};
+ struct zapi_nexthop *znh;
+
+ zl.type = ltype;
+ zl.local_label = label;
+ zl.nexthop_num = 1;
+ znh = &zl.nexthops[0];
+ if (p->family == AF_INET)
+ IPV4_ADDR_COPY(&znh->gate.ipv4, &p->u.prefix4);
+ else
+ IPV6_ADDR_COPY(&znh->gate.ipv6, &p->u.prefix6);
+ if (ifindex == IFINDEX_INTERNAL)
+ znh->type = (p->family == AF_INET) ? NEXTHOP_TYPE_IPV4
+ : NEXTHOP_TYPE_IPV6;
+ else
+ znh->type = (p->family == AF_INET) ? NEXTHOP_TYPE_IPV4_IFINDEX
+ : NEXTHOP_TYPE_IPV6_IFINDEX;
+ znh->ifindex = ifindex;
+ znh->vrf_id = vrf_id;
+ znh->label_num = 0;
+
+ /* vrf_id is DEFAULT_VRF */
+ zebra_send_mpls_labels(zclient, cmd, &zl);
+}
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
+extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+ ifindex_t index, vrf_id_t vrfid,
+ enum lsp_types_t ltype,
+ struct prefix *p);
#endif /* _QUAGGA_BGP_ZEBRA_H */
SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
}
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++)
+ bgp_label_per_nexthop_cache_init(
+ &bgp->mpls_labels_per_nexthop[afi]);
+
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
bgp_lp_vty_init();
+ bgp_label_per_nexthop_init();
+
cmd_variable_handler_register(bgp_viewvrf_var_handlers);
}
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
+#define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
/*
* If we are importing another vrf into us keep a list of
/* Allocate MPLS labels */
uint8_t allocate_mpls_labels[AFI_MAX][SAFI_MAX];
+ /* Tree for next-hop lookup cache. */
+ struct bgp_label_per_nexthop_cache_head
+ mpls_labels_per_nexthop[AFI_MAX];
+
/* Allocate hash entries to store policy routing information
* The hash are used to host pbr rules somewhere.
* Actually, pbr will only be used by flowspec
/* BGP peer group. */
struct peer_group *group;
- uint64_t version[AFI_MAX][SAFI_MAX];
/* BGP peer_af structures, per configured AF on this peer */
struct peer_af *peer_af_array[BGP_AF_MAX];
struct bgp *bgp;
struct bgp_path_info *prev;
struct bgp_path_info *next;
- char pfx_buf[PREFIX2STR_BUFFER];
+ char pfx_buf[PREFIX2STR_BUFFER] = {};
bgp = bgp_get_default(); /* assume 1 instance for now */
fi
])
+AC_ARG_ENABLE([ccls],
+AS_HELP_STRING([--enable-ccls], [Write .ccls config for this build]))
+
AC_ARG_ENABLE([dev_build],
AS_HELP_STRING([--enable-dev-build], [build for development]))
AC_CONFIG_FILES([tools/frr.service])
AC_CONFIG_FILES([tools/frr@.service])
+# dnl write out a ccls file with our compile configuration
+# dnl have to add -Wno-unused-function otherwise foobar_cmd_magic causes
+# dnl all DEFPY(), et al., macros to flag as errors.
+AS_IF([test "$enable_ccls" = "yes"], [
+ AC_CONFIG_COMMANDS([gen-dot-ccls], [
+ cat > "${srcdir}/.ccls" <<EOF
+clang
+-DHAVE_CONFIG_H
+-I.
+-I./include
+-I./lib
+-I./lib/assert
+-DSYSCONFDIR="${ac_frr_sysconfdir}"
+-DCONFDATE=${ac_frr_confdate}
+EOF
+ if test "$ac_abs_top_builddir" != "$ac_abs_top_srcdir"; then
+ echo "-I${ac_abs_top_builddir}" >> "${srcdir}/.ccls"
+ fi
+ if test -n "$FRR_ALL_CCLS_FLAGS"; then
+ echo ${FRR_ALL_CCLS_FLAGS} | tr ' ' '\n' >> "${srcdir}/.ccls"
+ fi
+ if test -n "$FRR_ALL_CCLS_CFLAGS"; then
+ cat >> "${srcdir}/.ccls" <<EOF
+%c $(echo ${FRR_ALL_CCLS_CFLAGS} | sed -e 's/ */\n%c /g')
+%c -Wno-unused-function
+EOF
+fi
+ ], [
+ FRR_ALL_CCLS_FLAGS="$(echo ${LIBYANG_CFLAGS} ${LUA_INCLUDE} ${SQLITE3_CFLAGS} | sed -e 's/ */ /g')"
+ FRR_ALL_CCLS_CFLAGS="$(echo ${CFLAGS} ${WERROR} ${AC_CFLAGS} ${SAN_FLAGS} | sed -e 's/ */ /g')"
+ ac_frr_confdate="${CONFDATE}"
+ ac_frr_sysconfdir="${sysconfdir}/"
+ ])
+])
+
+
AS_IF([test "$with_pkg_git_version" = "yes"], [
AC_CONFIG_COMMANDS([lib/gitversion.h], [
dst="${ac_abs_top_builddir}/lib/gitversion.h"
--- /dev/null
+Ubuntu 22.04 LTS
+================
+
+This document describes installation from source. If you want to build a
+``deb``, see :ref:`packaging-debian`.
+
+Installing Dependencies
+-----------------------
+
+.. code-block:: console
+
+ sudo apt update
+ sudo apt-get install \
+ git autoconf automake libtool make libreadline-dev texinfo \
+ pkg-config libpam0g-dev libjson-c-dev bison flex \
+ libc-ares-dev python3-dev python3-sphinx \
+ install-info build-essential libsnmp-dev perl \
+ libcap-dev python2 libelf-dev libunwind-dev \
+ libyang2 libyang2-dev
+
+.. include:: building-libunwind-note.rst
+
+Note that Ubuntu >= 20 no longer installs python 2.x, so it must be
+installed explicitly. Ensure that your system has a symlink named
+``/usr/bin/python`` pointing at ``/usr/bin/python3``.
+
+.. code-block:: shell
+
+ sudo ln -s /usr/bin/python3 /usr/bin/python
+ python --version
+
+In addition, ``pip`` for python2 must be installed if you wish to run
+the FRR topotests. That version of ``pip`` is not available from the
+ubuntu apt repositories; in order to install it:
+
+.. code-block:: shell
+
+ curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py
+ sudo python2 ./get-pip.py
+
+ # And verify the installation
+ pip2 --version
+
+
+Protobuf
+^^^^^^^^
+This is optional
+
+.. code-block:: console
+
+ sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
+ZeroMQ
+^^^^^^
+This is optional
+
+.. code-block:: console
+
+ sudo apt-get install libzmq5 libzmq3-dev
+
+Building & Installing FRR
+-------------------------
+
+Add FRR user and groups
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ sudo groupadd -r -g 92 frr
+ sudo groupadd -r -g 85 frrvty
+ sudo adduser --system --ingroup frr --home /var/run/frr/ \
+ --gecos "FRR suite" --shell /sbin/nologin frr
+ sudo usermod -a -G frrvty frr
+
+Compile
+^^^^^^^
+
+.. include:: include-compile.rst
+
+Install FRR configuration files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ sudo install -m 775 -o frr -g frr -d /var/log/frr
+ sudo install -m 775 -o frr -g frrvty -d /etc/frr
+ sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf
+ sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf
+ sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf
+ sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons
+
+Tweak sysctls
+^^^^^^^^^^^^^
+
+Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and
+MPLS (if supported by your platform). If your platform does not support MPLS,
+skip the MPLS related configuration in this section.
+
+Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the
+other settings):
+
+::
+
+ # Uncomment the next line to enable packet forwarding for IPv4
+ net.ipv4.ip_forward=1
+
+ # Uncomment the next line to enable packet forwarding for IPv6
+ # Enabling this option disables Stateless Address Autoconfiguration
+ # based on Router Advertisements for this host
+ net.ipv6.conf.all.forwarding=1
+
+Reboot or use ``sysctl -p`` to apply the same config to the running system.
+
+Add MPLS kernel modules
+"""""""""""""""""""""""
+
+Ubuntu 22.04 ships with kernel 5.15; MPLS modules are present by default. To
+enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`:
+
+::
+
+ # Load MPLS Kernel Modules
+ mpls_router
+ mpls_iptunnel
+
+
+And load the kernel modules on the running system:
+
+.. code-block:: console
+
+ sudo modprobe mpls-router mpls-iptunnel
+
+If the above command returns an error, you may need to install the appropriate
+or latest linux-modules-extra-<kernel-version>-generic package. For example:
+``apt-get install linux-modules-extra-`uname -r`-generic``.
+
+Enable MPLS Forwarding
+""""""""""""""""""""""
+
+Edit :file:`/etc/sysctl.conf` and add the following lines. Make sure to add
+a line equal to :file:`net.mpls.conf.eth0.input` for each interface used
+with MPLS.
+
+::
+
+ # Enable MPLS Label processing on all interfaces
+ net.mpls.conf.eth0.input=1
+ net.mpls.conf.eth1.input=1
+ net.mpls.conf.eth2.input=1
+ net.mpls.platform_labels=100000
+
+Install service files
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service
+ sudo systemctl enable frr
+
+Enable daemons
+^^^^^^^^^^^^^^
+
+Open :file:`/etc/frr/daemons` with your text editor of choice. Look for the
+section with ``watchfrr_enable=...`` and ``zebra=...`` etc. Enable the daemons
+as required by changing the value to ``yes``.
+
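+For example, to run ``zebra`` and ``bgpd``, set (the daemon selection here
+is illustrative):
+
+.. code-block:: shell
+
+   zebra=yes
+   bgpd=yes
+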
+Start FRR
+^^^^^^^^^
+
+.. code-block:: shell
+
+ systemctl start frr
--localstatedir=/var/run/frr \
--sysconfdir=/etc/frr \
--with-moduledir=\${prefix}/lib/frr/modules \
- --with-libyang-pluginsdir=\${prefix}/lib/frr/libyang_plugins \
--enable-configfile-mask=0640 \
--enable-logfile-mask=0640 \
--enable-snmp=agentx \
Instructions are the same for all setups (i.e. ExaBGP is only used for
BGP tests).
+Tshark is only required if you enable any packet captures on test runs.
+
+Valgrind is only required if you enable valgrind on test runs.
+
Installing Topotest Requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: shell
- apt-get install gdb
- apt-get install iproute2
- apt-get install net-tools
- apt-get install python3-pip
+ apt-get install \
+ gdb \
+ iproute2 \
+ net-tools \
+ python3-pip \
+ iputils-ping \
+ tshark \
+ valgrind
python3 -m pip install wheel
python3 -m pip install 'pytest>=6.2.4'
python3 -m pip install 'pytest-xdist>=2.3.0'
--sysconfdir=/etc/frr \
--enable-vtysh \
--enable-pimd \
+ --enable-pim6d \
--enable-sharpd \
--enable-multipath=64 \
--enable-user=frr \
For the simulated topology, see the description in the python file.
-StdErr log from daemos after exit
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To enable the reporting of any messages seen on StdErr after the daemons exit,
-the following env variable can be set::
-
- export TOPOTESTS_CHECK_STDERR=Yes
-
-(The value doesn't matter at this time. The check is whether the env
-variable exists or not.) There is no pass/fail on this reporting; the
-Output will be reported to the console.
-
-Collect Memory Leak Information
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-FRR processes can report unfreed memory allocations upon exit. To
-enable the reporting of memory leaks, define an environment variable
-``TOPOTESTS_CHECK_MEMLEAK`` with the file prefix, i.e.::
-
- export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
-
-This will enable the check and output to console and the writing of
-the information to files with the given prefix (followed by testname),
-ie :file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case
-of a memory leak.
-
Running Topotests with AddressSanitizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--gdb-breakpoints=nb_config_diff \
all-protocol-startup
+Reporting Memleaks with FRR Memory Statistics
+"""""""""""""""""""""""""""""""""""""""""""""
+
+FRR reports all allocated FRR memory objects on exit to standard error.
+Topotest can be run to report such output as errors in order to check for
+memleaks in FRR memory allocations. Specifying the CLI argument
+``--memleaks`` will enable reporting FRR-based memory allocations at exit
+as errors.
+
+.. code:: shell
+
+ sudo -E pytest --memleaks all-protocol-startup
+
+
+StdErr log from daemons after exit
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running with ``--memleaks``, to enable the reporting of other,
+non-memory related, messages seen on StdErr after the daemons exit,
+the following env variable can be set::
+
+ export TOPOTESTS_CHECK_STDERR=Yes
+
+(The value doesn't matter at this time. The check is whether the env
+variable exists or not.) There is no pass/fail on this reporting; the
+Output will be reported to the console.
+
+Collect Memory Leak Information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running with ``--memleaks``, FRR processes report unfreed memory
+allocations upon exit. To enable also reporting of memory leaks to a specific
+location, define an environment variable ``TOPOTESTS_CHECK_MEMLEAK`` with the
+file prefix, i.e.::
+
+   export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
+
+For tests that support the TOPOTESTS_CHECK_MEMLEAK environment variable,
+this will enable writing of the information to files with the given prefix
+(followed by the test name), e.g.
+:file:`/home/mydir/memleak_test_bgp_multiview_topo1.txt` in case of a
+memory leak.
+
Detecting Memleaks with Valgrind
""""""""""""""""""""""""""""""""
extended community values as described in
:ref:`bgp-extended-communities-attribute`.
+.. clicmd:: label vpn export allocation-mode per-vrf|per-nexthop
+
+ Select how labels are allocated in the given VRF. By default, the `per-vrf`
+ mode is selected, and one label is used for all prefixes from the VRF. The
+ `per-nexthop` mode will use a unique label for all prefixes that are
+ reachable via the same nexthop.
+
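+ For example, a minimal sketch that switches a VRF to per-nexthop
+ allocation (the VRF name and AS number are illustrative):
+
+ .. code-block:: frr
+
+    router bgp 65500 vrf VRF_A
+     address-family ipv4 unicast
+      label vpn export allocation-mode per-nexthop
+     exit-address-family
+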
.. clicmd:: label vpn export (0..1048575)|auto
Enables an MPLS label to be attached to a route exported from the current
installation prefix and other directories may be changed using the following
options to the configuration script.
+.. option:: --enable-ccls
+
+ Enable the creation of a :file:`.ccls` file in the top level source
+ directory.
+
+ Some development environments (e.g., LSP server within emacs, et al.) can
+ utilize ``ccls`` to provide highly sophisticated IDE features (e.g.,
+ semantically accurate jump-to definition/reference, and even code
+ refactoring). The `--enable-ccls` option causes :file:`configure` to
+ generate a configuration for the ``ccls`` command, based on the configured
+ FRR build environment.
+
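+ A minimal invocation sketch (combine with your usual configure options):
+
+ .. code-block:: console
+
+    ./configure --enable-ccls
+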
.. option:: --prefix <prefix>
Install architecture-independent files in `prefix` [/usr/local].
Sets interface's Router Dead Interval. Default value is 40.
+.. clicmd:: ipv6 ospf6 graceful-restart hello-delay HELLODELAYINTERVAL
+
+ Set the length of time during which Grace-LSAs are sent at 1-second intervals
+ while coming back up after an unplanned outage. During this time, no hello
+ packets are sent.
+
+ A higher hello delay will increase the chance that all neighbors are notified
+ about the ongoing graceful restart before receiving a hello packet (which is
+ crucial for the graceful restart to succeed). The hello delay shouldn't be set
+ too high, however, otherwise the adjacencies might time out. As a best practice,
+ it's recommended to set the hello delay and hello interval with the same values.
+ The default value is 10 seconds.
+
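+ For example, following the best practice above, a sketch keeping both
+ timers at 10 seconds (the interface name is illustrative):
+
+ .. code-block:: frr
+
+    interface eth0
+     ipv6 ospf6 hello-interval 10
+     ipv6 ospf6 graceful-restart hello-delay 10
+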
.. clicmd:: ipv6 ospf6 retransmit-interval RETRANSMITINTERVAL
Sets interface's Rxmt Interval. Default value is 5.
To perform a graceful shutdown, the "graceful-restart prepare ipv6 ospf"
EXEC-level command needs to be issued before restarting the ospf6d daemon.
+ When Graceful Restart is enabled and the ospf6d daemon crashes or is killed
+ abruptly (e.g. SIGKILL), it will attempt an unplanned Graceful Restart once
+ it restarts.
+
.. clicmd:: graceful-restart helper enable [A.B.C.D]
:clicmd:`ip ospf dead-interval minimal hello-multiplier (2-20)` is also
specified for the interface.
+.. clicmd:: ip ospf graceful-restart hello-delay (1-1800)
+
+ Set the length of time during which Grace-LSAs are sent at 1-second intervals
+ while coming back up after an unplanned outage. During this time, no hello
+ packets are sent.
+
+ A higher hello delay will increase the chance that all neighbors are notified
+ about the ongoing graceful restart before receiving a hello packet (which is
+ crucial for the graceful restart to succeed). The hello delay shouldn't be set
+ too high, however, otherwise the adjacencies might time out. As a best practice,
+ it's recommended to set the hello delay and hello interval with the same values.
+ The default value is 10 seconds.
+
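+ For example, a sketch mirroring the ospf6d example above (the interface
+ name is illustrative):
+
+ .. code-block:: frr
+
+    interface eth0
+     ip ospf hello-interval 10
+     ip ospf graceful-restart hello-delay 10
+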
.. clicmd:: ip ospf network (broadcast|non-broadcast|point-to-multipoint|point-to-point [dmvpn])
When configuring a point-to-point network on an interface and the interface
To perform a graceful shutdown, the "graceful-restart prepare ip ospf"
EXEC-level command needs to be issued before restarting the ospfd daemon.
+ When Graceful Restart is enabled and the ospfd daemon crashes or is killed
+ abruptly (e.g. SIGKILL), it will attempt an unplanned Graceful Restart once
+ it restarts.
+
.. clicmd:: graceful-restart helper enable [A.B.C.D]
If `poisoned-reverse` is also set, the router sends the poisoned routes
with highest metric back to the sending router.
+.. clicmd:: allow-ecmp [1-MULTIPATH_NUM]
+
+ Control how many ECMP paths RIP can inject for the same prefix. If
+ specified without a number, the maximum configured at compile time
+ (``--enable-multipath``) is used.
+
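+ For example, a sketch capping RIP at two ECMP paths per prefix (the
+ actual maximum depends on ``--enable-multipath``):
+
+ .. code-block:: frr
+
+    router rip
+     allow-ecmp 2
+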
.. _rip-version-control:
RIP Version Control
.. clicmd:: match source-protocol PROTOCOL_NAME
- This is a ZEBRA specific match command. Matches the
+ This is a ZEBRA and BGP specific match command. Matches the
originating protocol specified.
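+
+ For example, a sketch matching only routes learnt from connected (the
+ route-map name is illustrative):
+
+ .. code-block:: frr
+
+    route-map MATCH-CONNECTED permit 10
+     match source-protocol connected
+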
.. clicmd:: match source-instance NUMBER
Multiple nexthop static route
=============================
-To create multiple nexthops to the same NETWORK, just reenter the same
+To create multiple nexthops to the same NETWORK (also known as a multipath
+route), just reenter the same
network statement with different nexthop information.
.. code-block:: frr
ip route 10.0.0.0/8 null0 255
-This will install a multihop route via the specified next-hops if they are
+This will install a multipath route via the specified next-hops if they are
reachable, as well as a high-distance blackhole route, which can be useful to
prevent traffic destined for a prefix to match less-specific routes (e.g.
default) should the specified gateways not be reachable. E.g.:
" --limit-fds Limit number of fds supported\n",
lo_always};
+static bool logging_to_stdout = false; /* set when --log stdout specified */
static const struct option lo_cfg[] = {
{"config_file", required_argument, NULL, 'f'},
while ((log_arg = log_args_pop(di->early_logging))) {
command_setup_early_logging(log_arg->target,
di->early_loglevel);
+ /* this is a bit of a hack, but we need
+    to notice when the target is stdout */
+ if (strcmp(log_arg->target, "stdout") == 0)
+ logging_to_stdout = true;
XFREE(MTYPE_TMP, log_arg);
}
"%s: failed to open /dev/null: %s", __func__,
safe_strerror(errno));
} else {
- dup2(nullfd, 0);
- dup2(nullfd, 1);
- dup2(nullfd, 2);
+ int fd;
+ /*
+ * only redirect stdin, stdout and stderr to null when attached
+ * to a tty; also don't redirect stdout when --log stdout is set
+ */
+ for (fd = 2; fd >= 0; fd--)
+ if (isatty(fd) &&
+ (fd != STDOUT_FILENO || !logging_to_stdout))
+ dup2(nullfd, fd);
close(nullfd);
}
}
"%s: failed to open /dev/null: %s",
__func__, safe_strerror(errno));
} else {
- dup2(nullfd, 0);
- dup2(nullfd, 1);
- dup2(nullfd, 2);
+ int fd;
+ /*
+ * only redirect stdin, stdout and stderr to null when
+ * attached to a tty; also don't redirect stdout when
+ * --log stdout is set
+ */
+ for (fd = 2; fd >= 0; fd--)
+ if (isatty(fd) &&
+ (fd != STDOUT_FILENO || !logging_to_stdout))
+ dup2(nullfd, fd);
close(nullfd);
}
{
FILE *fp;
char filename[128];
- int have_leftovers;
+ int have_leftovers = 0;
hook_call(frr_fini);
/* frrmod_init -> nothing needed / hooks */
rcu_shutdown();
- if (!debug_memstats_at_exit)
- return;
-
- have_leftovers = log_memstats(stderr, di->name);
+ /* also log memstats to stderr when stderr goes to a file */
+ if (debug_memstats_at_exit || !isatty(STDERR_FILENO))
+ have_leftovers = log_memstats(stderr, di->name);
/* in case we decide at runtime that we want exit-memstats for
- * a daemon, but it has no stderr because it's daemonized
+ * a daemon
* (only do this if we actually have something to print though)
*/
- if (!have_leftovers)
+ if (!debug_memstats_at_exit || !have_leftovers)
return;
snprintf(filename, sizeof(filename), "/tmp/frr-memstats-%s-%llu-%llu",
#define OSPF_ROUTER_DEAD_INTERVAL_DEFAULT 40
#define OSPF_ROUTER_DEAD_INTERVAL_MINIMAL 1
#define OSPF_HELLO_INTERVAL_DEFAULT 10
+#define OSPF_HELLO_DELAY_DEFAULT 10
#define OSPF_ROUTER_PRIORITY_DEFAULT 1
#define OSPF_RETRANSMIT_INTERVAL_DEFAULT 5
#define OSPF_TRANSMIT_DELAY_DEFAULT 1
return NULL;
}
+static bool
+prefix_list_entry_lookup_prefix(struct prefix_list *plist,
+ struct prefix_list_entry *plist_entry)
+{
+ struct prefix_list_entry *pentry = NULL;
+
+ for (pentry = plist->head; pentry; pentry = pentry->next) {
+ if (pentry == plist_entry)
+ continue;
+ if (prefix_same(&pentry->prefix, &plist_entry->prefix))
+ return true;
+ }
+
+ return false;
+}
+
static void trie_walk_affected(size_t validbits, struct pltrie_table *table,
uint8_t byte, struct prefix_list_entry *object,
void (*fn)(struct prefix_list_entry *object,
void prefix_list_entry_delete(struct prefix_list *plist,
- struct prefix_list_entry *pentry,
- int update_list)
+ struct prefix_list_entry *pentry, int update_list)
{
+ bool duplicate = false;
+
if (plist == NULL || pentry == NULL)
return;
+ if (prefix_list_entry_lookup_prefix(plist, pentry))
+ duplicate = true;
+
prefix_list_trie_del(plist, pentry);
if (pentry->prev)
else
plist->tail = pentry->prev;
- route_map_notify_pentry_dependencies(plist->name, pentry,
- RMAP_EVENT_PLIST_DELETED);
+ if (!duplicate)
+ route_map_notify_pentry_dependencies(plist->name, pentry,
+ RMAP_EVENT_PLIST_DELETED);
+
prefix_list_entry_free(pentry);
plist->count--;
void prefix_list_entry_update_start(struct prefix_list_entry *ple)
{
struct prefix_list *pl = ple->pl;
+ bool duplicate = false;
/* Not installed, nothing to do. */
if (!ple->installed)
return;
+ if (prefix_list_entry_lookup_prefix(pl, ple))
+ duplicate = true;
+
prefix_list_trie_del(pl, ple);
/* List manipulation: shameless copy from `prefix_list_entry_delete`. */
else
pl->tail = ple->prev;
- route_map_notify_pentry_dependencies(pl->name, ple,
- RMAP_EVENT_PLIST_DELETED);
+ if (!duplicate)
+ route_map_notify_pentry_dependencies(pl->name, ple,
+ RMAP_EVENT_PLIST_DELETED);
pl->count--;
route_map_notify_dependencies(pl->name, RMAP_EVENT_PLIST_DELETED);
(strmatch(C, "frr-zebra-route-map:ipv4-next-hop-prefix-length"))
#define IS_MATCH_SRC_PROTO(C) \
(strmatch(C, "frr-zebra-route-map:source-protocol"))
+#define IS_MATCH_BGP_SRC_PROTO(C) \
+ (strmatch(C, "frr-bgp-route-map:source-protocol"))
#define IS_MATCH_SRC_INSTANCE(C) \
(strmatch(C, "frr-zebra-route-map:source-instance"))
/* BGP route-map match conditions */
yang_dnode_get_string(
dnode,
"./rmap-match-condition/frr-zebra-route-map:ipv4-prefix-length"));
- } else if (IS_MATCH_SRC_PROTO(condition)) {
+ } else if (IS_MATCH_SRC_PROTO(condition) ||
+ IS_MATCH_BGP_SRC_PROTO(condition)) {
vty_out(vty, " match source-protocol %s\n",
yang_dnode_get_string(
dnode,
- "./rmap-match-condition/frr-zebra-route-map:source-protocol"));
+ IS_MATCH_SRC_PROTO(condition)
+ ? "./rmap-match-condition/frr-zebra-route-map:source-protocol"
+ : "./rmap-match-condition/frr-bgp-route-map:source-protocol"));
} else if (IS_MATCH_SRC_INSTANCE(condition)) {
vty_out(vty, " match source-instance %s\n",
yang_dnode_get_string(
void *ctx),
void *ctx, bool alloc_xp_copy)
{
- int ret;
+ int ret = 0;
char xpath[MGMTD_MAX_XPATH_LEN];
struct lyd_node *base_dnode = NULL;
struct lyd_node *node;
*/
static struct vty *rollback_vty;
-static bool mgmt_history_record_exists(char *file_path)
+static bool file_exists(const char *path)
{
- int exist;
-
- exist = access(file_path, F_OK);
- if (exist == 0)
- return true;
- else
- return false;
+ return !access(path, F_OK);
}
-static void mgmt_history_remove_file(char *name)
+static void remove_file(const char *path)
{
- if (remove(name) == 0)
- zlog_debug("Old commit info deletion succeeded");
- else
- zlog_err("Old commit info deletion failed");
+ if (!file_exists(path))
+ return;
+ if (unlink(path))
+ zlog_err("Failed to remove commit history file %s: %s", path,
+ safe_strerror(errno));
}
static struct mgmt_cmt_info_t *mgmt_history_new_cmt_info(void)
last_cmt_info = cmt_info;
if (last_cmt_info) {
- mgmt_history_remove_file(last_cmt_info->cmt_json_file);
+ remove_file(last_cmt_info->cmt_json_file);
mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
}
struct mgmt_cmt_info_t *new;
int cnt = 0;
+ if (!file_exists(MGMTD_COMMIT_FILE_PATH))
+ return false;
+
fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "rb");
if (!fp) {
- zlog_err("Failed to open file %s rb mode",
- MGMTD_COMMIT_INDEX_FILE_NAME);
+ zlog_err("Failed to open commit history %s for reading: %s",
+ MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
return false;
}
while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
if (cnt < MGMTD_MAX_COMMIT_LIST) {
- if (!mgmt_history_record_exists(
- cmt_info.cmt_json_file)) {
- zlog_err(
- "Commit record present in index_file, but commit file %s missing",
- cmt_info.cmt_json_file);
+ if (!file_exists(cmt_info.cmt_json_file)) {
+ zlog_err("Commit in index, but file %s missing",
+ cmt_info.cmt_json_file);
continue;
}
memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
mgmt_cmt_infos_add_tail(&mm->cmts, new);
} else {
- zlog_err("More records found in index file %s",
- MGMTD_COMMIT_INDEX_FILE_NAME);
+ zlog_warn(
+ "More records found in commit history file %s than expected",
+ MGMTD_COMMIT_INDEX_FILE_NAME);
fclose(fp);
return false;
}
struct mgmt_cmt_info_t cmt_info_set[10];
int cnt = 0;
- mgmt_history_remove_file((char *)MGMTD_COMMIT_INDEX_FILE_NAME);
- fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "ab");
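+ /* "wb" truncates the index file, so the old remove-then-append
+ * sequence is no longer needed. */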
+ fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "wb");
if (!fp) {
- zlog_err("Failed to open file %s ab mode",
- MGMTD_COMMIT_INDEX_FILE_NAME);
+ zlog_err("Failed to open commit history %s for writing: %s",
+ MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
return false;
}
ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
fclose(fp);
if (ret != cnt) {
- zlog_err("Write record failed");
+ zlog_err("Failed to write full commit history, removing file");
+ remove_file(MGMTD_COMMIT_INDEX_FILE_NAME);
return false;
- } else {
- return true;
}
+ return true;
}
static int mgmt_history_rollback_to_cmt(struct vty *vty,
return ret;
}
- mgmt_history_remove_file(cmt_info->cmt_json_file);
+ remove_file(cmt_info->cmt_json_file);
mgmt_cmt_infos_del(&mm->cmts, cmt_info);
XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
}
}
cnt++;
- mgmt_history_remove_file(cmt_info->cmt_json_file);
+ remove_file(cmt_info->cmt_json_file);
mgmt_cmt_infos_del(&mm->cmts, cmt_info);
XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
}
#include "log.h"
#include "hook.h"
#include "printfrr.h"
+#include "lib_errors.h"
#include "ospf6d/ospf6_lsa.h"
#include "ospf6d/ospf6_lsdb.h"
#include "ospf6d/ospf6_zebra.h"
#include "ospf6d/ospf6_message.h"
#include "ospf6d/ospf6_neighbor.h"
+#include "ospf6d/ospf6_network.h"
#include "ospf6d/ospf6_flood.h"
#include "ospf6d/ospf6_intra.h"
#include "ospf6d/ospf6_spf.h"
#include "ospf6d/ospf6_gr.h"
#include "ospf6d/ospf6_gr_clippy.c"
-static void ospf6_gr_nvm_delete(struct ospf6 *ospf6);
+static void ospf6_gr_grace_period_expired(struct event *thread);
/* Originate and install Grace-LSA for a given interface. */
-static int ospf6_gr_lsa_originate(struct ospf6_interface *oi)
+static int ospf6_gr_lsa_originate(struct ospf6_interface *oi,
+ enum ospf6_gr_restart_reason reason)
{
- struct ospf6_gr_info *gr_info = &oi->area->ospf6->gr_info;
+ struct ospf6 *ospf6 = oi->area->ospf6;
+ struct ospf6_gr_info *gr_info = &ospf6->gr_info;
struct ospf6_lsa_header *lsa_header;
struct ospf6_grace_lsa *grace_lsa;
struct ospf6_lsa *lsa;
+ uint16_t lsa_length;
char buffer[OSPF6_MAX_LSASIZE];
if (IS_OSPF6_DEBUG_ORIGINATE(LINK))
/* Put restart reason. */
grace_lsa->tlv_reason.header.type = htons(RESTART_REASON_TYPE);
grace_lsa->tlv_reason.header.length = htons(RESTART_REASON_LENGTH);
- if (gr_info->restart_support)
- grace_lsa->tlv_reason.reason = OSPF6_GR_SW_RESTART;
- else
- grace_lsa->tlv_reason.reason = OSPF6_GR_UNKNOWN_RESTART;
+ grace_lsa->tlv_reason.reason = reason;
/* Fill LSA Header */
+ lsa_length = sizeof(*lsa_header) + sizeof(*grace_lsa);
lsa_header->age = 0;
lsa_header->type = htons(OSPF6_LSTYPE_GRACE_LSA);
lsa_header->id = htonl(oi->interface->ifindex);
- lsa_header->adv_router = oi->area->ospf6->router_id;
+ lsa_header->adv_router = ospf6->router_id;
lsa_header->seqnum =
ospf6_new_ls_seqnum(lsa_header->type, lsa_header->id,
lsa_header->adv_router, oi->lsdb);
- lsa_header->length = htons(sizeof(*lsa_header) + sizeof(*grace_lsa));
+ lsa_header->length = htons(lsa_length);
/* LSA checksum */
ospf6_lsa_checksum(lsa_header);
- /* create LSA */
- lsa = ospf6_lsa_create(lsa_header);
-
- /* Originate */
- ospf6_lsa_originate_interface(lsa, oi);
+ if (reason == OSPF6_GR_UNKNOWN_RESTART) {
+ struct ospf6_header *oh;
+ uint32_t *uv32;
+ int n;
+ uint16_t length = OSPF6_HEADER_SIZE + 4 + lsa_length;
+ struct iovec iovector[2] = {};
+
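+ /* Unplanned restart: no adjacencies exist yet, so the regular
+ * origination/flooding path can't be used. Hand-craft a Link
+ * State Update (OSPFv3 header, 4-byte LSA count, then the
+ * Grace-LSA) and send it straight to AllSPFRouters.
+ */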
+ /* Reserve space for the OSPFv3 header and LS Update header. */
+ memmove(&buffer[OSPF6_HEADER_SIZE + 4], buffer, lsa_length);
+
+ /* Fill in the OSPFv3 header. */
+ oh = (struct ospf6_header *)buffer;
+ oh->version = OSPFV3_VERSION;
+ oh->type = OSPF6_MESSAGE_TYPE_LSUPDATE;
+ oh->router_id = oi->area->ospf6->router_id;
+ oh->area_id = oi->area->area_id;
+ oh->instance_id = oi->instance_id;
+ oh->reserved = 0;
+ oh->length = htons(length);
+
+ /* Fill in the LS Update header (number of LSAs). */
+ uv32 = (uint32_t *)&buffer[sizeof(*oh)];
+ *uv32 = htonl(1);
+
+ /* Send packet. */
+ iovector[0].iov_base = lsa_header;
+ iovector[0].iov_len = length;
+ n = ospf6_sendmsg(oi->linklocal_addr, &allspfrouters6,
+ oi->interface->ifindex, iovector, ospf6->fd);
+ if (n != length)
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: could not send entire message", __func__);
+ } else {
+ /* Create and install LSA. */
+ lsa = ospf6_lsa_create(lsa_header);
+ ospf6_lsa_originate_interface(lsa, oi);
+ }
return 0;
}
ospf6->gr_info.restart_in_progress = false;
ospf6->gr_info.finishing_restart = true;
+ XFREE(MTYPE_TMP, ospf6->gr_info.exit_reason);
+ ospf6->gr_info.exit_reason = XSTRDUP(MTYPE_TMP, reason);
EVENT_OFF(ospf6->gr_info.t_grace_period);
- /* Record in non-volatile memory that the restart is complete. */
- ospf6_gr_nvm_delete(ospf6);
-
for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, onode, area)) {
struct ospf6_interface *oi;
OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(area);
for (ALL_LIST_ELEMENTS_RO(area->if_list, anode, oi)) {
+ /* Disable hello delay. */
+ if (oi->gr.hello_delay.t_grace_send) {
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+ event_add_event(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
+ }
+
/* Reoriginate Link-LSA. */
if (oi->type != OSPF_IFTYPE_VIRTUALLINK)
OSPF6_LINK_LSA_EXECUTE(oi);
ospf6_gr_flush_grace_lsas(ospf6);
}
+/* Enter the Graceful Restart mode. */
+void ospf6_gr_restart_enter(struct ospf6 *ospf6,
+ enum ospf6_gr_restart_reason reason, int timestamp)
+{
+ unsigned long remaining_time;
+
+ ospf6->gr_info.restart_in_progress = true;
+ ospf6->gr_info.reason = reason;
+
+ /* Schedule grace period timeout. */
+ remaining_time = timestamp - time(NULL);
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug(
+ "GR: remaining time until grace period expires: %lu(s)",
+ remaining_time);
+
+ event_add_timer(master, ospf6_gr_grace_period_expired, ospf6,
+ remaining_time, &ospf6->gr_info.t_grace_period);
+}
+
#define RTR_LSA_MISSING 0
#define RTR_LSA_ADJ_FOUND 1
#define RTR_LSA_ADJ_NOT_FOUND 2
ospf6_gr_restart_exit(ospf6, "grace period has expired");
}
+/* Send extra Grace-LSA out the interface (unplanned outages only). */
+void ospf6_gr_iface_send_grace_lsa(struct event *thread)
+{
+ struct ospf6_interface *oi = EVENT_ARG(thread);
+
+ ospf6_gr_lsa_originate(oi, oi->area->ospf6->gr_info.reason);
+
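+ /* Resend the Grace-LSA every second until the hello-delay
+ * interval elapses, then resume sending regular Hellos. */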
+ if (++oi->gr.hello_delay.elapsed_seconds < oi->gr.hello_delay.interval)
+ event_add_timer(master, ospf6_gr_iface_send_grace_lsa, oi, 1,
+ &oi->gr.hello_delay.t_grace_send);
+ else
+ event_add_event(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
+}
+
/*
* Record in non-volatile memory that the given OSPF instance is attempting to
* perform a graceful restart.
*/
-static void ospf6_gr_nvm_update(struct ospf6 *ospf6)
+static void ospf6_gr_nvm_update(struct ospf6 *ospf6, bool prepare)
{
const char *inst_name;
json_object *json;
json_instance);
}
+ json_object_int_add(json_instance, "gracePeriod",
+ ospf6->gr_info.grace_period);
+
/*
* Record not only the grace period, but also a UNIX timestamp
* corresponding to the end of that period. That way, once ospf6d is
* restarted, it will be possible to take into account the time that
* passed while ospf6d wasn't running.
*/
- json_object_int_add(json_instance, "gracePeriod",
- ospf6->gr_info.grace_period);
- json_object_int_add(json_instance, "timestamp",
- time(NULL) + ospf6->gr_info.grace_period);
+ if (prepare)
+ json_object_int_add(json_instance, "timestamp",
+ time(NULL) + ospf6->gr_info.grace_period);
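+
+ /* Illustrative contents of the resulting state file (the top-level
+ * "instances" key name is an assumption; the per-instance keys match
+ * the code above):
+ * {"instances":{"default":{"gracePeriod":120,"timestamp":1700000000}}}
+ */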
json_object_to_file_ext((char *)OSPF6D_GR_STATE, json,
JSON_C_TO_STRING_PRETTY);
* Delete GR status information about the given OSPF instance from non-volatile
* memory.
*/
-static void ospf6_gr_nvm_delete(struct ospf6 *ospf6)
+void ospf6_gr_nvm_delete(struct ospf6 *ospf6)
{
const char *inst_name;
json_object *json;
json_object *json_instances;
json_object *json_instance;
json_object *json_timestamp;
+ json_object *json_grace_period;
time_t timestamp = 0;
inst_name = ospf6->name ? ospf6->name : VRF_DEFAULT_NAME;
json_instance);
}
+ json_object_object_get_ex(json_instance, "gracePeriod",
+ &json_grace_period);
json_object_object_get_ex(json_instance, "timestamp", &json_timestamp);
if (json_timestamp) {
time_t now;
- unsigned long remaining_time;
- /* Check if the grace period has already expired. */
+ /* Planned GR: check if the grace period has already expired. */
now = time(NULL);
timestamp = json_object_get_int(json_timestamp);
if (now > timestamp) {
ospf6_gr_restart_exit(
ospf6, "grace period has expired already");
- } else {
- /* Schedule grace period timeout. */
- ospf6->gr_info.restart_in_progress = true;
- remaining_time = timestamp - time(NULL);
- if (IS_DEBUG_OSPF6_GR)
- zlog_debug(
- "GR: remaining time until grace period expires: %lu(s)",
- remaining_time);
- event_add_timer(master, ospf6_gr_grace_period_expired,
- ospf6, remaining_time,
- &ospf6->gr_info.t_grace_period);
- }
+ } else
+ ospf6_gr_restart_enter(ospf6, OSPF6_GR_SW_RESTART,
+ timestamp);
+ } else if (json_grace_period) {
+ uint32_t grace_period;
+
+ /*
+ * Unplanned GR: the Grace-LSAs will be sent later as soon as
+ * the interfaces are operational.
+ */
+ grace_period = json_object_get_int(json_grace_period);
+ ospf6->gr_info.grace_period = grace_period;
+ ospf6_gr_restart_enter(ospf6, OSPF6_GR_UNKNOWN_RESTART,
+ time(NULL) +
+ ospf6->gr_info.grace_period);
}
json_object_object_del(json_instances, inst_name);
json_object_free(json);
}
+void ospf6_gr_unplanned_start_interface(struct ospf6_interface *oi)
+{
+ /*
+ * Can't check OSPF interface state as the OSPF instance might not be
+ * enabled yet.
+ */
+ if (!if_is_operative(oi->interface) || if_is_loopback(oi->interface))
+ return;
+
+ /* Send Grace-LSA. */
+ ospf6_gr_lsa_originate(oi, oi->area->ospf6->gr_info.reason);
+
+ /* Start GR hello-delay interval. */
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ event_add_timer(master, ospf6_gr_iface_send_grace_lsa, oi, 1,
+ &oi->gr.hello_delay.t_grace_send);
+}
+
/* Prepare to start a Graceful Restart. */
static void ospf6_gr_prepare(void)
{
ospf6->gr_info.grace_period,
ospf6_vrf_id_to_name(ospf6->vrf_id));
- /* Freeze OSPF routes in the RIB. */
- if (ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period)) {
- zlog_warn(
- "%s: failed to activate graceful restart: not connected to zebra",
- __func__);
- continue;
- }
-
/* Send a Grace-LSA to all neighbors. */
for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, anode, area)) {
for (ALL_LIST_ELEMENTS_RO(area->if_list, inode, oi)) {
if (oi->state < OSPF6_INTERFACE_POINTTOPOINT)
continue;
- ospf6_gr_lsa_originate(oi);
+ ospf6_gr_lsa_originate(oi, OSPF6_GR_SW_RESTART);
}
}
/* Record end of the grace period in non-volatile memory. */
- ospf6_gr_nvm_update(ospf6);
+ ospf6_gr_nvm_update(ospf6, true);
/*
* Mark that a Graceful Restart preparation is in progress, to
ospf6->gr_info.restart_support = true;
ospf6->gr_info.grace_period = grace_period;
+ /* Freeze OSPF routes in the RIB. */
+ (void)ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
+
+ /* Record that GR is enabled in non-volatile memory. */
+ ospf6_gr_nvm_update(ospf6, false);
+
return CMD_SUCCESS;
}
ospf6->gr_info.restart_support = false;
ospf6->gr_info.grace_period = OSPF6_DFLT_GRACE_INTERVAL;
+ ospf6_gr_nvm_delete(ospf6);
+ ospf6_zebra_gr_disable(ospf6);
return CMD_SUCCESS;
}
extern int config_write_ospf6_gr_helper(struct vty *vty, struct ospf6 *ospf6);
extern int config_write_ospf6_debug_gr_helper(struct vty *vty);
+extern void ospf6_gr_iface_send_grace_lsa(struct event *thread);
+extern void ospf6_gr_restart_enter(struct ospf6 *ospf6,
+ enum ospf6_gr_restart_reason reason,
+ int timestamp);
extern void ospf6_gr_check_lsdb_consistency(struct ospf6 *ospf,
struct ospf6_area *area);
extern void ospf6_gr_nvm_read(struct ospf6 *ospf);
+extern void ospf6_gr_nvm_delete(struct ospf6 *ospf6);
+extern void ospf6_gr_unplanned_start_interface(struct ospf6_interface *oi);
extern void ospf6_gr_init(void);
#endif /* OSPF6_GR_H */
#include "ospf6_proto.h"
#include "lib/keychain.h"
#include "ospf6_auth_trailer.h"
+#include "ospf6d/ospf6_interface_clippy.c"
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_IF, "OSPF6 interface");
DEFINE_MTYPE(OSPF6D, OSPF6_AUTH_KEYCHAIN, "OSPF6 auth keychain");
oi->priority = OSPF6_INTERFACE_PRIORITY;
oi->hello_interval = OSPF_HELLO_INTERVAL_DEFAULT;
+ oi->gr.hello_delay.interval = OSPF_HELLO_DELAY_DEFAULT;
oi->dead_interval = OSPF_ROUTER_DEAD_INTERVAL_DEFAULT;
oi->rxmt_interval = OSPF_RETRANSMIT_INTERVAL_DEFAULT;
oi->type = ospf6_default_iftype(ifp);
EVENT_OFF(oi->thread_intra_prefix_lsa);
EVENT_OFF(oi->thread_as_extern_lsa);
EVENT_OFF(oi->thread_wait_timer);
+
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
}
static struct in6_addr *
return;
}
+ /*
+ * RFC 3623 - Section 5 ("Unplanned Outages"):
+ * "The grace-LSAs are encapsulated in Link State Update Packets
+ * and sent out to all interfaces, even though the restarted
+ * router has no adjacencies and no knowledge of previous
+ * adjacencies".
+ */
+ if (oi->area->ospf6->gr_info.restart_in_progress &&
+ oi->area->ospf6->gr_info.reason == OSPF6_GR_UNKNOWN_RESTART)
+ ospf6_gr_unplanned_start_interface(oi);
+
#ifdef __FreeBSD__
/*
* There's a delay in FreeBSD between issuing a command to leave a
json_arr, json_object_new_string(lsa->name));
json_object_object_add(json_obj, "pendingLsaLsAck", json_arr);
+ if (oi->gr.hello_delay.interval != 0)
+ json_object_int_add(json_obj, "grHelloDelaySecs",
+ oi->gr.hello_delay.interval);
} else {
timerclear(&res);
if (event_is_scheduled(oi->thread_send_lsupdate))
: "off"));
for (ALL_LSDB(oi->lsack_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
+
+ if (oi->gr.hello_delay.interval != 0)
+ vty_out(vty, " Graceful Restart hello delay: %us\n",
+ oi->gr.hello_delay.interval);
}
/* BFD specific. */
"Interval time after which a neighbor is declared down\n"
SECONDS_STR)
+DEFPY(ipv6_ospf6_gr_hdelay, ipv6_ospf6_gr_hdelay_cmd,
+ "ipv6 ospf6 graceful-restart hello-delay (1-1800)",
+ IP6_STR
+ OSPF6_STR
+ "Graceful Restart parameters\n"
+ "Delay the sending of the first hello packets.\n"
+ "Delay in seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct ospf6_interface *oi;
+
+ oi = ifp->info;
+ if (oi == NULL)
+ oi = ospf6_interface_create(ifp);
+
+ /* Note: new or updated value won't affect ongoing graceful restart. */
+ oi->gr.hello_delay.interval = hello_delay;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_ipv6_ospf6_gr_hdelay, no_ipv6_ospf6_gr_hdelay_cmd,
+ "no ipv6 ospf6 graceful-restart hello-delay [(1-1800)]",
+ NO_STR
+ IP6_STR
+ OSPF6_STR
+ "Graceful Restart parameters\n"
+ "Delay the sending of the first hello packets.\n"
+ "Delay in seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct ospf6_interface *oi;
+
+ oi = ifp->info;
+ if (oi == NULL)
+ oi = ospf6_interface_create(ifp);
+
+ oi->gr.hello_delay.interval = OSPF_HELLO_DELAY_DEFAULT;
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+
+ return CMD_SUCCESS;
+}
+
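+/* Example configuration (illustrative):
+ *   interface eth0
+ *    ipv6 ospf6 graceful-restart hello-delay 10
+ */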
/* interface variable set command */
DEFUN (ipv6_ospf6_transmitdelay,
ipv6_ospf6_transmitdelay_cmd,
else if (oi->type_cfg && oi->type == OSPF_IFTYPE_BROADCAST)
vty_out(vty, " ipv6 ospf6 network broadcast\n");
+ if (oi->gr.hello_delay.interval != OSPF_HELLO_DELAY_DEFAULT)
+ vty_out(vty,
+ " ipv6 ospf6 graceful-restart hello-delay %u\n",
+ oi->gr.hello_delay.interval);
+
ospf6_bfd_write_config(vty, oi);
ospf6_auth_write_config(vty, &oi->at_data);
install_element(INTERFACE_NODE, &ipv6_ospf6_deadinterval_cmd);
install_element(INTERFACE_NODE, &ipv6_ospf6_hellointerval_cmd);
+ install_element(INTERFACE_NODE, &ipv6_ospf6_gr_hdelay_cmd);
install_element(INTERFACE_NODE, &ipv6_ospf6_priority_cmd);
install_element(INTERFACE_NODE, &ipv6_ospf6_retransmitinterval_cmd);
install_element(INTERFACE_NODE, &ipv6_ospf6_transmitdelay_cmd);
install_element(INTERFACE_NODE, &ipv6_ospf6_instance_cmd);
install_element(INTERFACE_NODE, &no_ipv6_ospf6_deadinterval_cmd);
install_element(INTERFACE_NODE, &no_ipv6_ospf6_hellointerval_cmd);
+ install_element(INTERFACE_NODE, &no_ipv6_ospf6_gr_hdelay_cmd);
install_element(INTERFACE_NODE, &no_ipv6_ospf6_priority_cmd);
install_element(INTERFACE_NODE, &no_ipv6_ospf6_retransmitinterval_cmd);
install_element(INTERFACE_NODE, &no_ipv6_ospf6_transmitdelay_cmd);
uint16_t dead_interval;
uint32_t rxmt_interval;
+ /* Graceful-Restart data. */
+ struct {
+ struct {
+ uint16_t interval;
+ uint16_t elapsed_seconds;
+ struct event *t_grace_send;
+ } hello_delay;
+ } gr;
+
uint32_t state_change;
/* Cost */
oi = (struct ospf6_interface *)EVENT_ARG(thread);
+ /* Don't send Hellos while the GR hello-delay is in effect. */
+ if (oi->gr.hello_delay.t_grace_send)
+ return;
+
if (oi->state <= OSPF6_INTERFACE_DOWN) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND_HDR))
zlog_debug("Unable to send Hello on down interface %s",
* no longer valid.
*/
ospf6_zebra_gr_disable(ospf6);
+ ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
ospf6->gr_info.finishing_restart = false;
}
}
if (ospf6->router_id == 0)
ospf6_router_id_update(ospf6, true);
ospf6_add(ospf6);
+
+ /*
+ * Read from non-volatile memory whether this instance is performing a
+ * graceful restart or not.
+ */
+ ospf6_gr_nvm_read(ospf6);
+
if (ospf6->vrf_id != VRF_UNKNOWN) {
vrf = vrf_lookup_by_id(ospf6->vrf_id);
FOR_ALL_INTERFACES (vrf, ifp) {
if (ospf6->fd < 0)
return ospf6;
- /*
- * Read from non-volatile memory whether this instance is performing a
- * graceful restart or not.
- */
- ospf6_gr_nvm_read(ospf6);
-
event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
&ospf6->t_ospf6_receive);
ospf6_gr_helper_deinit(o);
if (!o->gr_info.prepare_in_progress)
ospf6_flush_self_originated_lsas_now(o);
+ XFREE(MTYPE_TMP, o->gr_info.exit_reason);
ospf6_disable(o);
ospf6_del(o);
if (ospf6 == NULL)
vty_out(vty, "OSPFv3 is not configured\n");
else {
+ if (ospf6->gr_info.restart_support)
+ ospf6_gr_nvm_delete(ospf6);
+
ospf6_delete(ospf6);
ospf6 = NULL;
}
bool prepare_in_progress;
bool finishing_restart;
uint32_t grace_period;
+ int reason;
+ char *exit_reason;
struct event *t_grace_period;
};
int ospf6_zebra_gr_enable(struct ospf6 *ospf6, uint32_t stale_time)
{
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug("Zebra enable GR [stale time %u]", stale_time);
+
return ospf6_zebra_gr_update(ospf6, ZEBRA_CLIENT_GR_CAPABILITIES,
stale_time);
}
int ospf6_zebra_gr_disable(struct ospf6 *ospf6)
{
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug("Zebra disable GR");
+
return ospf6_zebra_gr_update(ospf6, ZEBRA_CLIENT_GR_DISABLE, 0);
}
static void ospf6_zebra_connected(struct zclient *zclient)
{
+ struct ospf6 *ospf6;
+ struct listnode *node;
+
/* Send the client registration */
bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
zclient_send_reg_requests(zclient, VRF_DEFAULT);
+
+ /* Activate graceful restart if configured. */
+ for (ALL_LIST_ELEMENTS_RO(om6->ospf6, node, ospf6)) {
+ if (!ospf6->gr_info.restart_support)
+ continue;
+ (void)ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
+ }
}
static zclient_handler *const ospf6_handlers[] = {
ospf6d/ospf6_top.c \
ospf6d/ospf6_area.c \
ospf6d/ospf6_asbr.c \
+ ospf6d/ospf6_interface.c \
ospf6d/ospf6_lsa.c \
ospf6d/ospf6_gr_helper.c \
ospf6d/ospf6_gr.c \
* session. Dump it, but increment past it's seqnum.
*/
assert(!ospf_opaque_is_owned(old));
+ if (IS_DEBUG_OSPF_CLIENT_API)
+ zlog_debug(
+ "LSA[Type%d:%pI4]: OSPF API Server Originate LSA Old Seq: 0x%x Age: %d",
+ old->data->type, &old->data->id,
+ ntohl(old->data->ls_seqnum),
+ ntohs(old->data->ls_age));
if (IS_LSA_MAX_SEQ(old)) {
flog_warn(EC_OSPF_LSA_INSTALL_FAILURE,
"%s: old LSA at maxseq", __func__);
lsa->data->ls_seqnum = lsa_seqnum_increment(old);
ospf_discard_from_db(ospf, old->lsdb, old);
}
+ if (IS_DEBUG_OSPF_CLIENT_API)
+ zlog_debug(
+ "LSA[Type%d:%pI4]: OSPF API Server Originate LSA New Seq: 0x%x Age: %d",
+ lsa->data->type, &lsa->data->id,
+ ntohl(lsa->data->ls_seqnum), ntohs(lsa->data->ls_age));
/* Install this LSA into LSDB. */
if (ospf_lsa_install(ospf, lsa->oi, lsa) == NULL) {
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
assert(ospf);
+ if (IS_DEBUG_OSPF(lsa, LSA_GENERATE)) {
+ zlog_debug("LSA[Type%d:%pI4]: OSPF API Server LSA Refresher",
+ lsa->data->type, &lsa->data->id);
+ }
+
apiserv = lookup_apiserver_by_lsa(lsa);
if (!apiserv) {
zlog_warn("%s: LSA[%s]: No apiserver?", __func__,
goto out;
}
- if (IS_LSA_MAXAGE(lsa)) {
- ospf_opaque_lsa_flush_schedule(lsa);
- goto out;
- }
-
/* Check if updated version of LSA instance has already prepared. */
new = ospf_lsdb_lookup(&apiserv->reserve, lsa);
if (!new) {
+ if (IS_LSA_MAXAGE(lsa)) {
+ ospf_opaque_lsa_flush_schedule(lsa);
+ goto out;
+ }
+
/* This is a periodic refresh, driven by core OSPF mechanism. */
new = ospf_apiserver_opaque_lsa_new(lsa->area, lsa->oi,
lsa->data);
*/
if (ospf->gr_info.finishing_restart) {
ospf_zebra_gr_disable(ospf);
+ ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
ospf->gr_info.finishing_restart = false;
}
}
#include "ospfd/ospf_dump.h"
#include "ospfd/ospf_gr_clippy.c"
-static void ospf_gr_nvm_delete(struct ospf *ospf);
+static void ospf_gr_grace_period_expired(struct event *thread);
/* Lookup self-originated Grace-LSA in the LSDB. */
static struct ospf_lsa *ospf_gr_lsa_lookup(struct ospf *ospf,
/* Fill in fields of the Grace-LSA that is being originated. */
static void ospf_gr_lsa_body_set(struct ospf_gr_info *gr_info,
- struct ospf_interface *oi, struct stream *s)
+ struct ospf_interface *oi,
+ enum ospf_gr_restart_reason reason,
+ struct stream *s)
{
struct grace_tlv_graceperiod tlv_period = {};
struct grace_tlv_restart_reason tlv_reason = {};
/* Put restart reason. */
tlv_reason.header.type = htons(RESTART_REASON_TYPE);
tlv_reason.header.length = htons(RESTART_REASON_LENGTH);
- if (gr_info->restart_support)
- tlv_reason.reason = OSPF_GR_SW_RESTART;
- else
- tlv_reason.reason = OSPF_GR_UNKNOWN_RESTART;
+ tlv_reason.reason = reason;
stream_put(s, &tlv_reason, sizeof(tlv_reason));
/* Put IP address. */
}
/* Generate Grace-LSA for a given interface. */
-static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi)
+static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi,
+ enum ospf_gr_restart_reason reason)
{
struct stream *s;
struct lsa_header *lsah;
lsa_header_set(s, options, lsa_type, lsa_id, oi->ospf->router_id);
/* Set opaque-LSA body fields. */
- ospf_gr_lsa_body_set(&oi->ospf->gr_info, oi, s);
+ ospf_gr_lsa_body_set(&oi->ospf->gr_info, oi, reason, s);
/* Set length. */
length = stream_get_endp(s);
}
/* Originate and install Grace-LSA for a given interface. */
-static void ospf_gr_lsa_originate(struct ospf_interface *oi, bool maxage)
+static void ospf_gr_lsa_originate(struct ospf_interface *oi,
+ enum ospf_gr_restart_reason reason,
+ bool maxage)
{
struct ospf_lsa *lsa, *old;
- if (ospf_interface_neighbor_count(oi) == 0)
+ /* Skip originating a Grace-LSA when not necessary. */
+ if (!if_is_operative(oi->ifp) || if_is_loopback(oi->ifp) ||
+ (reason != OSPF_GR_UNKNOWN_RESTART &&
+ ospf_interface_neighbor_count(oi) == 0))
return;
/* Create new Grace-LSA. */
- lsa = ospf_gr_lsa_new(oi);
+ lsa = ospf_gr_lsa_new(oi, reason);
if (!lsa) {
zlog_warn("%s: ospf_gr_lsa_new() failed", __func__);
return;
if (old)
lsa->data->ls_seqnum = lsa_seqnum_increment(old);
- /* Install this LSA into LSDB. */
- if (ospf_lsa_install(oi->ospf, oi, lsa) == NULL) {
- zlog_warn("%s: ospf_lsa_install() failed", __func__);
- ospf_lsa_unlock(&lsa);
- return;
+ if (!maxage && reason == OSPF_GR_UNKNOWN_RESTART) {
+ struct list *update;
+ struct in_addr addr;
+
+ /*
+ * When performing an unplanned restart, send a handcrafted
+ * Grace-LSA since the interface isn't fully initialized yet.
+ */
+ ospf_lsa_checksum(lsa->data);
+ ospf_lsa_lock(lsa);
+ update = list_new();
+ listnode_add(update, lsa);
+ addr.s_addr = htonl(OSPF_ALLSPFROUTERS);
+ ospf_ls_upd_queue_send(oi, update, addr, true);
+ list_delete(&update);
+ ospf_lsa_discard(lsa);
+ } else {
+ /* Install this LSA into LSDB. */
+ if (ospf_lsa_install(oi->ospf, oi, lsa) == NULL) {
+ zlog_warn("%s: ospf_lsa_install() failed", __func__);
+ ospf_lsa_unlock(&lsa);
+ return;
+ }
+
+ /* Flood the LSA out the interface. */
+ ospf_flood_through_interface(oi, NULL, lsa);
}
/* Update new LSA origination count. */
oi->ospf->lsa_originate_count++;
-
- /* Flood the LSA through out the interface */
- ospf_flood_through_interface(oi, NULL, lsa);
}
/* Flush all self-originated Grace-LSAs. */
struct ospf_interface *oi;
struct listnode *inode;
- if (IS_DEBUG_OSPF_GR)
- zlog_debug(
- "GR: flushing self-originated Grace-LSAs [area %pI4]",
- &area->area_id);
+ for (ALL_LIST_ELEMENTS_RO(area->oiflist, inode, oi)) {
+ if (IS_DEBUG_OSPF_GR)
+ zlog_debug(
+ "GR: flushing self-originated Grace-LSA [area %pI4] [interface %s]",
+ &area->area_id, oi->ifp->name);
- for (ALL_LIST_ELEMENTS_RO(area->oiflist, inode, oi))
- ospf_gr_lsa_originate(oi, true);
+ ospf_gr_lsa_originate(oi, ospf->gr_info.reason, true);
+ }
}
}
ospf->gr_info.restart_in_progress = false;
EVENT_OFF(ospf->gr_info.t_grace_period);
- /* Record in non-volatile memory that the restart is complete. */
- ospf_gr_nvm_delete(ospf);
-
for (ALL_LIST_ELEMENTS_RO(ospf->areas, onode, area)) {
struct ospf_interface *oi;
*/
ospf_router_lsa_update_area(area);
- /*
- * 2) The router should reoriginate network-LSAs on all segments
- * where it is the Designated Router.
- */
- for (ALL_LIST_ELEMENTS_RO(area->oiflist, anode, oi))
+ for (ALL_LIST_ELEMENTS_RO(area->oiflist, anode, oi)) {
+ /* Disable hello delay. */
+ if (oi->gr.hello_delay.t_grace_send) {
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+ OSPF_ISM_TIMER_MSEC_ON(oi->t_hello,
+ ospf_hello_timer, 1);
+ }
+
+ /*
+ * 2) The router should reoriginate network-LSAs on all
+ * segments where it is the Designated Router.
+ */
if (oi->state == ISM_DR)
ospf_network_lsa_update(oi);
+ }
}
/*
* should be removed.
*/
ospf->gr_info.finishing_restart = true;
+ XFREE(MTYPE_TMP, ospf->gr_info.exit_reason);
+ ospf->gr_info.exit_reason = XSTRDUP(MTYPE_TMP, reason);
ospf_spf_calculate_schedule(ospf, SPF_FLAG_GR_FINISH);
/* 6) Any grace-LSAs that the router originated should be flushed. */
ospf_gr_flush_grace_lsas(ospf);
}
+/* Enter the Graceful Restart mode. */
+void ospf_gr_restart_enter(struct ospf *ospf,
+ enum ospf_gr_restart_reason reason, int timestamp)
+{
+ unsigned long remaining_time;
+
+ ospf->gr_info.restart_in_progress = true;
+ ospf->gr_info.reason = reason;
+
+ /* Schedule grace period timeout. */
+ remaining_time = timestamp - time(NULL);
+ if (IS_DEBUG_OSPF_GR)
+ zlog_debug(
+ "GR: remaining time until grace period expires: %lu(s)",
+ remaining_time);
+
+ event_add_timer(master, ospf_gr_grace_period_expired, ospf,
+ remaining_time, &ospf->gr_info.t_grace_period);
+}
+
/* Check if a Router-LSA contains a given link. */
static bool ospf_router_lsa_contains_adj(struct ospf_lsa *lsa,
struct in_addr *id)
return filepath;
}
+/* Send extra Grace-LSA out the interface (unplanned outages only). */
+void ospf_gr_iface_send_grace_lsa(struct event *thread)
+{
+ struct ospf_interface *oi = EVENT_ARG(thread);
+ struct ospf_if_params *params = IF_DEF_PARAMS(oi->ifp);
+
+ ospf_gr_lsa_originate(oi, oi->ospf->gr_info.reason, false);
+
+ if (++oi->gr.hello_delay.elapsed_seconds < params->v_gr_hello_delay)
+ event_add_timer(master, ospf_gr_iface_send_grace_lsa, oi, 1,
+ &oi->gr.hello_delay.t_grace_send);
+ else
+ OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
+}
+
/*
* Record in non-volatile memory that the given OSPF instance is attempting to
* perform a graceful restart.
*/
-static void ospf_gr_nvm_update(struct ospf *ospf)
+static void ospf_gr_nvm_update(struct ospf *ospf, bool prepare)
{
char *filepath;
const char *inst_name;
json_instance);
}
+ json_object_int_add(json_instance, "gracePeriod",
+ ospf->gr_info.grace_period);
+
/*
* Record not only the grace period, but also a UNIX timestamp
* corresponding to the end of that period. That way, once ospfd is
* restarted, it will be possible to take into account the time that
* passed while ospfd wasn't running.
*/
- json_object_int_add(json_instance, "gracePeriod",
- ospf->gr_info.grace_period);
- json_object_int_add(json_instance, "timestamp",
- time(NULL) + ospf->gr_info.grace_period);
+ if (prepare)
+ json_object_int_add(json_instance, "timestamp",
+ time(NULL) + ospf->gr_info.grace_period);
json_object_to_file_ext(filepath, json, JSON_C_TO_STRING_PRETTY);
json_object_free(json);
* Delete GR status information about the given OSPF instance from non-volatile
* memory.
*/
-static void ospf_gr_nvm_delete(struct ospf *ospf)
+void ospf_gr_nvm_delete(struct ospf *ospf)
{
char *filepath;
const char *inst_name;
json_object *json_instances;
json_object *json_instance;
json_object *json_timestamp;
+ json_object *json_grace_period;
time_t timestamp = 0;
filepath = ospf_gr_nvm_filepath(ospf);
json_instance);
}
+ json_object_object_get_ex(json_instance, "gracePeriod",
+ &json_grace_period);
json_object_object_get_ex(json_instance, "timestamp", &json_timestamp);
+
if (json_timestamp) {
time_t now;
- unsigned long remaining_time;
- /* Check if the grace period has already expired. */
+ /* Planned GR: check if the grace period has already expired. */
now = time(NULL);
timestamp = json_object_get_int(json_timestamp);
if (now > timestamp) {
ospf_gr_restart_exit(
ospf, "grace period has expired already");
- } else {
- /* Schedule grace period timeout. */
- ospf->gr_info.restart_in_progress = true;
- remaining_time = timestamp - time(NULL);
- if (IS_DEBUG_OSPF_GR)
- zlog_debug(
- "GR: remaining time until grace period expires: %lu(s)",
- remaining_time);
- event_add_timer(master, ospf_gr_grace_period_expired,
- ospf, remaining_time,
- &ospf->gr_info.t_grace_period);
- }
+ } else
+ ospf_gr_restart_enter(ospf, OSPF_GR_SW_RESTART,
+ timestamp);
+ } else if (json_grace_period) {
+ uint32_t grace_period;
+
+ /*
+ * Unplanned GR: the Grace-LSAs will be sent later as soon as
+ * the interfaces are operational.
+ */
+ grace_period = json_object_get_int(json_grace_period);
+ ospf->gr_info.grace_period = grace_period;
+ ospf_gr_restart_enter(ospf, OSPF_GR_UNKNOWN_RESTART,
+ time(NULL) + ospf->gr_info.grace_period);
}
json_object_object_del(json_instances, inst_name);
json_object_free(json);
}
+void ospf_gr_unplanned_start_interface(struct ospf_interface *oi)
+{
+ /* Send Grace-LSA. */
+ ospf_gr_lsa_originate(oi, oi->ospf->gr_info.reason, false);
+
+ /* Start GR hello-delay interval. */
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ event_add_timer(master, ospf_gr_iface_send_grace_lsa, oi, 1,
+ &oi->gr.hello_delay.t_grace_send);
+}
+
/* Prepare to start a Graceful Restart. */
static void ospf_gr_prepare(void)
{
continue;
}
- /* Freeze OSPF routes in the RIB. */
- if (ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period)) {
- zlog_warn(
- "%s: failed to activate graceful restart: not connected to zebra",
- __func__);
- continue;
- }
-
/* Send a Grace-LSA to all neighbors. */
for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, inode, oi))
- ospf_gr_lsa_originate(oi, false);
+ ospf_gr_lsa_originate(oi, OSPF_GR_SW_RESTART, false);
/* Record end of the grace period in non-volatile memory. */
- ospf_gr_nvm_update(ospf);
+ ospf_gr_nvm_update(ospf, true);
/*
* Mark that a Graceful Restart preparation is in progress, to
ospf->gr_info.restart_support = true;
ospf->gr_info.grace_period = grace_period;
+ /* Freeze OSPF routes in the RIB. */
+ (void)ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
+
+ /* Record that GR is enabled in non-volatile memory. */
+ ospf_gr_nvm_update(ospf, false);
+
return CMD_SUCCESS;
}
ospf->gr_info.restart_support = false;
ospf->gr_info.grace_period = OSPF_DFLT_GRACE_INTERVAL;
+ ospf_gr_nvm_delete(ospf);
+ ospf_zebra_gr_disable(ospf);
return CMD_SUCCESS;
}
uint32_t interval);
extern void ospf_gr_helper_set_supported_planned_only_restart(struct ospf *ospf,
bool planned_only);
-
+extern void ospf_gr_iface_send_grace_lsa(struct event *thread);
+extern void ospf_gr_restart_enter(struct ospf *ospf,
+ enum ospf_gr_restart_reason reason,
+ int timestamp);
extern void ospf_gr_check_lsdb_consistency(struct ospf *ospf,
struct ospf_area *area);
extern void ospf_gr_check_adjs(struct ospf *ospf);
extern void ospf_gr_nvm_read(struct ospf *ospf);
+extern void ospf_gr_nvm_delete(struct ospf *ospf);
+extern void ospf_gr_unplanned_start_interface(struct ospf_interface *oi);
extern void ospf_gr_init(void);
#endif /* _ZEBRA_OSPF_GR_H */
cost = 1;
else if (cost > 65535)
cost = 65535;
+
+ if (if_is_loopback(oi->ifp))
+ cost = 0;
}
return cost;
UNSET_IF_PARAM(oip, passive_interface);
UNSET_IF_PARAM(oip, v_hello);
UNSET_IF_PARAM(oip, fast_hello);
+ UNSET_IF_PARAM(oip, v_gr_hello_delay);
UNSET_IF_PARAM(oip, v_wait);
UNSET_IF_PARAM(oip, priority);
UNSET_IF_PARAM(oip, type);
SET_IF_PARAM(IF_DEF_PARAMS(ifp), fast_hello);
IF_DEF_PARAMS(ifp)->fast_hello = OSPF_FAST_HELLO_DEFAULT;
+ SET_IF_PARAM(IF_DEF_PARAMS(ifp), v_gr_hello_delay);
+ IF_DEF_PARAMS(ifp)->v_gr_hello_delay = OSPF_HELLO_DELAY_DEFAULT;
+
SET_IF_PARAM(IF_DEF_PARAMS(ifp), v_wait);
IF_DEF_PARAMS(ifp)->v_wait = OSPF_ROUTER_DEAD_INTERVAL_DEFAULT;
DECLARE_IF_PARAM(uint32_t, v_wait); /* Router Dead Interval */
bool is_v_wait_set; /* Check for Dead Interval set */
+ /* GR Hello Delay Interval */
+ DECLARE_IF_PARAM(uint16_t, v_gr_hello_delay);
+
/* MTU mismatch check (see RFC2328, chap 10.6) */
DECLARE_IF_PARAM(uint8_t, mtu_ignore);
/* List of configured NBMA neighbor. */
struct list *nbr_nbma;
+ /* Graceful-Restart data. */
+ struct {
+ struct {
+ uint16_t elapsed_seconds;
+ struct event *t_grace_send;
+ } hello_delay;
+ } gr;
+
/* self-originated LSAs. */
struct ospf_lsa *network_lsa_self; /* network-LSA. */
struct list *opaque_lsa_self; /* Type-9 Opaque-LSAs */
oi = EVENT_ARG(thread);
oi->t_hello = NULL;
+ /* Don't send Hellos while the GR hello-delay is in effect. */
+ if (oi->gr.hello_delay.t_grace_send)
+ return;
+
if (IS_DEBUG_OSPF(ism, ISM_TIMERS))
zlog_debug("ISM[%s]: Timer (Hello timer expire)", IF_NAME(oi));
EVENT_OFF(oi->t_hello);
EVENT_OFF(oi->t_wait);
EVENT_OFF(oi->t_ls_ack);
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
break;
case ISM_Loopback:
/* In this state, the interface may be looped back and will be
EVENT_OFF(oi->t_hello);
EVENT_OFF(oi->t_wait);
EVENT_OFF(oi->t_ls_ack);
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
break;
case ISM_Waiting:
/* The router is trying to determine the identity of DRouter and
mask.s_addr = 0xffffffff;
id.s_addr = oi->address->u.prefix4.s_addr;
- return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0, 0);
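+ /* Advertise the stub link with the interface output cost
+ * (0 for loopbacks). */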
+ return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0,
+ oi->output_cost);
}
/* Describe Virtual Link. */
lsa->data->type, &lsa->data->id);
/*
- * Since these LSA entries are not yet installed into corresponding
- * LSDB, just flush them without calling ospf_ls_maxage() afterward.
+ * Install the stale LSA into the Link State Database, add it to the
+ * MaxAge list, and flush it from the OSPF routing domain. For other
+ * LSA types, the installation is done in the refresh function. It is
+ * done inline here since the opaque refresh function is dynamically
+ * registered when opaque LSAs are originated (which is not the case
+ * for stale LSAs).
*/
lsa->data->ls_age = htons(OSPF_LSA_MAXAGE);
+ ospf_lsa_install(
+ top, (lsa->data->type == OSPF_OPAQUE_LINK_LSA) ? nbr->oi : NULL,
+ lsa);
+ ospf_lsa_maxage(top, lsa);
+
switch (lsa->data->type) {
case OSPF_OPAQUE_LINK_LSA:
- ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
- break;
case OSPF_OPAQUE_AREA_LSA:
ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
break;
__func__, lsa->data->type);
return;
}
- ospf_lsa_discard(lsa); /* List "lsas" will be deleted by caller. */
}
/*------------------------------------------------------------------------*
if (current == NULL) {
if (IS_DEBUG_OSPF_EVENT)
zlog_debug(
- "LSA[%s]: Previously originated Opaque-LSA,not found in the LSDB.",
+ "LSA[%s]: Previously originated Opaque-LSA, not found in the LSDB.",
dump_lsa_key(lsa));
SET_FLAG(lsa->flags, OSPF_LSA_SELF);
return ospf_packet_new(size - sizeof(struct ip));
}
-static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
- struct list *update, struct in_addr addr,
- int send_lsupd_now)
+void ospf_ls_upd_queue_send(struct ospf_interface *oi, struct list *update,
+ struct in_addr addr, int send_lsupd_now)
{
struct ospf_packet *op;
uint16_t length = OSPF_HEADER_SIZE;
extern void ospf_ls_upd_send_lsa(struct ospf_neighbor *, struct ospf_lsa *,
int);
extern void ospf_ls_upd_send(struct ospf_neighbor *, struct list *, int, int);
+extern void ospf_ls_upd_queue_send(struct ospf_interface *oi,
+ struct list *update, struct in_addr addr,
+ int send_lsupd_now);
extern void ospf_ls_ack_send(struct ospf_neighbor *, struct ospf_lsa *);
extern void ospf_ls_ack_send_delayed(struct ospf_interface *);
extern void ospf_ls_retransmit(struct ospf_interface *, struct ospf_lsa *);
q_spaces_fini(p_space->q_spaces);
XFREE(MTYPE_OSPF_Q_SPACE, p_space->q_spaces);
+ XFREE(MTYPE_OSPF_P_SPACE, p_space);
}
p_spaces_fini(area->p_spaces);
return CMD_NOT_MY_INSTANCE;
ospf = ospf_lookup(instance, vrf_name);
- if (ospf)
+ if (ospf) {
+ if (ospf->gr_info.restart_support)
+ ospf_gr_nvm_delete(ospf);
+
ospf_finish(ospf);
- else
+ } else
ret = CMD_WARNING_CONFIG_FAILED;
return ret;
struct ospf_neighbor *nbr;
struct route_node *rn;
uint32_t bandwidth = ifp->bandwidth ? ifp->bandwidth : ifp->speed;
+ struct ospf_if_params *params;
/* Is interface up? */
if (use_json) {
ospf_nbr_count(oi, 0),
ospf_nbr_count(oi, NSM_Full));
+
+ params = IF_DEF_PARAMS(ifp);
+ if (params &&
+ OSPF_IF_PARAM_CONFIGURED(params, v_gr_hello_delay)) {
+ if (use_json) {
+ json_object_int_add(json_interface_sub,
+ "grHelloDelaySecs",
+ params->v_gr_hello_delay);
+ } else
+ vty_out(vty,
+ " Graceful Restart hello delay: %us\n",
+ params->v_gr_hello_delay);
+ }
+
ospf_interface_bfd_show(vty, ifp, json_interface_sub);
/* OSPF Authentication information */
return no_ip_ospf_retransmit_interval(self, vty, argc, argv);
}
+DEFPY (ip_ospf_gr_hdelay,
+ ip_ospf_gr_hdelay_cmd,
+ "ip ospf graceful-restart hello-delay (1-1800)",
+ IP_STR
+ "OSPF interface commands\n"
+ "Graceful Restart parameters\n"
+ "Delay the sending of the first hello packets.\n"
+ "Delay in seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct ospf_if_params *params;
+
+ params = IF_DEF_PARAMS(ifp);
+
+ /* Note: new or updated value won't affect ongoing graceful restart. */
+ SET_IF_PARAM(params, v_gr_hello_delay);
+ params->v_gr_hello_delay = hello_delay;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_ip_ospf_gr_hdelay,
+ no_ip_ospf_gr_hdelay_cmd,
+ "no ip ospf graceful-restart hello-delay [(1-1800)]",
+ NO_STR
+ IP_STR
+ "OSPF interface commands\n"
+ "Graceful Restart parameters\n"
+ "Delay the sending of the first hello packets.\n"
+ "Delay in seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct ospf_if_params *params;
+ struct route_node *rn;
+
+ params = IF_DEF_PARAMS(ifp);
+ UNSET_IF_PARAM(params, v_gr_hello_delay);
+ params->v_gr_hello_delay = OSPF_HELLO_DELAY_DEFAULT;
+
+ for (rn = route_top(IF_OIFS(ifp)); rn; rn = route_next(rn)) {
+ struct ospf_interface *oi;
+
+ oi = rn->info;
+ if (!oi)
+ continue;
+
+ oi->gr.hello_delay.elapsed_seconds = 0;
+ EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+ }
+
+ return CMD_SUCCESS;
+}
+
DEFUN (ip_ospf_transmit_delay,
ip_ospf_transmit_delay_addr_cmd,
"ip ospf transmit-delay (1-65535) [A.B.C.D]",
vty_out(vty, "\n");
}
+ /* Hello Graceful-Restart Delay print. */
+ if (OSPF_IF_PARAM_CONFIGURED(params,
+ v_gr_hello_delay) &&
+ params->v_gr_hello_delay !=
+ OSPF_HELLO_DELAY_DEFAULT)
+ vty_out(vty,
+ " ip ospf graceful-restart hello-delay %u\n",
+ params->v_gr_hello_delay);
+
/* Router Priority print. */
if (OSPF_IF_PARAM_CONFIGURED(params, priority)
&& params->priority
int ospf_zebra_gr_enable(struct ospf *ospf, uint32_t stale_time)
{
+ if (IS_DEBUG_OSPF_GR)
+ zlog_debug("Zebra enable GR [stale time %u]", stale_time);
+
return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_CAPABILITIES,
stale_time);
}
int ospf_zebra_gr_disable(struct ospf *ospf)
{
+ if (IS_DEBUG_OSPF_GR)
+ zlog_debug("Zebra disable GR");
+
return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_DISABLE, 0);
}
static void ospf_zebra_connected(struct zclient *zclient)
{
+ struct ospf *ospf;
+ struct listnode *node;
+
/* Send the client registration */
bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
zclient_send_reg_requests(zclient, VRF_DEFAULT);
+
+ /* Activate graceful restart if configured. */
+ for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
+ if (!ospf->gr_info.restart_support)
+ continue;
+ (void)ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
+ }
}
/*
if (!ospf->gr_info.prepare_in_progress)
ospf_flush_self_originated_lsas_now(ospf);
+ XFREE(MTYPE_TMP, ospf->gr_info.exit_reason);
/* Unregister redistribution */
for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
&& if_is_operative(co->ifp))
ospf_if_up(oi);
+ /*
+ * RFC 3623 - Section 5 ("Unplanned Outages"):
+ * "The grace-LSAs are encapsulated in Link State Update Packets
+ * and sent out to all interfaces, even though the restarted
+ * router has no adjacencies and no knowledge of previous
+ * adjacencies".
+ */
+ if (oi->ospf->gr_info.restart_in_progress &&
+ oi->ospf->gr_info.reason == OSPF_GR_UNKNOWN_RESTART)
+ ospf_gr_unplanned_start_interface(oi);
+
return oi;
}
bool prepare_in_progress;
bool finishing_restart;
uint32_t grace_period;
+ int reason;
+ char *exit_reason;
struct event *t_grace_period;
};
}
if (ifp->ifindex < 0) {
- zlog_warn("%s: ifindex=%d < 1 on interface %s", __func__,
+ zlog_warn("%s: ifindex=%d < 0 on interface %s", __func__,
ifp->ifindex, ifp->name);
return -2;
- } else if ((ifp->ifindex == 0) &&
+ } else if ((ifp->ifindex == PIM_OIF_PIM_REGISTER_VIF) &&
((strncmp(ifp->name, "pimreg", 6)) &&
(strncmp(ifp->name, "pim6reg", 7)))) {
- zlog_warn("%s: ifindex=%d == 0 on interface %s", __func__,
+ zlog_warn("%s: ifindex=%d on interface %s", __func__,
ifp->ifindex, ifp->name);
return -2;
}
/*
* XPath: /frr-ripd:ripd/instance/allow-ecmp
*/
-DEFPY_YANG (rip_allow_ecmp,
+DEFUN_YANG (rip_allow_ecmp,
rip_allow_ecmp_cmd,
- "[no] allow-ecmp",
+ "allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
+ "Allow Equal Cost MultiPath\n"
+ "Number of paths\n")
+{
+ int idx_number = 1;
+ char mpaths[16] = {};
+ uint32_t paths = MULTIPATH_NUM;
+
+ if (argc > idx_number)
+ paths = strtol(argv[idx_number]->arg, NULL, 10);
+ snprintf(mpaths, sizeof(mpaths), "%u", paths);
+
+ nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, mpaths);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN_YANG (no_rip_allow_ecmp,
+ no_rip_allow_ecmp_cmd,
+ "no allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
NO_STR
- "Allow Equal Cost MultiPath\n")
+ "Allow Equal Cost MultiPath\n"
+ "Number of paths\n")
{
- nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY,
- no ? "false" : "true");
+ nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, 0);
return nb_cli_apply_changes(vty, NULL);
}
void cli_show_rip_allow_ecmp(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
- if (!yang_dnode_get_bool(dnode, NULL))
- vty_out(vty, " no");
+ uint8_t paths;
+
+ paths = yang_dnode_get_uint8(dnode, NULL);
- vty_out(vty, " allow-ecmp\n");
+ if (!paths)
+ vty_out(vty, " no allow-ecmp\n");
+ else
+ vty_out(vty, " allow-ecmp %d\n", paths);
}
/*
install_element(RIP_NODE, &rip_no_distribute_list_cmd);
install_element(RIP_NODE, &rip_allow_ecmp_cmd);
+ install_element(RIP_NODE, &no_rip_allow_ecmp_cmd);
install_element(RIP_NODE, &rip_default_information_originate_cmd);
install_element(RIP_NODE, &rip_default_metric_cmd);
install_element(RIP_NODE, &no_rip_default_metric_cmd);
return NB_OK;
rip = nb_running_get_entry(args->dnode, NULL, true);
- rip->ecmp = yang_dnode_get_bool(args->dnode, NULL);
- if (!rip->ecmp)
+ rip->ecmp = yang_dnode_get_uint8(args->dnode, NULL);
+ if (!rip->ecmp) {
rip_ecmp_disable(rip);
+ return NB_OK;
+ }
+
+ rip_ecmp_change(rip);
return NB_OK;
}
/* All information about zebra. */
struct zclient *zclient = NULL;
+uint32_t zebra_ecmp_count = MULTIPATH_NUM;
/* Send ECMP routes to zebra. */
static void rip_zebra_ipv4_send(struct rip *rip, struct route_node *rp,
struct zapi_nexthop *api_nh;
struct listnode *listnode = NULL;
struct rip_info *rinfo = NULL;
- int count = 0;
+ uint32_t count = 0;
memset(&api, 0, sizeof(api));
api.vrf_id = rip->vrf->vrf_id;
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- if (count >= MULTIPATH_NUM)
+ if (count >= zebra_ecmp_count)
break;
api_nh = &api.nexthops[count];
api_nh->vrf_id = rip->vrf->vrf_id;
[ZEBRA_REDISTRIBUTE_ROUTE_DEL] = rip_zebra_read_route,
};
+static void rip_zebra_capabilities(struct zclient_capabilities *cap)
+{
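+ /* Never advertise more nexthops than zebra supports. */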
+ zebra_ecmp_count = MIN(cap->ecmp, zebra_ecmp_count);
+}
+
void rip_zclient_init(struct event_loop *master)
{
/* Set default value to the zebra client structure. */
array_size(rip_handlers));
zclient_init(zclient, ZEBRA_ROUTE_RIP, 0, &ripd_privs);
zclient->zebra_connected = rip_zebra_connected;
+ zclient->zebra_capabilities = rip_zebra_capabilities;
}
void rip_zclient_stop(void)
{
struct route_node *rp = rinfo_new->rp;
struct rip_info *rinfo = NULL;
+ struct rip_info *rinfo_exist = NULL;
struct list *list = NULL;
+ struct listnode *node = NULL;
+ struct listnode *nnode = NULL;
if (rp->info == NULL)
rp->info = list_new();
if (listcount(list) && !rip->ecmp)
return NULL;
+ /* Add or replace an existing ECMP path with lower neighbor IP */
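+ /* Example: with "allow-ecmp 2" and paths from 10.0.0.2 and 10.0.0.3
+ * installed, a new path from 10.0.0.1 evicts the 10.0.0.3 entry
+ * (addresses are illustrative). */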
+ if (listcount(list) && listcount(list) >= rip->ecmp) {
+ struct rip_info *from_highest = NULL;
+
+ /* Find the rip_info struct that has the highest nexthop IP */
+ for (ALL_LIST_ELEMENTS(list, node, nnode, rinfo_exist))
+ if (!from_highest ||
+ IPV4_ADDR_CMP(&rinfo_exist->from,
+ &from_highest->from) > 0) {
+ from_highest = rinfo_exist;
+ }
+
+ /* If we have a route in ECMP group, delete the old
+ * one that has a higher next-hop address. Lower IP is
+ * preferred.
+ */
+ if (rip->ecmp > 1 && from_highest &&
+ IPV4_ADDR_CMP(&from_highest->from, &rinfo_new->from) > 0) {
+ rip_ecmp_delete(rip, from_highest);
+ goto add_or_replace;
+ }
+
+ return NULL;
+ }
+
+add_or_replace:
rinfo = rip_info_new();
memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
listnode_add(list, rinfo);
return RB_FIND(rip_instance_head, &rip_instances, &rip);
}
+/* Update ECMP routes to zebra when `allow-ecmp` changed. */
+void rip_ecmp_change(struct rip *rip)
+{
+ struct route_node *rp;
+ struct rip_info *rinfo;
+ struct list *list;
+ struct listnode *node, *nextnode;
+
+ for (rp = route_top(rip->table); rp; rp = route_next(rp)) {
+ list = rp->info;
+ if (list && listcount(list) > 1) {
+ while (listcount(list) > rip->ecmp) {
+ struct rip_info *from_highest = NULL;
+
+ for (ALL_LIST_ELEMENTS(list, node, nextnode,
+ rinfo)) {
+ if (!from_highest ||
+ IPV4_ADDR_CMP(&rinfo->from,
+ &from_highest->from) > 0)
+ from_highest = rinfo;
+ }
+
+ rip_ecmp_delete(rip, from_highest);
+ }
+ }
+ }
+}
+
/* Create new RIP instance and set it to global variable. */
struct rip *rip_create(const char *vrf_name, struct vrf *vrf, int socket)
{
rip->vrf_name = XSTRDUP(MTYPE_RIP_VRF_NAME, vrf_name);
/* Set initial value. */
- rip->ecmp = yang_get_default_bool("%s/allow-ecmp", RIP_INSTANCE);
+ rip->ecmp = yang_get_default_uint8("%s/allow-ecmp", RIP_INSTANCE);
rip->default_metric =
yang_get_default_uint8("%s/default-metric", RIP_INSTANCE);
rip->distance =
struct route_table *distance_table;
/* RIP ECMP flag */
- bool ecmp;
+ uint8_t ecmp;
/* Are we in passive-interface default mode? */
bool passive_default;
DECLARE_HOOK(rip_ifaddr_add, (struct connected * ifc), (ifc));
DECLARE_HOOK(rip_ifaddr_del, (struct connected * ifc), (ifc));
+extern void rip_ecmp_change(struct rip *rip);
+
#endif /* _ZEBRA_RIP_H */
Timer intervals configured, Hello 1s, Dead 5s, Wait 5s, Retransmit 5
Hello due in XX.XXXs
Neighbor Count is 0, Adjacent neighbor count is 0
+ Graceful Restart hello delay: 10s
r1-eth3 is up
ifindex X, MTU 1500 bytes, BW XX Mbit <UP,BROADCAST,RUNNING,MULTICAST>
Internet Address 192.168.3.1/26, Broadcast 192.168.3.63, Area 0.0.0.0
Timer intervals configured, Hello 1s, Dead 5s, Wait 5s, Retransmit 5
Hello due in XX.XXXs
Neighbor Count is 0, Adjacent neighbor count is 0
+ Graceful Restart hello delay: 10s
if fatal_error != "":
pytest.skip(fatal_error)
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ print(
+ "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ )
+ pytest.skip("Skipping test for Stderr output")
+
print("\n\n** Check for error messages in daemons")
print("******************************************\n")
logging.critical('No "/tmp/topotests" directory to save')
sys.exit(1)
subprocess.run(["mv", "/tmp/topotests", args.results])
+ if "SUDO_USER" in os.environ:
+ subprocess.run(["chown", "-R", os.environ["SUDO_USER"], args.results])
# # Old location for results
# if os.path.exists("/tmp/topotests.xml", args.results):
# subprocess.run(["mv", "/tmp/topotests.xml", args.results])
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
- router.load_config(TopoRouter.RD_OSPF)
- router.load_config(TopoRouter.RD_BGP)
+ router.load_config(TopoRouter.RD_OSPF, "")
+ router.load_config(TopoRouter.RD_BGP, "")
# After copying the configurations, this function loads configured daemons.
tgen.start_router()
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
- router.load_config(TopoRouter.RD_OSPF)
- router.load_config(TopoRouter.RD_BGP)
+ router.load_config(TopoRouter.RD_OSPF, "")
+ router.load_config(TopoRouter.RD_BGP, "")
# After copying the configurations, this function loads configured daemons.
tgen.start_router()
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
- router.load_config(TopoRouter.RD_OSPF)
- router.load_config(TopoRouter.RD_BGP)
+ router.load_config(TopoRouter.RD_OSPF, "")
+ router.load_config(TopoRouter.RD_BGP, "")
# After copying the configurations, this function loads configured daemons.
tgen.start_router()
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
- router.load_config(TopoRouter.RD_OSPF)
- router.load_config(TopoRouter.RD_BGP)
+ router.load_config(TopoRouter.RD_OSPF, "")
+ router.load_config(TopoRouter.RD_BGP, "")
# After copying the configurations, this function loads configured daemons.
tgen.start_router()
!
router bgp 65001
- timers 3 10
+ timers bgp 3 10
no bgp ebgp-requires-policy
neighbor 192.168.1.2 remote-as external
neighbor 192.168.1.2 timers connect 5
router bgp 65002
- timers 3 10
+ timers bgp 3 10
no bgp ebgp-requires-policy
neighbor 192.168.1.1 remote-as external
neighbor 192.168.1.1 timers connect 5
router bgp 65003
- timers 3 10
+ timers bgp 3 10
no bgp ebgp-requires-policy
neighbor 192.168.2.2 remote-as external
neighbor 192.168.2.2 timers connect 5
router bgp 65004
- timers 3 10
+ timers bgp 3 10
no bgp ebgp-requires-policy
neighbor 192.168.2.2 remote-as external
neighbor 192.168.2.2 timers connect 5
# tgen.mininet_cli()
+def get_shut_msg_count(tgen):
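+    """Count "Cease/Administrative Shutdown" notifications in the bgpd logs of r2 and r4."""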
+ shuts = {}
+ for rtrNum in [2, 4]:
+ shutmsg = tgen.net["r{}".format(rtrNum)].cmd_nostatus(
+ 'grep -c "NOTIFICATION.*Cease/Administrative Shutdown" bgpd.log', warn=False
+ )
+ try:
+ shuts[rtrNum] = int(shutmsg.strip())
+ except ValueError:
+ shuts[rtrNum] = 0
+ return shuts
+
+
def test_bgp_shutdown():
"Test BGP instance shutdown"
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ shuts_before = get_shut_msg_count(tgen)
+
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" -c "bgp shutdown message ABCDabcd"'
)
)
assert res is None, assertmsg
+ shuts_after = get_shut_msg_count(tgen)
+
+ for k in shuts_before:
+ assert shuts_before[k] + 1 == shuts_after[k]
+
def test_bgp_shutdown_message():
"Test BGP Peer Shutdown Message"
logger.info("Checking BGP shutdown received on router r{}".format(rtrNum))
shut_message = tgen.net["r{}".format(rtrNum)].cmd(
- 'tail bgpd.log | grep "NOTIFICATION.*Cease/Administrative Shutdown"'
+ 'grep -e "NOTIFICATION.*Cease/Administrative Shutdown.*ABCDabcd" bgpd.log'
)
assertmsg = "BGP shutdown message not received on router R{}".format(rtrNum)
assert shut_message != "", assertmsg
- assertmsg = "Incorrect BGP shutdown message received on router R{}".format(
- rtrNum
- )
- assert "ABCDabcd" in shut_message, assertmsg
-
- # tgen.mininet_cli()
-
def test_bgp_no_shutdown():
"Test BGP instance no shutdown"
--- /dev/null
+{
+ "address_types": ["ipv4"],
+ "ipv4base":"192.120.1.0",
+ "ipv4mask":24,
+ "link_ip_start":{"ipv4":"192.120.1.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r2":{"ipv4":"auto"},
+ "r3":{"ipv4":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto"},
+ "r2":{"ipv4":"auto"},
+ "r4":{"ipv4":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r3":{"ipv4":"auto"}
+ },
+ "bgp":{
+ "local_as":"200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+#!/usr/bin/python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+
+"""
+The following tests cover prefix-list functionality:
+
+Test steps
+- Create topology (setup module)
+ Create a 4-router topology where r1, r2 and r3 are in IBGP and
+ r3 and r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+
+IP prefix-list tests
+- Test modify prefix-list action
+"""
+
+import sys
+import time
+import os
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ create_prefix_lists,
+ step,
+ create_route_maps,
+ check_router_status,
+)
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
+
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd]
+
+
+# Global variables
+bgp_convergence = False
+
+IPV4_PF3 = "192.168.0.0/18"
+IPV4_PF4 = "192.150.10.0/24"
+IPV4_PF5 = "192.168.10.1/32"
+IPV4_PF6 = "192.168.10.10/32"
+IPV4_PF7 = "192.168.10.0/24"
+
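+# For orientation, a hedged sketch of the vtysh commands that the
+# create_prefix_lists() helper from lib/common_config.py is assumed to
+# render on r3 for the steps below (the rendering shown is an assumption):
+def apply_prefix_list_sketch(tgen):
+    cmd = 'vtysh -c "conf t" -c "ip prefix-list pf_list_1 seq 5 deny {}"'.format(
+        IPV4_PF7
+    )
+    tgen.net["r3"].cmd(cmd)
+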
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/prefix_lists.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global BGP_CONVERGENCE
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # API call to verify whether BGP has converged
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module: Failed\nError: {}".format(
+        BGP_CONVERGENCE
+    )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Tests starting
+#
+#####################################################
+
+
+def test_bug_prefix_lists_deny_to_permit_p1(request):
+ """
+ Verify modification of prefix-list action
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ # base config
+ step("Configure IPV4 and IPv6 IBGP and EBGP session as mentioned in setup")
+ step("Configure static routes on R2 with Null 0 nexthop")
+ input_dict_1 = {
+ "r2": {
+ "static_routes": [
+ {"network": IPV4_PF7, "no_of_ip": 1, "next_hop": "Null0"},
+ {"network": IPV4_PF6, "no_of_ip": 1, "next_hop": "Null0"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Advertise static route in BGP using redistribute static command")
+ input_dict_4 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that all the static routes are advertised to R4 as BGP "
+        "routes using 'show ip bgp' and 'show bgp'"
+    )
+ dut = "r4"
+ protocol = "bgp"
+
+ input_dict_route = {
+ "r4": {"static_routes": [{"network": IPV4_PF7}, {"network": IPV4_PF6}]}
+ }
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_route)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure IPv4 and IPv6 prefix-list")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": "5", "network": IPV4_PF7, "action": "deny"}
+ ],
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {"seqid": "10", "network": IPV4_PF7, "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Configure a route-map sequence to permit the IPv4 prefix list "
+        "and apply it in the out direction on R3"
+    )
+
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_7 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_7)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that R4 does not have the filtered BGP routes, using "
+        "'show ip bgp'"
+    )
+
+ dut = "r4"
+ protocol = "bgp"
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_route, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" "Error : Routes are still present \n {}".format(
+ tc_name, result
+ )
+
+ step("Modify IPv4/IPv6 prefix-list sequence 5 to another value on R3")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {"seqid": "5", "network": IPV4_PF4, "action": "deny"}
+ ],
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify the /24 route is present in the "
+        "R4 BGP table using 'show ip bgp'"
+    )
+ input_dict = {"r4": {"static_routes": [{"network": IPV4_PF7}]}}
+
+ dut = "r4"
+ protocol = "bgp"
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change prefix-list to same as original on R3")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {"seqid": "5", "network": IPV4_PF7, "action": "deny"}
+ ],
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify the /24 route is removed from the "
+        "R4 BGP table using 'show ip bgp'"
+    )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" "Error : Routes are still present \n {}".format(
+ tc_name, result
+ )
+
+ step("Modify IPv4/IPv6 prefix-list sequence 5 to another value")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {"seqid": "5", "network": IPV4_PF4, "action": "deny"}
+ ],
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear BGP on R3 and verify the routes")
+ clear_bgp(tgen, "ipv4", "r3")
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("On R3 add prefix-list permit any for IPv4 and IPv6 seq 15")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {"seqid": "15", "network": "any", "action": "permit"}
+ ],
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify /24 and /32 /120 and /128 routes are present on R4")
+ result = verify_rib(tgen, "ipv4", dut, input_dict_route)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
# the old flag after each iteration so we only test the flags we expect.
_change_remove_type(rmv_type, "del")
- return True
-
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
+ip route 10.10.10.10/32 192.168.2.2
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ address-family ipv4
+ redistribute connected
+ redistribute static
+ neighbor 192.168.1.2 route-map r2 out
+ neighbor 192.168.2.2 route-map r3 out
+ exit-address-family
+!
+route-map r2 permit 10
+ match source-protocol static
+route-map r3 permit 10
+ match source-protocol connected
+!
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if r1 can announce only static routes to r2, and only connected
+routes to r3 using `match source-protocol` with route-maps.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
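+# Convenience sketch (hypothetical helper, not used by the test itself):
+# count the prefixes r1 advertises to a peer, using the same vtysh JSON
+# call and "totalPrefixCounter" field that the checks below rely on.
+def count_advertised_sketch(tgen, peer):
+    out = json.loads(
+        tgen.gears["r1"].vtysh_cmd(
+            "show bgp ipv4 unicast neighbors {} advertised-routes json".format(peer)
+        )
+    )
+    return out.get("totalPrefixCounter", 0)
+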
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+    for rname, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_route_map_match_source_protocol():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_check_advertised_routes_r2():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp ipv4 unicast neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.10/32": {
+ "valid": True,
+ }
+ },
+ "totalPrefixCounter": 1,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_advertised_routes_r2)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to filter routes by source-protocol for r2"
+
+ def _bgp_check_advertised_routes_r3():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp ipv4 unicast neighbors 192.168.2.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "192.168.1.0/24": {
+ "valid": True,
+ },
+ "192.168.2.0/24": {
+ "valid": True,
+ },
+ "172.16.255.1/32": {
+ "valid": True,
+ },
+ },
+ "totalPrefixCounter": 3,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_advertised_routes_r3)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to filter routes by source-protocol for r3"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "192.168.0.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+
+ ],
+ "172.31.0.11/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.11",
+ "prefixLen":32,
+ "network":"172.31.0.11/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.12/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.12",
+ "prefixLen":32,
+ "network":"172.31.0.12/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.12",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.13/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.13",
+ "prefixLen":32,
+ "network":"172.31.0.13/32",
+ "peerId":"192.168.255.13",
+ "nexthops":[
+ {
+ "ip":"192.168.255.13",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.14/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.14",
+ "prefixLen":32,
+ "network":"172.31.0.14/32",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192.0.2.14",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.15/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.15",
+ "prefixLen":32,
+ "network":"172.31.0.15/32",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192.0.2.12",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.20/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.20",
+ "prefixLen":32,
+ "network":"172.31.0.20/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172.31.0.111/32": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172.31.0.111",
+ "prefixLen":32,
+ "network":"172.31.0.111/32",
+ "peerId":"192.0.2.100",
+ "nexthops":[
+ {
+ "ip":"192.0.2.11",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192.168.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.2 activate
+ neighbor 192.168.0.2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192.0.2.100 remote-as 65500
+ neighbor 192.168.255.13 remote-as 65500
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute static
+ label vpn export allocation-mode per-nexthop
+ label vpn export auto
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+{
+ "10.200.0.0/24": [
+ {
+ "prefix": "10.200.0.0/24",
+ "prefixLen": 24,
+ "protocol": "bgp",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "10.125.0.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "vrf": "default",
+ "active": true,
+ "labels":[
+ 102
+ ]
+ }
+ ]
+ }
+ ],
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0/24",
+ "prefixLen": 24,
+ "protocol": "connected",
+ "vrfName": "vrf1",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "nexthops":[
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "r1-eth1",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ip address 192.0.2.1/24
+!
+interface r1-eth2 vrf vrf1
+ ip address 192.168.255.1/24
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+vrf vrf1
+ ip route 172.31.0.14/32 192.0.2.14
+ ip route 172.31.0.15/32 192.0.2.12
+exit-vrf
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.11
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+ network 172.31.0.11/32
+ network 172.31.0.111/32
+ network 172.31.0.20/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r11-eth0
+ ip address 192.0.2.11/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 192.0.2.12
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+ network 172.31.0.12/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r12-eth0
+ ip address 192.0.2.12/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 192.168.255.13
+ no bgp network import-check
+ address-family ipv4 unicast
+ neighbor 192.168.255.1 remote-as 65500
+ network 172.31.0.13/32
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r13-eth0
+ ip address 192.168.255.13/24
+!
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65501,
+ "routes":
+ {
+ "10.201.0.0/24": [
+ {
+ "prefix": "10.201.0.0",
+ "prefixLen": 24,
+ "network": "10.201.0.0\/24",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "vrfName": "default",
+ "localAS": 65501,
+ "routes":
+ {
+ "routeDistinguishers":
+ {
+ "444:1":
+ {
+ "172.31.0.11/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.11",
+ "prefixLen": 32,
+ "network": "172.31.0.11\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.12/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.12",
+ "prefixLen": 32,
+ "network": "172.31.0.12\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.13/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.13",
+ "prefixLen": 32,
+ "network": "172.31.0.13\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.14/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.14",
+ "prefixLen": 32,
+ "network": "172.31.0.14\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.15/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.15",
+ "prefixLen": 32,
+ "network": "172.31.0.15\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.20/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.20",
+ "prefixLen": 32,
+ "network": "172.31.0.20\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.111/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172.31.0.111",
+ "prefixLen": 32,
+ "network": "172.31.0.111\/32",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192.0.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192.0.2.0",
+ "prefixLen": 24,
+ "network": "192.0.2.0\/24",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192.168.255.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192.168.255.0",
+ "prefixLen": 24,
+ "network": "192.168.255.0\/24",
+ "peerId": "192.168.0.1",
+ "nexthops": [
+ {
+ "ip": "192.168.0.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":
+ {
+ "10.200.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10.200.0.0",
+ "prefixLen": 24,
+ "network": "10.200.0.0\/24",
+ "peerId": "(unspec)",
+ "nhVrfName": "vrf1",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+log stdout
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.11 remote-as 65500
+ neighbor 192.0.2.12 remote-as 65500
+ address-family ipv4 unicast
+ neighbor 192.0.2.1 route-reflector-client
+ neighbor 192.0.2.11 route-reflector-client
+ neighbor 192.0.2.12 route-reflector-client
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface rr-eth0
+ ip address 192.0.2.100/24
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv4_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv4_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering.
+ The two devices exchange VPNv4 updates; updates from r1 originate from the
+ same RD, but carry separate label values.
+
+ +----------+
+ | r11 |
+ |192.0.2.11+---+
+ | | | +----+--------+ +----------+
+ +----------+ | 192.0.2.1 |vrf | r1 |192.168.0.0/24| r2 |
+ +-------------------+ | 1+--------------+ |
+ +----------+ | |VRF1|AS65500 | | AS65501 |
+ | r12 | | +-------------+ | VPNV4| |VPNV4 |
+ |192.0.2.12+---+ |192.168.255.1+-+--+--------+ +----------+
+ | | |
+ +----------+ |
+ |
+ +----------+ |
+ | r13 | |
+ |192.168. +---------+
+ | 255.13 |
+ +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172.31.0.11/32", "172.31.0.20/32", "172.31.0.111/32"]
+PREFIXES_R12 = ["172.31.0.12/32", "172.31.0.15/32"]
+PREFIXES_R13 = ["172.31.0.13/32"]
+PREFIXES_REDIST = ["172.31.0.14/32"]
+PREFIXES_CONNECTED = ["192.168.255.0/24", "192.0.2.0/24"]
+
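+# The per-nexthop behaviour under test comes from r1's vrf1 configuration
+# ("label vpn export allocation-mode per-nexthop" in r1/bgpd.conf). Hedged
+# sketch (hypothetical helper) of reading one remote label out of a vpnv4
+# entry with the same vtysh call as bgp_vpnv4_table_check() below:
+def get_remote_label_sketch(router, prefix):
+    dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+    for _rd, entry in dump.items():
+        return entry["paths"][0]["remoteLabel"]
+    return None
+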
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r11")
+ tgen.add_router("r12")
+ tgen.add_router("r13")
+ tgen.add_router("r14")
+ tgen.add_router("rr")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r11"])
+ switch.add_link(tgen.gears["r12"])
+ switch.add_link(tgen.gears["rr"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r13"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf1 type vrf table 10",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+ ]
+ cmds_list_plus = [
+ "ip link set dev {0}-eth2 master vrf1",
+ ]
+
+    # Log the fully formatted command rather than the "{0}" template
+    for cmd in cmds_list + cmds_list_plus:
+        cmd_fmt = cmd.format("r1")
+        logger.info("input: " + cmd_fmt)
+        output = tgen.net["r1"].cmd(cmd_fmt)
+        logger.info("output: " + output)
+
+    for cmd in cmds_list:
+        cmd_fmt = cmd.format("r2")
+        logger.info("input: " + cmd_fmt)
+        output = tgen.net["r2"].cmd(cmd_fmt)
+        logger.info("output: " + output)
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None):
+ """
+ Dump and check that vpnv4 entries have the same MPLS label value
+ * 'router': the router to check
+ * 'group': the list of prefixes to check. a single label value for the group has to be found
+ * 'label_list': check that the label values are not present in the vpnv4 entries
+ * that list is updated with the present label value
+ * 'label_value_expected': check that the mpls label read is the same as that value
+ """
+
+ stored_label_inited = False
+ for prefix in group:
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ assert dump, "{0}, {1}, route distinguisher not present".format(
+ router.name, prefix
+ )
+        for rd, entry in dump.items():
+            for path in entry["paths"]:
+ assert (
+ "remoteLabel" in path.keys()
+ ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+ logger.info(
+ "{0}, {1}, label value is {2}".format(
+ router.name, prefix, path["remoteLabel"]
+ )
+ )
+ if stored_label_inited:
+ assert (
+ path["remoteLabel"] == stored_label
+ ), "{0}, {1}, label value not expected one (expected {2}, observed {3}".format(
+ router.name, prefix, stored_label, path["remoteLabel"]
+ )
+ else:
+ stored_label = path["remoteLabel"]
+ stored_label_inited = True
+ if label_list is not None:
+ assert (
+ stored_label not in label_list
+ ), "{0}, {1}, label already detected in a previous prefix".format(
+ router.name, prefix
+ )
+ label_list.add(stored_label)
+
+ if label_value_expected:
+ assert (
+ path["remoteLabel"] == label_value_expected
+ ), "{0}, {1}, label value not expected (expected {2}, observed {3}".format(
+ router.name, prefix, label_value_expected, path["remoteLabel"]
+ )
+
+
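+# Usage sketch (hypothetical helper), mirroring the calls made in the tests
+# below: every prefix learnt from r11 must carry one common label, recorded
+# into label_list so that other groups may not reuse it.
+def check_r11_labels_sketch(tgen):
+    label_list = set()
+    bgp_vpnv4_table_check(tgen.gears["r2"], group=PREFIXES_R11, label_list=label_list)
+    assert len(label_list) == 1
+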
+def bgp_vpnv4_table_check_all(router, label_list=None, same=False):
+ """
+ Dump and check that vpnv4 entries are correctly configured with specific label values
+ * 'router': the router to check
+ * 'label_list': check that the label values are not present in the vpnv4 entries
+ * that list is updated with the present label value found.
+ * 'same': by default, set to False. Addresses groups are classified by addresses.
+ * if set to True, all entries of all groups should have a unique label value
+ """
+ if same:
+ bgp_vpnv4_table_check(
+ router,
+ group=PREFIXES_R11
+ + PREFIXES_R12
+ + PREFIXES_R13
+ + PREFIXES_REDIST
+ + PREFIXES_CONNECTED,
+ label_list=label_list,
+ )
+ else:
+ for group in (
+ PREFIXES_R11,
+ PREFIXES_R12,
+ PREFIXES_R13,
+ PREFIXES_REDIST,
+ PREFIXES_CONNECTED,
+ ):
+ bgp_vpnv4_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+ """
+ Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+ * 'label_list': the list of labels that should be in inLabel value
+ * 'whitelist': the list of nexthops (IP or interface) that should be on output
+ """
+ nexthop_list = []
+ if blacklist:
+ nexthop_list.append(blacklist)
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table json", isjson=True)
+ for in_label, label_info in dump.items():
+ if label_list is not None:
+ label_list.add(in_label)
+ for nh in label_info["nexthops"]:
+ assert (
+ nh["installed"] == True and nh["type"] == "BGP"
+ ), "{}, show mpls table, nexthop is not installed".format(router.name)
+ if "nexthop" in nh.keys():
+ assert (
+ nh["nexthop"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ router.name
+ )
+ nexthop_list.append(nh["nexthop"])
+ elif "interface" in nh.keys():
+ assert (
+ nh["interface"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+ router.name
+ )
+ nexthop_list.append(nh["interface"])
+ else:
+ assert (
+ 0
+ ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+ router.name
+ )
+
+ if whitelist:
+ for entry in whitelist:
+ assert (
+ entry in nexthop_list
+ ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+ router.name, entry
+ )
+
+
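+# Usage sketch (hypothetical helper), mirroring test_flapping_bgp_vrf_down()
+# below: once r11 is unpeered, its nexthop address must no longer appear in
+# r1's MPLS table.
+def check_r11_nexthop_flushed_sketch(tgen):
+    mpls_table_check(tgen.gears["r1"], blacklist=["192.0.2.11"])
+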
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ expected = {rd: {"prefix": prefix}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
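+# The "not good" return value above pairs with topotest.run_and_expect(),
+# which polls until the callback returns None; a non-None string while the
+# prefix is still present keeps the poll running. Usage sketch (hypothetical
+# helper):
+def wait_vpn_prefix_removed_sketch(router, prefix, rd):
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found, router, "ipv4", prefix, rd
+    )
+    success, _ = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    return success
+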
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ expected = {rd: {"prefix": prefix}}
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {
+ "inLabel": inlabel,
+ "installed": True,
+ "nexthops": [{"interface": interface}],
+ }
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {"inlabel": inlabel, "installed": True}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def mpls_entry_get_interface(router, label):
+ """
+    Assert that the label is in the MPLS table and that an outgoing
+    interface is programmed; return the outgoing interface
+ """
+ outgoing_interface = None
+
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+ assert dump, "{0}, label {1} not present".format(router.name, label)
+
+ for nh in dump["nexthops"]:
+ assert (
+ "interface" in nh.keys()
+ ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+ router.name, label
+ )
+
+ outgoing_interface = nh["interface"]
+
+ return outgoing_interface
+
+
+def test_protocols_convergence():
+ """
+    Assert that all protocols have converged;
+    the checks below depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Check BGP IPv4 routing tables on VRF1 of r1
+ logger.info("Checking BGP IPv4 routes for convergence on r1 VRF1")
+ router = tgen.gears["r1"]
+ json_file = "{}/{}/bgp_ipv4_routes_vrf1.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf vrf1 ipv4 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ logger.info("Checking BGP VPNv4 routes for convergence on r2")
+ router = tgen.gears["r2"]
+ json_file = "{}/{}/bgp_vpnv4_routes.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv4 vpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ # Check BGP labels received on r2
+ logger.info("Checking BGP VPNv4 labels on r2")
+ label_list = set()
+ bgp_vpnv4_table_check_all(tgen.gears["r2"], label_list)
+
+ # Check MPLS labels received on r1
+ mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+ """
+ Turn down a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Unpeering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nno neighbor 192.0.2.100\n",
+ isjson=False,
+ )
+
+ def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+ output = json.loads(
+ router.vtysh_cmd(
+ "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+ )
+ )
+ expected = {"prefix": prefix}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+ # Check prefix from r11 is not present
+ test_func = functools.partial(
+ _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv4", "172.31.0.11/32"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r1, prefix 172.31.0.11/32 from r11 did not disappear. r11 still connected to rr ?"
+
+    # Check that BGP updates received on r2 are not from r11
+ logger.info("Checking BGP VPNv4 labels on r2")
+ for entry in PREFIXES_R11:
+ dump = tgen.gears["r2"].vtysh_cmd(
+ "show bgp ipv4 vpn {} json".format(entry), isjson=True
+ )
+ for rd in dump:
+ assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+ mpls_table_check(tgen.gears["r1"], blacklist=["192.0.2.11"])
+
+
+def test_flapping_bgp_vrf_up():
+ """
+ Turn up a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Peering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nneighbor 192.0.2.100 remote-as 65500\n",
+ isjson=False,
+ )
+
+    # Check r2 gets prefix 172.31.0.11/32
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.11/32",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, prefix 172.31.0.11/32 from r11 not present. r11 still disconnected from rr ?"
+ bgp_vpnv4_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+ """
+ Test static recursive route redistributed over BGP
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nip route 172.31.0.30/32 172.31.0.20\n",
+ isjson=False,
+ )
+ logger.info("Checking BGP VPNv4 labels on r2")
+
+ # Check r2 received vpnv4 update with 172.31.0.30
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.30/32",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv4 update 172.31.0.30 not found"
+
+ bgp_vpnv4_table_check(tgen.gears["r2"], group=PREFIXES_R11 + ["172.31.0.30/32"])
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+ logger.info("Dumping nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+ logger.info("Disabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nno ip route 172.31.0.30/32 172.31.0.20\n",
+ isjson=False,
+ )
+ logger.info("Checking BGP VPNv4 labels on r2")
+
+ # Check r2 removed 172.31.0.30 vpnv4 update
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.30/32",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv4 update 172.31.0.30 still present"
+
+
+def test_prefix_changes_interface():
+ """
+ Test BGP update for a given prefix learnt on different interface
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling a 172.31.0.50/32 prefix for r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+ isjson=False,
+ )
+
+ # Check r2 received vpnv4 update with 172.31.0.50
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.50/32",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ label_list = set()
+ bgp_vpnv4_table_check(
+ tgen.gears["r2"],
+ group=["172.31.0.11/32", "172.31.0.111/32", "172.31.0.50/32"],
+ label_list=label_list,
+ )
+
+ assert (
+ len(label_list) == 1
+ ), "Multiple Label values found for updates from r11 found"
+
+ oldlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+ old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ oldlabel, old_outgoing_interface
+ )
+ )
+
+ logger.info("Moving the 172.31.0.50/32 prefix from r11 to r13")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+ isjson=False,
+ )
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+ isjson=False,
+ )
+
+ # Check r2 removed 172.31.0.50 vpnv4 update with old label
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.50/32",
+ "444:1",
+ label=oldlabel,
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, vpnv4 update 172.31.0.50 with old label {0} still present".format(oldlabel)
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ # Check r2 received new 172.31.0.50 vpnv4 update
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv4",
+ "172.31.0.50/32",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+ label_list = set()
+ bgp_vpnv4_table_check(
+ tgen.gears["r2"],
+ group=PREFIXES_R13 + ["172.31.0.50/32"],
+ label_list=label_list,
+ )
+ assert (
+ len(label_list) == 1
+ ), "Multiple Label values found for updates from r13 found"
+
+ newlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+ new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ newlabel, new_outgoing_interface
+ )
+ )
+ if old_outgoing_interface == new_outgoing_interface:
+ assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+ logger.info("Restoring state by removing the 172.31.0.50/32 prefix from r13")
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+ isjson=False,
+ )
+
+
+def test_changing_default_label_value():
+ """
+    Change the default MPLS label value
+    Check that r1 VPNv4 entries have the 222 label value
+    Check that the MPLS entry with the old label value is no longer present
+    Check that the MPLS entry for local traffic has inLabel set to 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+
+ # counting the number of labels used in the VPNv4 table
+ label_list = set()
+ logger.info("r1, vpnv4 table, check the number of labels used before modification")
+ bgp_vpnv4_table_check_all(router, label_list)
+ old_len = len(label_list)
+ assert (
+ old_len != 1
+ ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)
+
+ logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export 222\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 222 not found"
+
+ # check label repartition is ok
+ logger.info("r1, vpnv4 table, check the number of labels used after modification")
+ label_list = set()
+ bgp_vpnv4_table_check_all(router, label_list)
+ new_len = len(label_list)
+ assert (
+ old_len == new_len
+ ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+ new_len, old_len
+ )
+
+ logger.info(
+ "r1, vpnv4 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+ )
+ bgp_vpnv4_table_check(
+ router, group=["192.168.255.0/24", "192.0.2.0/24"], label_value_expected=222
+ )
+
+
+def test_unconfigure_allocation_mode_nexthop():
+ """
+ Test unconfiguring allocation mode per nexthop
+ Check that show mpls table has no entry with label 17 (previously used)
+    Check that all VPN updates on r1 have their label value moved to 222
+    Check that show mpls table only has the 222 label value
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Unconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nno label vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv4 routes from r1
+ logger.info("Checking vpnv4 routes on r1")
+ label_list = set()
+ bgp_vpnv4_table_check_all(router, label_list=label_list, same=True)
+    assert len(label_list) == 1, "r1, multiple label values found for vpnv4 updates"
+
+ new_label = label_list.pop()
+ assert (
+ new_label == 222
+ ), "r1, wrong label value in VPNv4 table, expected 222, observed {}".format(
+ new_label
+ )
+
+ # Check mpls table with 222 value
+ logger.info("Checking MPLS values on show mpls table of r1")
+ label_list = set()
+ label_list.add(222)
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+ """
+ Test re-configuring allocation mode per nexthop
+ Check that show mpls table has no entry with label 17
+    Check that all VPN updates on r1 have multiple label values, not only 222
+    Check that show mpls table has multiple label values, not only 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Reconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check that show mpls table has no entry with label 17
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv4 routes from r1
+ logger.info("Checking vpnv4 routes on r1")
+ label_list = set()
+ bgp_vpnv4_table_check_all(router, label_list=label_list)
+    assert len(label_list) != 1, "r1, only one label value found for vpnv4 updates"
+
+ # Check mpls table with all values
+ logger.info("Checking MPLS values on show mpls table of r1")
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "10:200::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10:200::",
+ "prefixLen": 64,
+ "network": "10:200::/64",
+ "nexthops": [
+ {
+ "ip": "192:168::2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::11/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::11",
+ "prefixLen":128,
+ "network":"172:31::11/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global"
+ }
+ ]
+ }
+ ],
+ "172:31::12/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::12",
+ "prefixLen":128,
+ "network":"172:31::12/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::12",
+ "afi":"ipv6",
+ "scope":"global",
+ "used":true
+ },
+ {
+ "scope": "link-local"
+ }
+ ]
+ }
+ ],
+ "172:31::13/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::13",
+ "prefixLen":128,
+ "network":"172:31::13/128",
+ "peerId":"192:168::255:13",
+ "nexthops":[
+ {
+ "ip":"192:168::255:13",
+ "afi":"ipv6",
+ "scope": "global",
+ "used":true
+ },
+ {
+ "scope": "link-local"
+ }
+ ]
+ }
+ ],
+ "172:31::14/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::14",
+ "prefixLen":128,
+ "network":"172:31::14/128",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192:2::14",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::15/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::15",
+ "prefixLen":128,
+ "network":"172:31::15/128",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"192:2::12",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::20/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::20",
+ "prefixLen":128,
+ "network":"172:31::20/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "172:31::111/128": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"172:31::111",
+ "prefixLen":128,
+ "network":"172:31::111/128",
+ "peerId":"192:2::100",
+ "nexthops":[
+ {
+ "ip":"192:2::11",
+ "afi":"ipv6",
+ "scope":"global",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "192:2::/64": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"192:2::",
+ "prefixLen":64,
+ "network":"192:2::/64",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"::",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ],
+ "192:168::255:0/112": [
+ {
+ "valid":true,
+ "bestpath":true,
+ "prefix":"192:168::255:0",
+ "prefixLen":112,
+ "network":"192:168::255:0/112",
+ "peerId":"(unspec)",
+ "nexthops":[
+ {
+ "ip":"::",
+ "afi":"ipv6",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+debug bgp vpn leak-from-vrf
+debug bgp vpn label
+debug bgp nht
+debug bgp updates out
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192:168::2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192:168::2 activate
+ exit-address-family
+ address-family ipv6 vpn
+ neighbor 192:168::2 activate
+ neighbor 192:168::2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192:2::100 remote-as 65500
+ neighbor 192:168::255:13 remote-as 65500
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ neighbor 192:2::100 route-map rmap in
+ neighbor 192:168::255:13 activate
+ neighbor 192:168::255:13 route-map rmap in
+ redistribute connected
+ redistribute static route-map rmap
+ label vpn export allocation-mode per-nexthop
+ label vpn export auto
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
+bgp community-list 1 seq 5 permit 10:10
+!
+route-map rmap permit 1
+ set ipv6 next-hop prefer-global
+!
+route-map rmap permit 2
+!
--- /dev/null
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ipv6 address 192:2::1/64
+!
+interface r1-eth2 vrf vrf1
+ ipv6 address 192:168::255:1/112
+!
+interface r1-eth0
+ ip address 192:168::1/112
+!
+vrf vrf1
+ ipv6 route 172:31::14/128 192:2::14
+ ipv6 route 172:31::15/128 192:2::12
+exit-vrf
--- /dev/null
+router bgp 65500
+ bgp router-id 11.11.11.11
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ network 172:31::11/128
+ network 172:31::111/128
+ network 172:31::20/128
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r11-eth0
+ ipv6 address 192:2::11/64
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 12.12.12.12
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::100 activate
+ network 172:31::12/128
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface r12-eth0
+ ipv6 address 192:2::12/64
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 13.13.13.13
+ no bgp network import-check
+ neighbor 192:168::255:1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:168::255:1 activate
+ exit-address-family
+ address-family ipv6 unicast
+ neighbor 192:168::255:1 activate
+ network 172:31::0:13/128
+ exit-address-family
+!
--- /dev/null
+log stdout
+interface r13-eth0
+ ipv6 address 192:168::255:13/112
+!
--- /dev/null
+{
+ "vrfName": "default",
+ "localAS": 65501,
+ "routes":
+ {
+ "routeDistinguishers":
+ {
+ "444:1":
+ {
+ "172:31::11/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::11",
+ "prefixLen": 128,
+ "network": "172:31::11/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::12/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::12",
+ "prefixLen": 128,
+ "network": "172:31::12/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::13/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::13",
+ "prefixLen": 128,
+ "network": "172:31::13/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::14/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::14",
+ "prefixLen": 128,
+ "network": "172:31::14/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::15/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::15",
+ "prefixLen": 128,
+ "network": "172:31::15/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::20/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::20",
+ "prefixLen": 128,
+ "network": "172:31::20/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172:31::111/128": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "172:31::111",
+ "prefixLen": 128,
+ "network": "172:31::111/128",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192:2::",
+ "prefixLen": 64,
+ "network": "192:2::/64",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "192:168::255:0/112": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "192:168::255:0",
+ "prefixLen": 112,
+ "network": "192:168::255:0/112",
+ "peerId": "192:168::1",
+ "nexthops": [
+ {
+ "ip": "192:168::1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":
+ {
+ "10:200::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "prefix": "10:200::",
+ "prefixLen": 64,
+ "network": "10:200::/64",
+ "peerId": "(unspec)",
+ "nhVrfName": "vrf1",
+ "nexthops": [
+ {
+ "ip": "::",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192:168::1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:168::1 activate
+ exit-address-family
+ address-family ipv6 vpn
+ neighbor 192:168::1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv6 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
--- /dev/null
+log stdout
+interface r2-eth1 vrf vrf1
+ ipv6 address 10:200::2/64
+!
+interface r2-eth0
+ ipv6 address 192:168::2/112
+!
--- /dev/null
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192:2::1 remote-as 65500
+ neighbor 192:2::11 remote-as 65500
+ neighbor 192:2::12 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192:2::1 activate
+ no neighbor 192:2::11 activate
+ no neighbor 192:2::12 activate
+ !
+ address-family ipv6 unicast
+ neighbor 192:2::1 activate
+ neighbor 192:2::1 route-reflector-client
+ neighbor 192:2::1 nexthop-local unchanged
+ neighbor 192:2::11 activate
+ neighbor 192:2::11 route-reflector-client
+ neighbor 192:2::11 nexthop-local unchanged
+ neighbor 192:2::12 activate
+ neighbor 192:2::12 route-reflector-client
+ neighbor 192:2::12 nexthop-local unchanged
+ exit-address-family
+!
+
--- /dev/null
+log stdout
+interface rr-eth0
+ ipv6 address 192:2::100/64
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv6_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv6_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering.
+ The two devices exchange VPNv6 updates; updates from r1 originate from the
+ same RD, but carry separate label values.
+
+ +----------+
+ | r11 |
+ |192:2::11 +---+
+ | | | +----+--------+ +----------+
+ +----------+ | 192:2::1  |vrf | r1 |192:168::/112 | r2 |
+ +-------------------+ | 1+--------------+ |
+ +----------+ | |VRF1|AS65500 | | AS65501 |
+ | r12 | | +--------------+ | VPNV6| |VPNV6 |
+ |192:2::12 +---+ |192:168::255:1+-+--+--------+ +----------+
+ | | |
+ +----------+ |
+ |
+ +----------+ |
+ | r13 | |
+ |192:168:: +--------+
+ | 255:13 |
+ +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"]
+PREFIXES_R12 = ["172:31::12/128", "172:31::15/128"]
+PREFIXES_REDIST_R14 = ["172:31::14/128"]
+PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"]
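+# Each list above groups prefixes that are reached through the same nexthop;
+# with per-nexthop label allocation, every prefix within a group must be
+# advertised with one common MPLS label, and different groups must get
+# different labels.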
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create the routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r11")
+ tgen.add_router("r12")
+ tgen.add_router("r13")
+ tgen.add_router("r14")
+ tgen.add_router("rr")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r11"])
+ switch.add_link(tgen.gears["r12"])
+ switch.add_link(tgen.gears["rr"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r13"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
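+ """Set up the vrf1 VRF, MPLS sysctls and interface enslavement on r1 and r2."""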
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf1 type vrf table 10",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+ ]
+ cmds_list_plus = [
+ "ip link set dev {0}-eth2 master vrf1",
+ ]
+
+ for cmd in cmds_list + cmds_list_plus:
+ formatted = cmd.format("r1")
+ logger.info("input: " + formatted)
+ output = tgen.net["r1"].cmd(formatted)
+ logger.info("output: " + output)
+
+ for cmd in cmds_list:
+ formatted = cmd.format("r2")
+ logger.info("input: " + formatted)
+ output = tgen.net["r2"].cmd(formatted)
+ logger.info("output: " + output)
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None):
+ """
+ Dump and check that vpnv6 entries have the same MPLS label value
+ * 'router': the router to check
+ * 'group': the list of prefixes to check. a single label value for the group has to be found
+ * 'label_list': check that the label values are not present in the vpnv6 entries
+ * that list is updated with the present label value
+ * 'label_value_expected': check that the mpls label read is the same as that value
+ """
+
+ stored_label_inited = False
+ for prefix in group:
+ dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
+ for rd, rd_entry in dump.items():
+ for path in rd_entry["paths"]:
+ assert (
+ "remoteLabel" in path.keys()
+ ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+ logger.info(
+ "{0}, {1}, label value is {2}".format(
+ router.name, prefix, path["remoteLabel"]
+ )
+ )
+ if stored_label_inited:
+ assert (
+ path["remoteLabel"] == stored_label
+ ), "{0}, {1}, label value not expected one (expected {2}, observed {3}".format(
+ router.name, prefix, stored_label, path["remoteLabel"]
+ )
+ else:
+ stored_label = path["remoteLabel"]
+ stored_label_inited = True
+ if label_list is not None:
+ assert (
+ stored_label not in label_list
+ ), "{0}, {1}, label already detected in a previous prefix".format(
+ router.name, prefix
+ )
+ label_list.add(stored_label)
+
+ if label_value_expected:
+ assert (
+ path["remoteLabel"] == label_value_expected
+ ), "{0}, {1}, label value not expected (expected {2}, observed {3}".format(
+ router.name, prefix, label_value_expected, path["remoteLabel"]
+ )
+
+
+def bgp_vpnv6_table_check_all(router, label_list=None, same=False):
+ """
+ Dump and check that vpnv6 entries are correctly configured with specific label values
+ * 'router': the router to check
+ * 'label_list': check that the label values are not present in the vpnv6 entries
+ * that list is updated with the present label value found.
+ * 'same': by default, set to False. Addresses groups are classified by addresses.
+ * if set to True, all entries of all groups should have a unique label value
+ """
+ if same:
+ bgp_vpnv6_table_check(
+ router,
+ group=PREFIXES_R11
+ + PREFIXES_R12
+ + PREFIXES_REDIST_R14
+ + PREFIXES_CONNECTED,
+ label_list=label_list,
+ )
+ else:
+ for group in (
+ PREFIXES_R11,
+ PREFIXES_R12,
+ PREFIXES_REDIST_R14,
+ PREFIXES_CONNECTED,
+ ):
+ bgp_vpnv6_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+ """
+ Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+ * 'label_list': the list of labels that should be in inLabel value
+ * 'whitelist': the list of nexthops (IP or interface) that should be on output
+ """
+ nexthop_list = []
+ if blacklist:
+ # seed with the blacklisted entries so the membership asserts below fail on them
+ nexthop_list.extend(blacklist)
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table json", isjson=True)
+ for in_label, label_info in dump.items():
+ if label_list is not None:
+ label_list.add(in_label)
+ for nh in label_info["nexthops"]:
+ assert (
+ nh["installed"] == True and nh["type"] == "BGP"
+ ), "{}, show mpls table, nexthop is not installed".format(router.name)
+ if "nexthop" in nh.keys():
+ assert (
+ nh["nexthop"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ router.name
+ )
+ nexthop_list.append(nh["nexthop"])
+ elif "interface" in nh.keys():
+ assert (
+ nh["interface"] not in nexthop_list
+ ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+ router.name
+ )
+ nexthop_list.append(nh["interface"])
+ else:
+ assert (
+ 0
+ ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+ router.name
+ )
+
+ if whitelist:
+ for entry in whitelist:
+ assert (
+ entry in nexthop_list
+ ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+ router.name, entry
+ )
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ expected = {rd: {"prefix": prefix}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
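+
+# Note: this "not found" helper inverts json_cmp so it can be polled with
+# topotest.run_and_expect(), which retries until the callable returns None.
+# A minimal usage sketch (values from the tests below):
+# test_func = functools.partial(check_show_bgp_vpn_prefix_not_found,
+# tgen.gears["r2"], "ipv6", "172:31::50/128", "444:1", label=oldlabel)
+# success, _ = topotest.run_and_expect(test_func, None, count=10, wait=0.5)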
+
+
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ expected = {rd: {"prefix": prefix}}
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {
+ "inLabel": inlabel,
+ "installed": True,
+ "nexthops": [{"interface": interface}],
+ }
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {"inlabel": inlabel, "installed": True}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def mpls_entry_get_interface(router, label):
+ """
+ Assert that the label is in the MPLS table
+ Assert that an outgoing interface is programmed
+ Return the outgoing interface
+ """
+ outgoing_interface = None
+
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+ assert dump, "{}, show mpls table, inLabel {} not found".format(router.name, label)
+
+ for nh in dump["nexthops"]:
+ assert (
+ "interface" in nh.keys()
+ ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+ router.name, label
+ )
+
+ outgoing_interface = nh["interface"]
+
+ return outgoing_interface
+
+
+def test_protocols_convergence():
+ """
+ Assert that all protocols have converged
+ before checking the BGP statuses, as they depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Check BGP IPv6 routing tables on VRF1 of r1
+ logger.info("Checking BGP IPv6 routes for convergence on r1 VRF1")
+ router = tgen.gears["r1"]
+ json_file = "{}/{}/bgp_ipv6_routes_vrf1.json".format(CWD, router.name)
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf vrf1 ipv6 json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ logger.info("Checking BGP VPNv6 routes for convergence on r2")
+ router = tgen.gears["r2"]
+ json_file = "{}/{}/bgp_vpnv6_routes.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv6 vpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+ # Check BGP labels received on r2
+ logger.info("Checking BGP VPNv6 labels on r2")
+ label_list = set()
+ bgp_vpnv6_table_check_all(tgen.gears["r2"], label_list)
+
+ # Check MPLS labels received on r1
+ mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+ """
+ Turn down a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Unpeering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nno neighbor 192:2::100\n",
+ isjson=False,
+ )
+
+ def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+ output = json.loads(
+ router.vtysh_cmd(
+ "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+ )
+ )
+ expected = {"prefix": prefix}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+ # Check prefix from r11 is not present
+ test_func = functools.partial(
+ _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv6", "172:31::11/128"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r1, prefix 172:31::11/128 from r11 did not disappear. r11 still connected to rr ?"
+
+ # Check that BGP updates received on r2 are not from r11
+ logger.info("Checking BGP VPNv6 labels on r2")
+ for entry in PREFIXES_R11:
+ dump = tgen.gears["r2"].vtysh_cmd(
+ "show bgp ipv6 vpn {} json".format(entry), isjson=True
+ )
+ for rd in dump:
+ assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+ mpls_table_check(tgen.gears["r1"], blacklist=["192:2::11"])
+
+
+def test_flapping_bgp_vrf_up():
+ """
+ Turn up a remote BGP session
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Peering BGP on r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\nneighbor 192:2::100 remote-as 65500\n",
+ isjson=False,
+ )
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv6 unicast\nneighbor 192:2::100 activate\n",
+ isjson=False,
+ )
+
+ # Check r2 gets prefix 172:31::11/128
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::11/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, prefix 172:31::11/128 from r11 not present. r11 still disconnected from rr ?"
+ bgp_vpnv6_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+ """
+ Test static recursive route redistributed over BGP
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nipv6 route 172:31::30/128 172:31::20\n",
+ isjson=False,
+ )
+ logger.info("Checking BGP VPNv6 labels on r2")
+ # that route should be sent along with the label allocated for nexthop 192:2::11
+
+ def _prefix30_not_found(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+ expected = {"444:1": {"prefix": "172:31::30/128"}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+ def _prefix30_found(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+ expected = {"444:1": {"prefix": "172:31::30/128"}}
+ return topotest.json_cmp(output, expected)
+
+ # Check r2 received vpnv6 update with 172:31::30
+ test_func = functools.partial(_prefix30_found, tgen.gears["r2"])
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::30 not found"
+
+ # that route should be sent along with the label allocated for nexthop 192:2::11
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=PREFIXES_R11 + ["172:31::30/128"],
+ )
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+ logger.info("Dumping nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+ logger.info("Disabling recursive static route")
+ tgen.gears["r1"].vtysh_cmd(
+ "configure terminal\nvrf vrf1\nno ipv6 route 172:31::30/128 172:31::20\n",
+ isjson=False,
+ )
+
+ # Check r2 removed 172:31::30 vpnv6 update
+ test_func = functools.partial(_prefix30_not_found, tgen.gears["r2"])
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::30 still present"
+
+
+def test_prefix_changes_interface():
+ """
+ Test BGP update for a given prefix learnt on different interface
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Enabling a 172:31::50/128 prefix for r11")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+ isjson=False,
+ )
+
+ # Check r2 received vpnv6 update with 172:31::50
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, VPNv6 update 172:31::50 not found"
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ label_list = set()
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=PREFIXES_R11 + ["172:31::50/128"],
+ label_list=label_list,
+ )
+
+ assert (
+ len(label_list) == 1
+ ), "Multiple Label values found for updates from r11 found"
+
+ oldlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+ old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ oldlabel, old_outgoing_interface
+ )
+ )
+
+ logger.info("Moving the 172:31::50/128 prefix from r11 to r13")
+ tgen.gears["r11"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+ isjson=False,
+ )
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+ isjson=False,
+ )
+
+ # Check r2 removed 172:31::50 vpnv6 update with old label
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ label=oldlabel,
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert (
+ success
+ ), "r2, vpnv6 update 172:31::50 with old label {0} still present".format(oldlabel)
+
+ # diagnostic
+ logger.info("Dumping label nexthop table")
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+ # Check r2 received new 172:31::50 vpnv6 update
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ tgen.gears["r2"],
+ "ipv6",
+ "172:31::50/128",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r2, vpnv6 update 172:31::50 not found"
+
+ label_list = set()
+ bgp_vpnv6_table_check(
+ tgen.gears["r2"],
+ group=["172:31::13/128", "172:31::50/128"],
+ label_list=label_list,
+ )
+ assert (
+ len(label_list) == 1
+ ), "Multiple Label values found for updates from r13 found"
+
+ newlabel = label_list.pop()
+ logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+ new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+ logger.info(
+ "r1, outgoing interface used by label {} is {}".format(
+ newlabel, new_outgoing_interface
+ )
+ )
+ if old_outgoing_interface == new_outgoing_interface:
+ assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+ logger.info("Restoring state by removing the 172:31::50/128 prefix from r13")
+ tgen.gears["r13"].vtysh_cmd(
+ "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+ isjson=False,
+ )
+
+
+def test_changing_default_label_value():
+ """
+ Change the MPLS default value
+ Check that r1 VPNv6 entries have the 222 label value
+ Check that MPLS entry with old label value is no more present
+ Check that MPLS entry for local traffic has inLabel set to 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+
+ # counting the number of labels used in the VPNv6 table
+ label_list = set()
+ logger.info("r1, VPNv6 table, check the number of labels used before modification")
+ bgp_vpnv6_table_check_all(router, label_list)
+ old_len = len(label_list)
+ assert (
+ old_len != 1
+ ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)
+
+ logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export 222\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 222 not found"
+
+ # check label repartition is ok
+ logger.info("r1, VPNv6 table, check the number of labels used after modification")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list)
+ new_len = len(label_list)
+ assert (
+ old_len == new_len
+ ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+ new_len, old_len
+ )
+
+ logger.info(
+ "r1, VPNv6 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+ )
+ bgp_vpnv6_table_check(router, group=PREFIXES_CONNECTED, label_value_expected=222)
+
+
+def test_unconfigure_allocation_mode_nexthop():
+ """
+ Test unconfiguring allocation mode per nexthop
+ Check on r2 that new MPLS label values have been propagated
+ Check that show mpls table has no entry with label 17 (previously used)
+ Check that all VPN updates on r1 should have label value moved to 222
+ Check that show mpls table will only have 222 label value
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Unconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ dump = router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nno label vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check r1 updated the MPLS entry with the 222 label value
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv6 routes from r1
+ logger.info("Checking VPNv6 routes on r1")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list=label_list, same=True)
+ assert len(label_list) == 1, "r1, multiple Label values found for VPNv6 updates"
+
+ new_label = label_list.pop()
+ assert (
+ new_label == 222
+ ), "r1, wrong label value in VPNv6 table, expected 222, observed {}".format(
+ new_label
+ )
+
+ # Check mpls table with 222 value
+ logger.info("Checking MPLS values on show mpls table of r1")
+ label_list = set()
+ label_list.add(222)
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+ """
+ Test re-configuring allocation mode per nexthop
+ Check that show mpls table has no entry with label 17
+ Check that all VPN updates on r1 should have multiple label values and not only 222
+ Check that show mpls table will have multiple label values and not only 222
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Reconfiguring allocation mode per nexthop")
+ router = tgen.gears["r1"]
+ dump = router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+ isjson=False,
+ )
+
+ # Check that show mpls table has no entry with label 17
+ logger.info(
+ "r1, mpls table, check that MPLS entry with inLabel set to 17 is present"
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, 17
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with label 17 still present"
+
+ # Check vpnv6 routes from r1
+ logger.info("Checking VPNv6 routes on r1")
+ label_list = set()
+ bgp_vpnv6_table_check_all(router, label_list=label_list)
+ assert len(label_list) != 1, "r1, only one label value found for VPNv6 updates"
+
+ # Check mpls table with all values
+ logger.info("Checking MPLS values on show mpls table of r1")
+ mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
),
)
+ parser.addoption(
+ "--memleaks",
+ action="store_true",
+ help="Report memstat results as errors",
+ )
+
parser.addoption(
"--pause",
action="store_true",
)
-def check_for_memleaks():
+def check_for_valgrind_memleaks():
assert topotest.g_pytest_config.option.valgrind_memleaks
leaks = []
pytest.fail("valgrind memleaks found for daemons: " + " ".join(daemons))
+def check_for_memleaks():
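+ """Scan the daemons' .err files for new memstats reports and fail the test if any are found."""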
+ leaks = []
+ tgen = get_topogen() # pylint: disable=redefined-outer-name
+ latest = []
+ existing = []
+ if tgen is not None:
+ logdir = tgen.logdir
+ if hasattr(tgen, "memstat_existing_files"):
+ existing = tgen.memstat_existing_files
+ latest = glob.glob(os.path.join(logdir, "*/*.err"))
+
+ daemons = []
+ for vfile in latest:
+ if vfile in existing:
+ continue
+ with open(vfile, encoding="ascii") as vf:
+ vfcontent = vf.read()
+ num = vfcontent.count("memstats:")
+ if num:
+ existing.append(vfile) # have summary don't check again
+ emsg = "{} types in {}".format(num, vfile)
+ leaks.append(emsg)
+ daemon = re.match(r".*test[a-z_A-Z0-9\+]*/(.*)\.err", vfile).group(1)
+ daemons.append("{}({})".format(daemon, num))
+
+ if tgen is not None:
+ tgen.memstat_existing_files = existing
+
+ if leaks:
+ logger.error("memleaks found:\n\t%s", "\n\t".join(leaks))
+ pytest.fail("memleaks found for daemons: " + " ".join(daemons))
+
+
@pytest.fixture(autouse=True, scope="module")
def module_check_memtest(request):
yield
if request.config.option.valgrind_memleaks:
+ if get_topogen() is not None:
+ check_for_valgrind_memleaks()
+ if request.config.option.memleaks:
if get_topogen() is not None:
check_for_memleaks()
# Check for leaks if requested
if item.config.option.valgrind_memleaks:
+ check_for_valgrind_memleaks()
+ if item.config.option.memleaks:
check_for_memleaks()
if config.option.topology_only and is_xdist:
pytest.exit("Cannot use --topology-only with distributed test mode")
+ pytest.exit("Cannot use --topology-only with distributed test mode")
+
# Check environment now that we have config
if not diagnose_env(rundir):
pytest.exit("environment has errors, please read the logs in %s" % rundir)
+ # Tie TOPOTESTS_CHECK_MEMLEAK to the --memleaks flag
+ if config.option.memleaks:
+ if "TOPOTESTS_CHECK_MEMLEAK" not in os.environ:
+ os.environ["TOPOTESTS_CHECK_MEMLEAK"] = "/dev/null"
+ else:
+ if "TOPOTESTS_CHECK_MEMLEAK" in os.environ:
+ del os.environ["TOPOTESTS_CHECK_MEMLEAK"]
+ if "TOPOTESTS_CHECK_STDERR" in os.environ:
+ del os.environ["TOPOTESTS_CHECK_STDERR"]
+
@pytest.fixture(autouse=True, scope="session")
def setup_session_auto():
"192.168.1.2":{
"remoteAs":65002,
"version":4,
- "tableVersion":0,
"outq":0,
"inq":0,
"pfxRcd":3,
"192.168.2.3":{
"remoteAs":65003,
"version":4,
- "tableVersion":0,
"outq":0,
"inq":0,
"pfxRcd":3,
router,
)
else:
-
ipv4_data = bgp_addr_data.setdefault("ipv4", {})
ipv6_data = bgp_addr_data.setdefault("ipv6", {})
l2vpn_data = bgp_addr_data.setdefault("l2vpn", {})
if advertise_data:
for address_type, unicast_type in advertise_data.items():
-
if type(unicast_type) is dict:
for key, value in unicast_type.items():
cmd = "advertise {} {}".format(address_type, key)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
-
new_topo = deepcopy(topo["routers"])
router_dict = {}
for router in input_dict.keys():
total_peer = 0
for addr_type in bgp_addr_type.keys():
-
if not check_address_types(addr_type):
continue
peer_uptime_after_clear_bgp = {}
# Verifying BGP convergence after bgp clear command
for retry in range(50):
-
# Waiting for BGP to converge
logger.info(
"Waiting for %s sec for BGP to converge on router" " %s...",
if lmode is None:
if "graceful-restart" in input_dict[dut]["bgp"]:
-
if (
"graceful-restart" in input_dict[dut]["bgp"]["graceful-restart"]
and input_dict[dut]["bgp"]["graceful-restart"][
if rmode is None:
if "graceful-restart" in input_dict[peer]["bgp"]:
-
if (
"graceful-restart"
in input_dict[peer]["bgp"]["graceful-restart"]
eor_json = show_bgp_graceful_json_out[afi]["endOfRibStatus"]
if "endOfRibSend" in eor_json:
-
if eor_json["endOfRibSend"]:
logger.info(
"[DUT: %s]: EOR Send true for %s " "%s", dut, neighbor_ip, afi
"timer"
].items():
if rs_timer == "restart-time":
-
receivedTimer = value
if (
show_bgp_graceful_json_out["timers"][
tgen = get_topogen()
for router, rnode in tgen.routers().items():
if router == dut:
-
if vrf:
ipv4_cmd = "sh ip bgp vrf {} summary json".format(vrf)
show_bgp_json_ipv4 = run_frr_cmd(rnode, ipv4_cmd, isjson=True)
connected_routes = {}
for router, rnode in tgen.routers().items():
if router == dut:
-
ipv4_routes = run_frr_cmd(rnode, "sh ip bgp json", isjson=True)
ipv6_routes = run_frr_cmd(rnode, "sh ip bgp ipv6 unicast json", isjson=True)
is_ipv4_default_attrib_found = False
import json
import re
+
# gpz: get rib in json form and compare against desired routes
class BgpRib:
def log(self, str):
def apply_raw_config(tgen, input_dict):
-
"""
API to configure raw configuration on device. This can be used for any cli
which has not been implemented in JSON.
try:
router_list = tgen.routers()
for router, rnode in router_list.items():
-
result = rnode.check_router_running()
if result != "":
daemons = []
rlist = []
for router in input_dict.keys():
-
interface_list = input_dict[router]["interface_list"]
status = input_dict[router].setdefault("status", "up")
for intf in interface_list:
continue
rmap_data = []
for rmap_name, rmap_value in input_dict[router]["route_maps"].items():
-
for rmap_dict in rmap_value:
del_action = rmap_dict.setdefault("delete", False)
group_addr_range = [group_addr_range]
for grp_addr in group_addr_range:
-
addr_type = validate_ip_address(grp_addr)
if addr_type == "ipv4":
if next_hop is not None:
if "brctl" in input_dict[dut]:
for brctl_dict in input_dict[dut]["brctl"]:
-
brctl_names = brctl_dict.setdefault("brctl_name", [])
addvxlans = brctl_dict.setdefault("addvxlan", [])
stp_values = brctl_dict.setdefault("stp", [])
for brctl_name, vxlan, vrf, stp in zip(
brctl_names, addvxlans, vrfs, stp_values
):
-
ip_cmd_list = []
cmd = "ip link add name {} type bridge stp_state {}".format(
brctl_name, stp
for static_route in static_routes:
if "vrf" in static_route and static_route["vrf"] is not None:
-
logger.info(
"[DUT: {}]: Verifying routes for VRF:"
" {}".format(router, static_route["vrf"])
for static_route in static_routes:
if "vrf" in static_route and static_route["vrf"] is not None:
-
logger.info(
"[DUT: {}]: Verifying routes for VRF:"
" {}".format(router, static_route["vrf"])
self.log("unable to read: " + tstFile)
sys.exit(1)
- def command(self, target, command, regexp, op, result, returnJson, startt=None, force_result=False):
+ def command(
+ self,
+ target,
+ command,
+ regexp,
+ op,
+ result,
+ returnJson,
+ startt=None,
+ force_result=False,
+ ):
global net
if op == "jsoncmp_pass" or op == "jsoncmp_fail":
returnJson = True
if strict and (wait_count == 1):
force_result = True
- found = self.command(target, command, regexp, op, result, returnJson, startt, force_result)
+ found = self.command(
+ target, command, regexp, op, result, returnJson, startt, force_result
+ )
if found is not False:
break
# initialized by luStart
LUtil = None
+
# entry calls
def luStart(
baseScriptDir=".",
if printed > 0:
logger.error("See %s for details of errors" % LUtil.fout_name)
+
#
# Sets default wait type for luCommand(op="wait) (may be overridden by
# specifying luCommand(op="wait-strict") or luCommand(op="wait-nostrict")).
shellopt = self.cfgopt.get_option_list("--shell")
if "all" in shellopt or "." in shellopt:
- self.run_in_window("bash")
+ self.run_in_window("bash", title="munet")
# This is expected by newer munet CLI code
self.config_dirname = ""
# ospf gr information
gr_data = ospf_data.setdefault("graceful-restart", {})
if gr_data:
-
if "opaque" in gr_data and gr_data["opaque"]:
cmd = "capability opaque"
if gr_data.setdefault("delete", False):
else:
data_ip = topo["routers"][ospf_nbr]["links"]
data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"]
+ logger.info("ospf neighbor %s: router-id: %s", router, data_rid)
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
try:
nh_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[0]
except KeyError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
- router, nbr_rid, ospf_nbr
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
)
return errormsg
return errormsg
for ospf_nbr, nbr_data in ospf_nbr_list.items():
-
try:
data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
except KeyError:
return errormsg
continue
else:
-
for router, rnode in tgen.routers().items():
if "ospf6" not in topo["routers"][router]:
continue
data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
"router_id"
]
-
+ logger.info("ospf neighbor %s: router-id: %s", ospf_nbr, data_rid)
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
nh_state = get_index_val.get(neighbor_ip)["state"]
intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
- router, nbr_rid, ospf_nbr
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
)
return errormsg
continue
if st_rt in ospf_rib_json:
-
st_found = True
found_routes.append(st_rt)
topo_data = topo["routers"]
for router in router_list.keys():
-
if "pim" not in topo_data[router]:
continue
and data["outboundInterface"] in oil
):
if return_uptime:
-
uptime_dict[grp_addr][src_address] = data["upTime"]
logger.info(
for intf, data in input_dict[dut].items():
interface_json = show_pim_intf_traffic_json[intf]
for state in data:
-
# Verify Tx/Rx
if state in interface_json:
output_dict[dut][state] = interface_json[state]
for intf, data in input_dict[dut].items():
interface_json = show_pim_intf_traffic_json[intf]
for state in data:
-
# Verify Tx/Rx
if state in interface_json:
output_dict[dut][state] = interface_json[state]
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if "pim" in topo["routers"][dut]:
-
logger.info("[DUT: %s]: Verifying ip pim upstream rpf:", dut)
rnode = tgen.routers()[dut]
grp_addr = grp_addr.split("/")[0]
for source, data in interface_json[grp_addr].items():
-
# Verify pim join
if pim_join:
if data["group"] == grp_addr and data["channelJoinName"] == "JOIN":
rnode = tgen.routers()[dut]
for interface, data in input_dict[dut]["igmp"]["interfaces"].items():
-
statistics = False
report = False
if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]:
rnode = tgen.routers()[dut]
for interface, data in input_dict[dut]["pim"]["interfaces"].items():
-
logger.info("[DUT: %s]: Verifying PIM interface %s detail:", dut, interface)
show_ip_igmp_intf_json = run_frr_cmd(
elif (
interface_json["pktsIn"] != 0 and interface_json["bytesIn"] != 0
):
-
traffic_dict[traffic_type][interface][
"pktsIn"
] = interface_json["pktsIn"]
interface_json["pktsOut"] != 0
and interface_json["bytesOut"] != 0
):
-
traffic_dict[traffic_type][interface][
"pktsOut"
] = interface_json["pktsOut"]
group_addresses = [group_addresses]
if interface not in show_ip_local_igmp_json:
-
errormsg = (
"[DUT %s]: Verifying local IGMP group received"
" from interface %s [FAILED]!! " % (dut, interface)
for intf, data in input_dict[dut].items():
interface_json = show_pim_intf_traffic_json[intf]
for state in data:
-
# Verify Tx/Rx
if state in interface_json:
output_dict[dut][state] = interface_json[state]
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
for interface, data in input_dict[dut]["mld"]["interfaces"].items():
-
statistics = False
report = False
if "statistics" in input_dict[dut]["mld"]["interfaces"][interface]["mld"]:
rnode = tgen.routers()[dut]
for interface, data in input_dict[dut]["pim6"]["interfaces"].items():
-
logger.info(
"[DUT: %s]: Verifying PIM6 interface %s detail:", dut, interface
)
group_addresses = [group_addresses]
if interface not in show_ipv6_local_mld_json["default"]:
-
errormsg = (
"[DUT %s]: Verifying local MLD group received"
" from interface %s [FAILED]!! " % (dut, interface)
def test_json_list_nested_with_objects():
-
dcomplete = [{"key": 1, "list": [123]}, {"key": 2, "list": [123]}]
dsub1 = [{"key": 2, "list": [123]}, {"key": 1, "list": [123]}]
for destRouterLink, data in sorted(
topo["switches"][curSwitch]["links"].items()
):
-
# Loopback interfaces
if "dst_node" in data:
destRouter = data["dst_node"]
destRouter = destRouterLink
if destRouter in listAllRouters:
-
topo["routers"][destRouter]["links"][curSwitch] = deepcopy(
topo["switches"][curSwitch]["links"][destRouterLink]
)
func_dict = OrderedDict(
[
("vrfs", create_vrf_cfg),
+ ("ospf", create_router_ospf),
("links", create_interfaces_cfg),
("static_routes", create_static_routes),
("prefix_lists", create_prefix_lists),
("igmp", create_igmp_config),
("mld", create_mld_config),
("bgp", create_router_bgp),
- ("ospf", create_router_ospf),
]
)
tgen = create_tgen_from_json(testfile, json_file)
# Start routers (and their daemons)
- start_topology(tgen, topo_daemons(tgen))
+ start_topology(tgen)
# Configure routers
build_config_from_json(tgen)
if not running:
break
- if not running:
- return ""
-
- logger.warning(
- "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
- )
- for name, pid in running:
- pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
- logger.info("%s: killing %s", self.name, name)
- self.cmd("kill -SIGBUS %d" % pid)
- self.cmd("rm -- " + pidfile)
-
- sleep(
- 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
- )
+ if running:
+ logger.warning(
+ "%s: sending SIGBUS to: %s",
+ self.name,
+ ", ".join([x[0] for x in running]),
+ )
+ for name, pid in running:
+ pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
+ logger.info("%s: killing %s", self.name, name)
+ self.cmd("kill -SIGBUS %d" % pid)
+ self.cmd("rm -- " + pidfile)
+
+ sleep(
+ 0.5,
+ "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name,
+ )
errors = self.checkRouterCores(reportOnce=True)
if self.checkRouterVersion("<", minErrorVersion):
"""
# Unfortunately this API allows for source to not exist for any and all routers.
+ if source is None:
+ source = f"{daemon}.conf"
+
if source:
head, tail = os.path.split(source)
if not head and not self.path_exists(tail):
self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
self.cmd_raises("cp {} {}".format(source, conf_file))
- if not self.unified_config or daemon == "frr":
+ if not (self.unified_config or daemon == "frr"):
self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
self.cmd_raises("chmod 664 {}".format(conf_file))
return self.getLog("out", daemon)
def getLog(self, log, daemon):
- return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
+ filename = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
+ log = ""
+ with open(filename) as file:
+ log = file.read()
+ return log
def startRouterDaemons(self, daemons=None, tgen=None):
"Starts FRR daemons for this router."
logger.info(
"%s: %s %s launched in gdb window", self, self.routertype, daemon
)
- elif daemon in perfds and (self.name in perfds[daemon] or "all" in perfds[daemon]):
+ elif daemon in perfds and (
+ self.name in perfds[daemon] or "all" in perfds[daemon]
+ ):
cmdopt += rediropt
- cmd = " ".join(["perf record {} --".format(perf_options), binary, cmdopt])
+ cmd = " ".join(
+ ["perf record {} --".format(perf_options), binary, cmdopt]
+ )
p = self.popen(cmd)
self.perf_daemons[daemon] = p
if p.poll() and p.returncode:
tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))
for tailf in tail_log_files:
- self.run_in_window("tail -f " + tailf, title=tailf, background=True)
+ self.run_in_window("tail -n10000 -F " + tailf, title=tailf, background=True)
return ""
# XXX not appropriate for ssh
cmd = ["sudo", "-Eu", os.environ["SUDO_USER"]] + cmd
- if not isinstance(nscmd, str):
- nscmd = shlex.join(nscmd)
- cmd.append(nscmd)
+ if title:
+ cmd.append("-t")
+ cmd.append(title)
+
+ if isinstance(nscmd, str):
+ nscmd = shlex.split(nscmd)
+ cmd.extend(nscmd)
elif "DISPLAY" in os.environ:
cmd = [get_exec_path_host("xterm")]
if "SUDO_USER" in os.environ:
elif master_fd in r:
o = os.read(master_fd, 10240)
if o:
- iow.write(o.decode("utf-8"))
+ iow.write(o.decode("utf-8", "ignore"))
iow.flush()
finally:
# restore tty settings back
def make_help_str(unet):
-
w = sorted([x if x else "" for x in unet.cli_in_window_cmds])
ww = unet.cli_in_window_cmds
u = sorted([x if x else "" for x in unet.cli_run_cmds])
async def doline(
unet, line, outf, background=False, notty=False
): # pylint: disable=R0911
-
line = line.strip()
m = re.fullmatch(r"^(\S+)(?:\s+(.*))?$", line)
if not m:
rb = rb[: -len(ENDMARKER)]
# Write the output
- sys.stdout.write(rb.decode("utf-8"))
+ sys.stdout.write(rb.decode("utf-8", "ignore"))
async def local_cli(unet, outf, prompt, histfile, background):
self.writer = writer
def write(self, x):
- self.writer.write(x.encode("utf-8"))
+ self.writer.write(x.encode("utf-8", "ignore"))
def flush(self):
self.writer.flush()
check_routers(restarting="rt7")
+#
+# Test rt1 performing an unplanned graceful restart
+#
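+# "Unplanned" means the daemon is killed without prior notice
+# (save_config=False); on restart it re-originates Grace-LSAs and relies on
+# its neighbors acting as helpers to keep forwarding state.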
+def test_unplanned_gr_rt1():
+ logger.info("Test: verify rt1 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt1", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt1", ["ospf6d"])
+
+ expect_grace_lsa(restarting="1.1.1.1", helper="rt2")
+ ensure_gr_is_in_zebra("rt1")
+ check_routers(restarting="rt1")
+
+
+#
+# Test rt2 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt2():
+ logger.info("Test: verify rt2 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt2", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt2", ["ospf6d"])
+
+ expect_grace_lsa(restarting="2.2.2.2", helper="rt1")
+ expect_grace_lsa(restarting="2.2.2.2", helper="rt3")
+ ensure_gr_is_in_zebra("rt2")
+ check_routers(restarting="rt2")
+
+
+#
+# Test rt3 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt3():
+ logger.info("Test: verify rt3 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt3", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt3", ["ospf6d"])
+
+ expect_grace_lsa(restarting="3.3.3.3", helper="rt2")
+ expect_grace_lsa(restarting="3.3.3.3", helper="rt4")
+ expect_grace_lsa(restarting="3.3.3.3", helper="rt6")
+ ensure_gr_is_in_zebra("rt3")
+ check_routers(restarting="rt3")
+
+
+#
+# Test rt4 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt4():
+ logger.info("Test: verify rt4 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt4", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt4", ["ospf6d"])
+
+ expect_grace_lsa(restarting="4.4.4.4", helper="rt3")
+ expect_grace_lsa(restarting="4.4.4.4", helper="rt5")
+ ensure_gr_is_in_zebra("rt4")
+ check_routers(restarting="rt4")
+
+
+#
+# Test rt5 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt5():
+ logger.info("Test: verify rt5 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt5", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt5", ["ospf6d"])
+
+ expect_grace_lsa(restarting="5.5.5.5", helper="rt4")
+ ensure_gr_is_in_zebra("rt5")
+ check_routers(restarting="rt5")
+
+
+#
+# Test rt6 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt6():
+ logger.info("Test: verify rt6 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt6", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt6", ["ospf6d"])
+
+ expect_grace_lsa(restarting="6.6.6.6", helper="rt3")
+ expect_grace_lsa(restarting="6.6.6.6", helper="rt7")
+ ensure_gr_is_in_zebra("rt6")
+ check_routers(restarting="rt6")
+
+
+#
+# Test rt7 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt7():
+ logger.info("Test: verify rt7 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt7", ["ospf6d"], save_config=False)
+ start_router_daemons(tgen, "rt7", ["ospf6d"])
+
+ expect_grace_lsa(restarting="6.6.6.6", helper="rt6")
+ ensure_gr_is_in_zebra("rt7")
+ check_routers(restarting="rt7")
+
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
"""
TOPOLOGY =
Please view in a fixed-width font such as Courier.
- +---+ A0 +---+
- +R1 +------------+R2 |
+ +---+ A0 +---+
+ |R1 +------------+R2 |
+-+-+- +--++
| -- -- |
| -- A0 -- |
| -- -- |
| -- -- |
+-+-+- +-+-+
- +R0 +-------------+R3 |
- +---+ A0 +---+
+ |R0 +-------------+R3 |
+ +---+ A0 +---+
TESTCASES =
1. OSPF summarisation functionality.
ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"]
- ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf": {"summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]}
step("Repeat steps 1 to 10 of summarisation in non Back bone area.")
reset_config_on_routers(tgen)
- step("Change the area id on the interface on R0")
+ step("Change the area id on the interface on R0 to R1 from 0.0.0.0 to 0.0.0.1")
input_dict = {
"r0": {
"links": {
result = create_interfaces_cfg(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
- step("Change the area id on the interface ")
+ step("Change the area id on the interface on R1 to R0 from 0.0.0.0 to 0.0.0.1")
input_dict = {
"r1": {
"links": {
check_routers(restarting="rt7")
+#
+# Test rt1 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt1():
+ logger.info("Test: verify rt1 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt1", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt1", ["ospfd"])
+
+ expect_grace_lsa(restarting="1.1.1.1", area="0.0.0.1", helper="rt2")
+ ensure_gr_is_in_zebra("rt1")
+ check_routers(restarting="rt1")
+
+
+#
+# Test rt2 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt2():
+ logger.info("Test: verify rt2 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt2", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt2", ["ospfd"])
+
+ expect_grace_lsa(restarting="2.2.2.2", area="0.0.0.1", helper="rt1")
+ expect_grace_lsa(restarting="2.2.2.2", area="0.0.0.0", helper="rt3")
+ ensure_gr_is_in_zebra("rt2")
+ check_routers(restarting="rt2")
+
+
+#
+# Test rt3 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt3():
+ logger.info("Test: verify rt3 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt3", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt3", ["ospfd"])
+
+ expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt2")
+ expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt4")
+ expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt6")
+ ensure_gr_is_in_zebra("rt3")
+ check_routers(restarting="rt3")
+
+
+#
+# Test rt4 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt4():
+ logger.info("Test: verify rt4 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt4", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt4", ["ospfd"])
+
+ expect_grace_lsa(restarting="4.4.4.4", area="0.0.0.0", helper="rt3")
+ expect_grace_lsa(restarting="4.4.4.4", area="0.0.0.2", helper="rt5")
+ ensure_gr_is_in_zebra("rt4")
+ check_routers(restarting="rt4")
+
+
+#
+# Test rt5 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt5():
+ logger.info("Test: verify rt5 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt5", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt5", ["ospfd"])
+
+ expect_grace_lsa(restarting="5.5.5.5", area="0.0.0.2", helper="rt4")
+ ensure_gr_is_in_zebra("rt5")
+ check_routers(restarting="rt5")
+
+
+#
+# Test rt6 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt6():
+ logger.info("Test: verify rt6 performing an unplanned graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt6", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt6", ["ospfd"])
+
+ expect_grace_lsa(restarting="6.6.6.6", area="0.0.0.0", helper="rt3")
+ expect_grace_lsa(restarting="6.6.6.6", area="0.0.0.3", helper="rt7")
+ ensure_gr_is_in_zebra("rt6")
+ check_routers(restarting="rt6")
+
+
+#
+# Test rt7 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt7():
+ logger.info("Test: verify rt7 performing a graceful restart")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ kill_router_daemons(tgen, "rt7", ["ospfd"], save_config=False)
+ start_router_daemons(tgen, "rt7", ["ospfd"])
+
+ expect_grace_lsa(restarting="7.7.7.7", area="0.0.0.3", helper="rt6")
+ ensure_gr_is_in_zebra("rt7")
+ check_routers(restarting="rt7")
+
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
import pytest
-from lib.common_config import retry, run_frr_cmd, step
+from lib.common_config import (
+ retry,
+ run_frr_cmd,
+ step,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+)
+
from lib.micronet import Timeout, comm_error
from lib.topogen import Topogen, TopoRouter
from lib.topotest import interface_set_status, json_cmp
_test_opaque_add_del(tgen, apibin)
+def _test_opaque_add_restart_add(tgen, apibin):
+ "Test adding an opaque LSA and then restarting ospfd"
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ p = None
+ pread = None
+ # Log to our stdin, stderr
+ pout = open(os.path.join(r1.net.logdir, "r1/add-del.log"), "a+")
+ try:
+ step("reachable: check for add notification")
+ pread = r2.popen(
+ ["/usr/bin/timeout", "120", apibin, "-v", "--logtag=READER", "wait,120"],
+ encoding=None, # don't buffer
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ p = r1.popen(
+ [
+ apibin,
+ "-v",
+ "add,10,1.2.3.4,231,1",
+ "add,10,1.2.3.4,231,1,feedaceebeef",
+ "wait, 5",
+ "add,10,1.2.3.4,231,1,feedaceedeadbeef",
+ "wait, 5",
+ "add,10,1.2.3.4,231,1,feedaceebaddbeef",
+ "wait, 5",
+ ]
+ )
+ add_input_dict = {
+ "areas": {
+ "1.2.3.4": {
+ "areaLocalOpaqueLsa": [
+ {
+ "lsId": "231.0.0.1",
+ "advertisedRouter": "1.0.0.0",
+ "sequenceNumber": "80000004",
+ "checksum": "3128",
+ },
+ ],
+ "areaLocalOpaqueLsaCount": 1,
+ },
+ },
+ }
+ step("Check for add LSAs")
+ json_cmd = "show ip ospf da json"
+ assert verify_ospf_database(tgen, r1, add_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
+
+ step("Shutdown the interface on r1 to isolate it for r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+ time.sleep(2)
+ step("Reset the client")
+ p.send_signal(signal.SIGINT)
+ time.sleep(2)
+ p.wait()
+ p = None
+
+ step("Kill ospfd on R1")
+ kill_router_daemons(tgen, "r1", ["ospfd"])
+ time.sleep(2)
+
+ step("Bring ospfd on R1 back up")
+ start_router_daemons(tgen, "r1", ["ospfd"])
+
+ p = r1.popen(
+ [
+ apibin,
+ "-v",
+ "add,10,1.2.3.4,231,1",
+ "add,10,1.2.3.4,231,1,feedaceecafebeef",
+ "wait, 5",
+ ]
+ )
+
+ step("Bring the interface on r1 back up for connection to r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+ step("Verify area opaque LSA refresh")
+ json_cmd = "show ip ospf da opaque-area json"
+ add_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [
+ {
+ "linkStateId": "231.0.0.1",
+ "advertisingRouter": "1.0.0.0",
+ "lsaSeqNumber": "80000005",
+ "checksum": "a87e",
+ "length": 28,
+ "opaqueDataLength": 8,
+ },
+ ],
+ },
+ },
+ }
+ assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+
+ step("Shutdown the interface on r1 to isolate it for r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+ time.sleep(2)
+ step("Reset the client")
+ p.send_signal(signal.SIGINT)
+ time.sleep(2)
+ p.wait()
+ p = None
+
+ step("Kill ospfd on R1")
+ kill_router_daemons(tgen, "r1", ["ospfd"])
+ time.sleep(2)
+
+ step("Bring ospfd on R1 back up")
+ start_router_daemons(tgen, "r1", ["ospfd"])
+
+ step("Bring the interface on r1 back up for connection to r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+ step("Verify area opaque LSA Purging")
+ json_cmd = "show ip ospf da opaque-area json"
+ add_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [
+ {
+ "lsaAge": 3600,
+ "linkStateId": "231.0.0.1",
+ "advertisingRouter": "1.0.0.0",
+ "lsaSeqNumber": "80000005",
+ "checksum": "a87e",
+ "length": 28,
+ "opaqueDataLength": 8,
+ },
+ ],
+ },
+ },
+ }
+ assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+ step("Verify Area Opaque LSA removal after timeout (60 seconds)")
+ time.sleep(60)
+ json_cmd = "show ip ospf da opaque-area json"
+ timeout_detail_input_dict = {
+ "areaLocalOpaqueLsa": {
+ "areas": {
+ "1.2.3.4": [],
+ },
+ },
+ }
+ assert (
+ verify_ospf_database(tgen, r1, timeout_detail_input_dict, json_cmd) is None
+ )
+ assert (
+ verify_ospf_database(tgen, r2, timeout_detail_input_dict, json_cmd) is None
+ )
+
+ except Exception:
+ if p:
+ p.terminate()
+ if p.wait():
+ comm_error(p)
+ p = None
+ raise
+ finally:
+ if pread:
+ pread.terminate()
+ pread.wait()
+ if p:
+ p.terminate()
+ p.wait()
+
+
+@pytest.mark.parametrize("tgen", [2], indirect=True)
+def test_ospf_opaque_restart(tgen):
+ apibin = os.path.join(CLIENTDIR, "ospfclient.py")
+ rc, o, e = tgen.gears["r2"].net.cmd_status([apibin, "--help"])
+ logging.debug("%s --help: rc: %s stdout: '%s' stderr: '%s'", apibin, rc, o, e)
+ _test_opaque_add_restart_add(tgen, apibin)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
- ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf6": {
# Skip pytests example directory
[pytest]
-asyncio_mode = auto
+# asyncio_mode = auto
# We always turn this on inside conftest.py, default shown
# addopts = --junitxml=<rundir>/topotests.xml
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r4-eth0
+ ip address 192.168.1.4/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r5-eth0
+ ip address 192.168.1.5/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
pytestmark = [pytest.mark.ripd]
def setup_module(mod):
- topodef = {"s1": ("r1", "r2", "r3")}
+ topodef = {"s1": ("r1", "r2", "r3", "r4", "r5")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
_, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip rip`"
- def _show_routes():
+ def _show_routes(nh_num):
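+ # Verify zebra's RIB has the expected number of nexthops for the route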
output = json.loads(r1.vtysh_cmd("show ip route json"))
expected = {
"10.10.10.1/32": [
{
+ "internalNextHopNum": nh_num,
+ "internalNextHopActiveNum": nh_num,
"nexthops": [
{
"ip": "192.168.1.2",
"ip": "192.168.1.3",
"active": True,
},
- ]
+ ],
}
]
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_show_routes)
+ test_func = functools.partial(_show_routes, 4)
_, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
- assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip route`"
+ assert result is None, "Can't see 10.10.10.1/32 as multipath (4) in `show ip route`"
+
+ step(
+ "Configure allow-ecmp 2, ECMP group routes SHOULD have next-hops with the lowest IPs"
+ )
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router rip
+ allow-ecmp 2
+ """
+ )
+
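+ # ripd should now retain only the two lowest next-hop IPs, and zebra
+ # should show exactly two installed nexthops for the route.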
+ test_func = functools.partial(_show_rip_routes)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert (
+ result is None
+ ), "Can't see 10.10.10.1/32 as ECMP with the lowest next-hop IPs"
+
+ test_func = functools.partial(_show_routes, 2)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Can't see 10.10.10.1/32 as multipath (2) in `show ip route`"
if __name__ == "__main__":
"instance": [
{
"vrf": "default",
- "allow-ecmp": "true",
+ "allow-ecmp": 1,
"distance": {
"source": [
{
<ripd xmlns="http://frrouting.org/yang/ripd">
<instance>
<vrf>default</vrf>
- <allow-ecmp>true</allow-ecmp>
+ <allow-ecmp>1</allow-ecmp>
<static-route>10.0.1.0/24</static-route>
<distance>
<source>
prefix rt-types;
}
+ import frr-route-types {
+ prefix frr-route-types;
+ }
+
organization
"Free Range Routing";
contact
"Match IPv6 next hop address";
}
+ identity source-protocol {
+ base frr-route-map:rmap-match-type;
+ description
+ "Match protocol via which the route was learnt";
+ }
+
identity distance {
base frr-route-map:rmap-set-type;
description
"IPv6 address";
}
}
+
+ case source-protocol {
+ when "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:source-protocol')";
+ leaf source-protocol {
+ type frr-route-types:frr-route-types;
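+ description "Match protocol via which the route was learnt";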
+ }
+ }
}
augment "/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:rmap-set-action/frr-route-map:set-action" {
"VRF name.";
}
leaf allow-ecmp {
- type boolean;
- default "false";
+ type uint8;
+ default 0;
- description
- "Allow equal-cost multi-path.";
+ description
+ "Allow equal-cost multi-path; value is the maximum number of paths (0 = disabled).";
}
memory_order_relaxed);
}
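+/*
+ * Pop a context from a provider's output queue and keep the
+ * out-queue counter in sync.
+ */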
+static struct zebra_dplane_ctx *
+dplane_provider_dequeue_out_ctx(struct zebra_dplane_provider *prov)
+{
+ struct zebra_dplane_ctx *ctx;
+
+ ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+ if (!ctx)
+ return NULL;
+
+ atomic_fetch_sub_explicit(&(prov->dp_out_queued), 1,
+ memory_order_relaxed);
+
+ return ctx;
+}
+
/*
* Accessor for provider object
*/
dplane_provider_lock(prov);
while (counter < limit) {
- ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+ ctx = dplane_provider_dequeue_out_ctx(prov);
if (ctx) {
dplane_ctx_list_add_tail(&work_list, ctx);
counter++;
static struct zebra_nhlfe *
nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels,
- bool is_backup);
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels, bool is_backup);
static int nhlfe_del(struct zebra_nhlfe *nhlfe);
static void nhlfe_free(struct zebra_nhlfe *nhlfe);
static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
changed++;
} else {
/* Add LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, lsp_type, nexthop->type,
- &nexthop->gate, nexthop->ifindex,
- nexthop->nh_label->num_labels,
- nexthop->nh_label->label,
- false /*backup*/);
+ nhlfe = nhlfe_add(
+ lsp, lsp_type, nexthop->type, &nexthop->gate,
+ nexthop->ifindex, nexthop->vrf_id,
+ nexthop->nh_label->num_labels,
+ nexthop->nh_label->label, false /*backup*/);
if (!nhlfe)
return -1;
/*
* Locate NHLFE that matches with passed info.
+ * TODO: handle vrf_id if the VRF backend is netns-based
*/
static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
enum lsp_types_t lsp_type,
static struct zebra_nhlfe *
nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels)
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels)
{
struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
nexthop_add_labels(nexthop, lsp_type, num_labels, labels);
- nexthop->vrf_id = VRF_DEFAULT;
+ nexthop->vrf_id = vrf_id;
nexthop->type = gtype;
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
* Add primary or backup NHLFE. Base entry must have been created and
* duplicate check done.
*/
-static struct zebra_nhlfe *nhlfe_add(struct zebra_lsp *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels,
- const mpls_label_t *labels, bool is_backup)
+static struct zebra_nhlfe *
+nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+ const mpls_label_t *labels, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
if (!lsp)
return NULL;
- /* Must have labels */
- if (num_labels == 0 || labels == NULL) {
- if (IS_ZEBRA_DEBUG_MPLS)
- zlog_debug("%s: invalid nexthop: no labels", __func__);
-
- return NULL;
- }
-
/* Allocate new object */
- nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- labels);
+ nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, vrf_id,
+ num_labels, labels);
if (!nhlfe)
return NULL;
json_nhlfe = json_object_new_object();
json_object_string_add(json_nhlfe, "type", nhlfe_type2str(nhlfe->type));
- json_object_int_add(json_nhlfe, "outLabel",
- nexthop->nh_label->label[0]);
-
- json_label_stack = json_object_new_array();
- json_object_object_add(json_nhlfe, "outLabelStack", json_label_stack);
- for (i = 0; i < nexthop->nh_label->num_labels; i++)
- json_object_array_add(
- json_label_stack,
- json_object_new_int(nexthop->nh_label->label[i]));
-
+ if (nexthop->nh_label) {
+ json_object_int_add(json_nhlfe, "outLabel",
+ nexthop->nh_label->label[0]);
+ json_label_stack = json_object_new_array();
+ json_object_object_add(json_nhlfe, "outLabelStack",
+ json_label_stack);
+ for (i = 0; i < nexthop->nh_label->num_labels; i++)
+ json_object_array_add(
+ json_label_stack,
+ json_object_new_int(
+ nexthop->nh_label->label[i]));
+ }
json_object_int_add(json_nhlfe, "distance", nhlfe->distance);
if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED))
case NEXTHOP_TYPE_IPV4_IFINDEX:
json_object_string_addf(json_nhlfe, "nexthop", "%pI4",
&nexthop->gate.ipv4);
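+ /* Also report the egress interface when one is set */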
+ if (nexthop->ifindex)
+ json_object_string_add(json_nhlfe, "interface",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
- return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- out_labels, false /*backup*/);
+ return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+ num_labels, out_labels, false /*backup*/);
}
/*
uint8_t num_labels, const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
- return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
- out_labels, true);
+ return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+ num_labels, out_labels, true);
}
/*
{
struct zebra_nhlfe *nhlfe;
- if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
- return NULL;
-
- nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
- nh->nh_label->num_labels, nh->nh_label->label,
- false /*backup*/);
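+ /* nh may carry no labels; nhlfe_add() now accepts num_labels == 0 */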
+ nhlfe = nhlfe_add(
+ lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, nh->vrf_id,
+ nh->nh_label ? nh->nh_label->num_labels : 0,
+ nh->nh_label ? nh->nh_label->label : NULL, false /*backup*/);
return nhlfe;
}
{
struct zebra_nhlfe *nhlfe;
- if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
- return NULL;
-
- nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate,
- nh->ifindex, nh->nh_label->num_labels,
- nh->nh_label->label, true);
+ nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
+ nh->vrf_id,
+ nh->nh_label ? nh->nh_label->num_labels : 0,
+ nh->nh_label ? nh->nh_label->label : NULL, true);
return nhlfe;
}
lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
uint8_t num_out_labels, const mpls_label_t *out_labels,
enum nexthop_types_t gtype, const union g_addr *gate,
- ifindex_t ifindex, bool is_backup)
+ ifindex_t ifindex, vrf_id_t vrf_id, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
char buf[MPLS_LABEL_STRLEN];
struct nexthop *nh = nhlfe->nexthop;
assert(nh);
- assert(nh->nh_label);
/* Clear deleted flag (in case it was set) */
UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
- if (nh->nh_label->num_labels == num_out_labels
- && !memcmp(nh->nh_label->label, out_labels,
- sizeof(mpls_label_t) * num_out_labels))
+
+ if (!nh->nh_label || num_out_labels == 0)
+ /* No change */
+ return nhlfe;
+
+ if (nh->nh_label->num_labels == num_out_labels &&
+ !memcmp(nh->nh_label->label, out_labels,
+ sizeof(mpls_label_t) * num_out_labels))
/* No change */
return nhlfe;
}
/* Update out label(s), trigger processing. */
- if (nh->nh_label->num_labels == num_out_labels)
+ if (nh->nh_label && nh->nh_label->num_labels == num_out_labels)
memcpy(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels);
else {
}
} else {
/* Add LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
+ nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, vrf_id,
num_out_labels, out_labels, is_backup);
if (!nhlfe)
return NULL;
char buf2[MPLS_LABEL_STRLEN];
nhlfe2str(nhlfe, buf, sizeof(buf));
- mpls_label2str(num_out_labels, out_labels, buf2,
- sizeof(buf2), 0, 0);
+ if (num_out_labels)
+ mpls_label2str(num_out_labels, out_labels, buf2,
+ sizeof(buf2), 0, 0);
+ else
+ snprintf(buf2, sizeof(buf2), "-");
zlog_debug("Add LSP in-label %u type %d %snexthop %s out-label(s) %s",
lsp->ile.in_label, type, backup_str, buf,
/*
* Install an LSP and forwarding entry; used primarily
* from vrf zapi message processing.
+ * TODO: handle the vrf_id parameter when the MPLS API is extended to
+ * interface or SR-TE changes
*/
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
- gate, ifindex, false /*backup*/);
+ gate, ifindex, VRF_DEFAULT, false /*backup*/);
if (nhlfe == NULL)
return -1;
{
struct zebra_nhlfe *nhlfe;
- nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
- znh->type, &znh->gate, znh->ifindex,
+ nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+ &znh->gate, znh->ifindex, znh->vrf_id,
false /*backup*/);
if (nhlfe == NULL)
return -1;
{
struct zebra_nhlfe *nhlfe;
- nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num,
- znh->labels, znh->type, &znh->gate,
- znh->ifindex, true /*backup*/);
+ nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+ &znh->gate, znh->ifindex, znh->vrf_id,
+ true /*backup*/);
if (nhlfe == NULL) {
if (IS_ZEBRA_DEBUG_MPLS)
zlog_debug("%s: unable to add backup nhlfe, label: %u",
} else {
/* Add static LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate,
- ifindex, 1, &out_label, false /*backup*/);
+ nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate, ifindex,
+ VRF_DEFAULT, 1, &out_label, false /*backup*/);
if (!nhlfe)
return -1;
break;
}
- if (nexthop->type != NEXTHOP_TYPE_IFINDEX)
+ if (nexthop->type != NEXTHOP_TYPE_IFINDEX &&
+ nexthop->nh_label)
out_label_str = mpls_label2str(
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0],
#include "ptm_lib.h"
#include "rib.h"
#include "stream.h"
+#include "lib/version.h"
#include "vrf.h"
#include "vty.h"
#include "lib_errors.h"