]> git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #13506 from anlancs/fix/bfdd-vrf-check
authorIgor Ryzhov <iryzhov@nfware.com>
Mon, 22 May 2023 11:24:34 +0000 (14:24 +0300)
committerGitHub <noreply@github.com>
Mon, 22 May 2023 11:24:34 +0000 (14:24 +0300)
bfdd: Fix malformed session with vrf

154 files changed:
.gitignore
bgpd/bgp_attr.c
bgpd/bgp_labelpool.c
bgpd/bgp_labelpool.h
bgpd/bgp_mplsvpn.c
bgpd/bgp_mplsvpn.h
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_route.c
bgpd/bgp_route.h
bgpd/bgp_routemap.c
bgpd/bgp_routemap_nb.c
bgpd/bgp_routemap_nb.h
bgpd/bgp_routemap_nb_config.c
bgpd/bgp_vty.c
bgpd/bgp_zebra.c
bgpd/bgp_zebra.h
bgpd/bgpd.c
bgpd/bgpd.h
bgpd/rfapi/rfapi_import.c
configure.ac
doc/developer/building-frr-for-ubuntu2204.rst [new file with mode: 0644]
doc/developer/include-compile.rst
doc/developer/topotests.rst
doc/user/bgp.rst
doc/user/installation.rst
doc/user/ospf6d.rst
doc/user/ospfd.rst
doc/user/ripd.rst
doc/user/routemap.rst
doc/user/static.rst
lib/libfrr.c
lib/libospf.h
lib/plist.c
lib/routemap.h
lib/routemap_cli.c
mgmtd/mgmt_ds.c
mgmtd/mgmt_history.c
ospf6d/ospf6_gr.c
ospf6d/ospf6_gr.h
ospf6d/ospf6_interface.c
ospf6d/ospf6_interface.h
ospf6d/ospf6_message.c
ospf6d/ospf6_spf.c
ospf6d/ospf6_top.c
ospf6d/ospf6_top.h
ospf6d/ospf6_zebra.c
ospf6d/subdir.am
ospfd/ospf_apiserver.c
ospfd/ospf_ase.c
ospfd/ospf_gr.c
ospfd/ospf_gr.h
ospfd/ospf_interface.c
ospfd/ospf_interface.h
ospfd/ospf_ism.c
ospfd/ospf_lsa.c
ospfd/ospf_opaque.c
ospfd/ospf_packet.c
ospfd/ospf_packet.h
ospfd/ospf_ti_lfa.c
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
ospfd/ospfd.c
ospfd/ospfd.h
pimd/pim_iface.c
ripd/rip_cli.c
ripd/rip_nb_config.c
ripd/rip_zebra.c
ripd/ripd.c
ripd/ripd.h
tests/topotests/all_protocol_startup/r1/show_ip_ospf_interface.ref
tests/topotests/all_protocol_startup/test_all_protocol_startup.py
tests/topotests/analyze.py
tests/topotests/bgp_auth/test_bgp_auth1.py
tests/topotests/bgp_auth/test_bgp_auth2.py
tests/topotests/bgp_auth/test_bgp_auth3.py
tests/topotests/bgp_auth/test_bgp_auth4.py
tests/topotests/bgp_disable_addpath_rx/r1/bgpd.conf
tests/topotests/bgp_disable_addpath_rx/r2/bgpd.conf
tests/topotests/bgp_disable_addpath_rx/r3/bgpd.conf
tests/topotests/bgp_disable_addpath_rx/r4/bgpd.conf
tests/topotests/bgp_features/test_bgp_features.py
tests/topotests/bgp_prefix_list_topo1/prefix_modify.json [new file with mode: 0644]
tests/topotests/bgp_prefix_list_topo1/test_prefix_modify.py [new file with mode: 0644]
tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py
tests/topotests/bgp_route_map_match_source_protocol/__init__.py [new file with mode: 0644]
tests/topotests/bgp_route_map_match_source_protocol/r1/frr.conf [new file with mode: 0644]
tests/topotests/bgp_route_map_match_source_protocol/r2/frr.conf [new file with mode: 0644]
tests/topotests/bgp_route_map_match_source_protocol/r3/frr.conf [new file with mode: 0644]
tests/topotests/bgp_route_map_match_source_protocol/test_bgp_route_map_match_source_protocol.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/__init__.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgp_ipv4_routes_vrf1.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r1/ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r11/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r11/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r12/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r12/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r13/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r13/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_ipv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_vpnv4_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/rr/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/rr/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/__init__.py [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgp_ipv6_routes_vrf1.json [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r11/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r11/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r12/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r12/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r13/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r13/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgp_vpnv6_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/rr/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/rr/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py [new file with mode: 0644]
tests/topotests/conftest.py
tests/topotests/evpn_pim_1/spine/bgp.summ.json
tests/topotests/lib/bgp.py
tests/topotests/lib/bgprib.py
tests/topotests/lib/common_config.py
tests/topotests/lib/lutil.py
tests/topotests/lib/micronet_compat.py
tests/topotests/lib/ospf.py
tests/topotests/lib/pim.py
tests/topotests/lib/test/test_json.py
tests/topotests/lib/topojson.py
tests/topotests/lib/topotest.py
tests/topotests/munet/base.py
tests/topotests/munet/cli.py
tests/topotests/ospf6_gr_topo1/test_ospf6_gr_topo1.py
tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
tests/topotests/ospf_gr_topo1/test_ospf_gr_topo1.py
tests/topotests/ospfapi/test_ospf_clientapi.py
tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
tests/topotests/pytest.ini
tests/topotests/rip_allow_ecmp/r4/frr.conf [new file with mode: 0644]
tests/topotests/rip_allow_ecmp/r5/frr.conf [new file with mode: 0644]
tests/topotests/rip_allow_ecmp/test_rip_allow_ecmp.py
yang/example/ripd.json
yang/example/ripd.xml
yang/frr-bgp-route-map.yang
yang/frr-ripd.yang
zebra/zebra_dplane.c
zebra/zebra_mpls.c
zebra/zebra_ptm.c

index 3dd6a44409e3d948a729c3abafc5b07d23a56d58..a66e3ccd3c6a6de44f4f43a17ed3bd8aa864b23a 100644 (file)
@@ -106,6 +106,7 @@ GSYMS
 GRTAGS
 GPATH
 compile_commands.json
+.ccls
 .ccls-cache
 .dirstamp
 refix
index 195298c81518ba455b712096e12ef865fe87688a..d5223a1e6e4a63b86b9648dc4577b61f5ae38498 100644 (file)
@@ -1082,9 +1082,6 @@ struct attr *bgp_attr_aggregate_intern(
                attr.aspath = aspath_empty(bgp->asnotation);
        attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
 
-       /* Next hop attribute.  */
-       attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
-
        if (community) {
                uint32_t gshut = COMMUNITY_GSHUT;
 
@@ -1121,6 +1118,21 @@ struct attr *bgp_attr_aggregate_intern(
                attr.aggregator_as = bgp->as;
        attr.aggregator_addr = bgp->router_id;
 
+       /* Aggregate are done for IPv4/IPv6 so checking ipv4 family,
+        * This should only be set for IPv4 AFI type
+        * based on RFC-4760:
+        * "An UPDATE message that carries no NLRI,
+        * other than the one encoded in
+        * the MP_REACH_NLRI attribute,
+        * SHOULD NOT carry the NEXT_HOP
+        * attribute"
+        */
+       if (p->family == AF_INET) {
+               /* Next hop attribute.  */
+               attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+               attr.mp_nexthop_len = IPV4_MAX_BYTELEN;
+       }
+
        /* Apply route-map */
        if (aggregate->rmap.name) {
                struct attr attr_tmp = attr;
index 9943f57fb37107738f6789d66f65273c12599f84..faddfc995f3ef7e894aed2bcb4aae64922faa09f 100644 (file)
@@ -23,6 +23,9 @@
 #include "bgpd/bgp_debug.h"
 #include "bgpd/bgp_errors.h"
 #include "bgpd/bgp_route.h"
+#include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_rd.h"
 
 #define BGP_LABELPOOL_ENABLE_TESTS 0
 
@@ -830,6 +833,16 @@ DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
                                        lcb->label);
 
                        break;
+               case LP_TYPE_NEXTHOP:
+                       if (uj) {
+                               json_object_string_add(json_elem, "prefix",
+                                                      "nexthop");
+                               json_object_int_add(json_elem, "label",
+                                                   lcb->label);
+                       } else
+                               vty_out(vty, "%-18s         %u\n", "nexthop",
+                                       lcb->label);
+                       break;
                }
        }
        if (uj)
@@ -919,6 +932,15 @@ DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
                                vty_out(vty, "%-18s         %u\n", "VRF",
                                        label);
                        break;
+               case LP_TYPE_NEXTHOP:
+                       if (uj) {
+                               json_object_string_add(json_elem, "prefix",
+                                                      "nexthop");
+                               json_object_int_add(json_elem, "label", label);
+                       } else
+                               vty_out(vty, "%-18s         %u\n", "nexthop",
+                                       label);
+                       break;
                }
        }
        if (uj)
@@ -991,6 +1013,13 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
                        else
                                vty_out(vty, "VRF\n");
                        break;
+               case LP_TYPE_NEXTHOP:
+                       if (uj)
+                               json_object_string_add(json_elem, "prefix",
+                                                      "nexthop");
+                       else
+                               vty_out(vty, "Nexthop\n");
+                       break;
                }
        }
        if (uj)
@@ -1053,6 +1082,99 @@ DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
        return CMD_SUCCESS;
 }
 
+static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
+                                      struct bgp *bgp, bool detail)
+{
+       struct bgp_label_per_nexthop_cache_head *tree;
+       struct bgp_label_per_nexthop_cache *iter;
+       safi_t safi;
+       void *src;
+       char buf[PREFIX2STR_BUFFER];
+       char labelstr[MPLS_LABEL_STRLEN];
+       struct bgp_dest *dest;
+       struct bgp_path_info *path;
+       struct bgp *bgp_path;
+       struct bgp_table *table;
+       time_t tbuf;
+
+       vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
+               afi2str(afi), bgp->name_pretty);
+
+       tree = &bgp->mpls_labels_per_nexthop[afi];
+       frr_each (bgp_label_per_nexthop_cache, tree, iter) {
+               if (afi2family(afi) == AF_INET)
+                       src = (void *)&iter->nexthop.u.prefix4;
+               else
+                       src = (void *)&iter->nexthop.u.prefix6;
+
+               vty_out(vty, " %s, label %s #paths %u\n",
+                       inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
+                       mpls_label2str(1, &iter->label, labelstr,
+                                      sizeof(labelstr), 0, true),
+                       iter->path_count);
+               if (iter->nh)
+                       vty_out(vty, "  if %s\n",
+                               ifindex2ifname(iter->nh->ifindex,
+                                              iter->nh->vrf_id));
+               tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
+               vty_out(vty, "  Last update: %s", ctime(&tbuf));
+               if (!detail)
+                       continue;
+               vty_out(vty, "  Paths:\n");
+               LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
+                       dest = path->net;
+                       table = bgp_dest_table(dest);
+                       assert(dest && table);
+                       afi = family2afi(bgp_dest_get_prefix(dest)->family);
+                       safi = table->safi;
+                       bgp_path = table->bgp;
+
+                       if (dest->pdest) {
+                               vty_out(vty, "    %d/%d %pBD RD ", afi, safi,
+                                       dest);
+
+                               vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
+                                       (struct prefix_rd *)bgp_dest_get_prefix(
+                                               dest->pdest));
+                               vty_out(vty, " %s flags 0x%x\n",
+                                       bgp_path->name_pretty, path->flags);
+                       } else
+                               vty_out(vty, "    %d/%d %pBD %s flags 0x%x\n",
+                                       afi, safi, dest, bgp_path->name_pretty,
+                                       path->flags);
+               }
+       }
+}
+
+DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
+      "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
+      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
+      "BGP label per-nexthop table\n"
+      "Show detailed information\n")
+{
+       int idx = 0;
+       char *vrf = NULL;
+       struct bgp *bgp;
+       bool detail = false;
+       int afi;
+
+       if (argv_find(argv, argc, "vrf", &idx)) {
+               vrf = argv[++idx]->arg;
+               bgp = bgp_lookup_by_name(vrf);
+       } else
+               bgp = bgp_get_default();
+
+       if (!bgp)
+               return CMD_SUCCESS;
+
+       if (argv_find(argv, argc, "detail", &idx))
+               detail = true;
+
+       for (afi = AFI_IP; afi <= AFI_IP6; afi++)
+               show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
+       return CMD_SUCCESS;
+}
+
 #if BGP_LABELPOOL_ENABLE_TESTS
 /*------------------------------------------------------------------------
  *                     Testing code start
@@ -1532,3 +1654,66 @@ void bgp_lp_vty_init(void)
        install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
 }
+
+DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
+                   "BGP Label Per Nexthop entry");
+
+/* The nexthops values are compared to
+ * find in the tree the appropriate cache entry
+ */
+int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+                                   const struct bgp_label_per_nexthop_cache *b)
+{
+       return prefix_cmp(&a->nexthop, &b->nexthop);
+}
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+                         struct prefix *nexthop)
+{
+       struct bgp_label_per_nexthop_cache *blnc;
+
+       blnc = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE,
+                      sizeof(struct bgp_label_per_nexthop_cache));
+       blnc->tree = tree;
+       blnc->label = MPLS_INVALID_LABEL;
+       prefix_copy(&blnc->nexthop, nexthop);
+       LIST_INIT(&(blnc->paths));
+       bgp_label_per_nexthop_cache_add(tree, blnc);
+
+       return blnc;
+}
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+                          struct prefix *nexthop)
+{
+       struct bgp_label_per_nexthop_cache blnc = {};
+
+       if (!tree)
+               return NULL;
+
+       memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
+       return bgp_label_per_nexthop_cache_find(tree, &blnc);
+}
+
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
+{
+       if (blnc->label != MPLS_INVALID_LABEL) {
+               bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
+                                            blnc->label, blnc->nh->ifindex,
+                                            blnc->nh->vrf_id, ZEBRA_LSP_BGP,
+                                            &blnc->nexthop);
+               bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
+       }
+       bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
+       if (blnc->nh)
+               nexthop_free(blnc->nh);
+       blnc->nh = NULL;
+       XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
+}
+
+void bgp_label_per_nexthop_init(void)
+{
+       install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
+}
index 9526cba0ce30a738a15938b52fb09064ec68e0c4..b33527186e0a10b876494ec4a9db104a48352c36 100644 (file)
@@ -17,6 +17,7 @@
  */
 #define LP_TYPE_VRF    0x00000001
 #define LP_TYPE_BGP_LU 0x00000002
+#define LP_TYPE_NEXTHOP 0x00000003
 
 PREDECL_LIST(lp_fifo);
 
@@ -41,4 +42,55 @@ extern void bgp_lp_event_zebra_down(void);
 extern void bgp_lp_event_zebra_up(void);
 extern void bgp_lp_vty_init(void);
 
+struct bgp_label_per_nexthop_cache;
+PREDECL_RBTREE_UNIQ(bgp_label_per_nexthop_cache);
+
+extern int
+bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
+                               const struct bgp_label_per_nexthop_cache *b);
+
+struct bgp_label_per_nexthop_cache {
+
+       /* RB-tree entry. */
+       struct bgp_label_per_nexthop_cache_item entry;
+
+       /* the nexthop is the key of the list */
+       struct prefix nexthop;
+
+       /* calculated label */
+       mpls_label_t label;
+
+       /* number of path_vrfs */
+       unsigned int path_count;
+
+       /* back pointer to bgp instance */
+       struct bgp *to_bgp;
+
+       /* copy a nexthop resolution from bgp nexthop tracking
+        * used to extract the interface nexthop
+        */
+       struct nexthop *nh;
+
+       /* list of path_vrfs using it */
+       LIST_HEAD(path_lists, bgp_path_info) paths;
+
+       time_t last_update;
+
+       /* Back pointer to the cache tree this entry belongs to. */
+       struct bgp_label_per_nexthop_cache_head *tree;
+};
+
+DECLARE_RBTREE_UNIQ(bgp_label_per_nexthop_cache,
+                   struct bgp_label_per_nexthop_cache, entry,
+                   bgp_label_per_nexthop_cache_cmp);
+
+void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc);
+
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
+                         struct prefix *nexthop);
+struct bgp_label_per_nexthop_cache *
+bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
+                          struct prefix *nexthop);
+void bgp_label_per_nexthop_init(void);
 #endif /* _FRR_BGP_LABELPOOL_H */
index 63168f1e7a826ccbbbf38a5b340bb5418d3c3474..ecc84533b05c7dbb2dac26fd888df9f471bacbd4 100644 (file)
@@ -1116,12 +1116,14 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
 
        /*
         * Routes that are redistributed into BGP from zebra do not get
-        * nexthop tracking. However, if those routes are subsequently
-        * imported to other RIBs within BGP, the leaked routes do not
-        * carry the original BGP_ROUTE_REDISTRIBUTE sub_type. Therefore,
-        * in order to determine if the route we are currently leaking
-        * should have nexthop tracking, we must find the ultimate
-        * parent so we can check its sub_type.
+        * nexthop tracking, unless MPLS allocation per nexthop is
+        * performed. In the default case nexthop tracking does not apply,
+        * if those routes are subsequently imported to other RIBs within
+        * BGP, the leaked routes do not carry the original
+        * BGP_ROUTE_REDISTRIBUTE sub_type. Therefore, in order to determine
+        * if the route we are currently leaking should have nexthop
+        * tracking, we must find the ultimate parent so we can check its
+        * sub_type.
         *
         * As of now, source_bpi may at most be a second-generation route
         * (only one hop back to ultimate parent for vrf-vpn-vrf scheme).
@@ -1336,6 +1338,265 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
        return new;
 }
 
+void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi)
+{
+       struct bgp_label_per_nexthop_cache *blnc;
+
+       if (!pi)
+               return;
+
+       blnc = pi->label_nexthop_cache;
+
+       if (!blnc)
+               return;
+
+       LIST_REMOVE(pi, label_nh_thread);
+       pi->label_nexthop_cache->path_count--;
+       pi->label_nexthop_cache = NULL;
+
+       if (LIST_EMPTY(&(blnc->paths)))
+               bgp_label_per_nexthop_free(blnc);
+}
+
+/* Called upon reception of a ZAPI Message from zebra, about
+ * a new available label.
+ */
+static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
+                                               void *context, bool allocated)
+{
+       struct bgp_label_per_nexthop_cache *blnc = context;
+       mpls_label_t old_label;
+       int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+       struct bgp_path_info *pi;
+       struct bgp_table *table;
+
+       old_label = blnc->label;
+
+       if (debug)
+               zlog_debug("%s: label=%u, allocated=%d, nexthop=%pFX", __func__,
+                          label, allocated, &blnc->nexthop);
+       if (allocated)
+               /* update the entry with the new label */
+               blnc->label = label;
+       else
+               /*
+                * previously-allocated label is now invalid
+                * eg: zebra deallocated the labels and notifies it
+                */
+               blnc->label = MPLS_INVALID_LABEL;
+
+       if (old_label == blnc->label)
+               return 0; /* no change */
+
+       /* update paths */
+       if (blnc->label != MPLS_INVALID_LABEL)
+               bgp_zebra_send_nexthop_label(
+                       ZEBRA_MPLS_LABELS_ADD, blnc->label, blnc->nh->ifindex,
+                       blnc->nh->vrf_id, ZEBRA_LSP_BGP, &blnc->nexthop);
+
+       LIST_FOREACH (pi, &(blnc->paths), label_nh_thread) {
+               if (!pi->net)
+                       continue;
+               table = bgp_dest_table(pi->net);
+               if (!table)
+                       continue;
+               vpn_leak_from_vrf_update(blnc->to_bgp, table->bgp, pi);
+       }
+
+       return 0;
+}
+
+/* Get a per label nexthop value:
+ *  - Find and return a per label nexthop from the cache
+ *  - else allocate a new per label nexthop cache entry and request a
+ *    label to zebra. Return MPLS_INVALID_LABEL
+ */
+static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
+       struct bgp_path_info *pi, struct bgp *to_bgp, struct bgp *from_bgp,
+       afi_t afi, safi_t safi)
+{
+       struct bgp_nexthop_cache *bnc = pi->nexthop;
+       struct bgp_label_per_nexthop_cache *blnc;
+       struct bgp_label_per_nexthop_cache_head *tree;
+       struct prefix *nh_pfx = NULL;
+       struct prefix nh_gate = {0};
+
+       /* extract the nexthop from the BNC nexthop cache */
+       switch (bnc->nexthop->type) {
+       case NEXTHOP_TYPE_IPV4:
+       case NEXTHOP_TYPE_IPV4_IFINDEX:
+               /* the nexthop is recursive */
+               nh_gate.family = AF_INET;
+               nh_gate.prefixlen = IPV4_MAX_BITLEN;
+               IPV4_ADDR_COPY(&nh_gate.u.prefix4, &bnc->nexthop->gate.ipv4);
+               nh_pfx = &nh_gate;
+               break;
+       case NEXTHOP_TYPE_IPV6:
+       case NEXTHOP_TYPE_IPV6_IFINDEX:
+               /* the nexthop is recursive */
+               nh_gate.family = AF_INET6;
+               nh_gate.prefixlen = IPV6_MAX_BITLEN;
+               IPV6_ADDR_COPY(&nh_gate.u.prefix6, &bnc->nexthop->gate.ipv6);
+               nh_pfx = &nh_gate;
+               break;
+       case NEXTHOP_TYPE_IFINDEX:
+               /* the nexthop is direcly connected */
+               nh_pfx = &bnc->prefix;
+               break;
+       case NEXTHOP_TYPE_BLACKHOLE:
+               assert(!"Blackhole nexthop. Already checked by the caller.");
+       }
+
+       /* find or allocate a nexthop label cache entry */
+       tree = &from_bgp->mpls_labels_per_nexthop[family2afi(nh_pfx->family)];
+       blnc = bgp_label_per_nexthop_find(tree, nh_pfx);
+       if (!blnc) {
+               blnc = bgp_label_per_nexthop_new(tree, nh_pfx);
+               blnc->to_bgp = to_bgp;
+               /* request a label to zebra for this nexthop
+                * the response from zebra will trigger the callback
+                */
+               bgp_lp_get(LP_TYPE_NEXTHOP, blnc,
+                          bgp_mplsvpn_get_label_per_nexthop_cb);
+       }
+
+       if (pi->label_nexthop_cache == blnc)
+               /* no change */
+               return blnc->label;
+
+       /* Unlink from any existing nexthop cache. Free the entry if unused.
+        */
+       bgp_mplsvpn_path_nh_label_unlink(pi);
+       if (blnc) {
+               /* updates NHT pi list reference */
+               LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
+               pi->label_nexthop_cache = blnc;
+               pi->label_nexthop_cache->path_count++;
+               blnc->last_update = monotime(NULL);
+       }
+
+       /* then add or update the selected nexthop */
+       if (!blnc->nh)
+               blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+       else if (!nexthop_same(bnc->nexthop, blnc->nh)) {
+               nexthop_free(blnc->nh);
+               blnc->nh = nexthop_dup(bnc->nexthop, NULL);
+               if (blnc->label != MPLS_INVALID_LABEL) {
+                       bgp_zebra_send_nexthop_label(
+                               ZEBRA_MPLS_LABELS_REPLACE, blnc->label,
+                               bnc->nexthop->ifindex, bnc->nexthop->vrf_id,
+                               ZEBRA_LSP_BGP, &blnc->nexthop);
+               }
+       }
+
+       return blnc->label;
+}
+
+/* Filter out all the cases where a per nexthop label is not possible:
+ * - return an invalid label when the nexthop is invalid
+ * - return the per VRF label when the per nexthop label is not supported
+ * Otherwise, find or request a per label nexthop.
+ */
+static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
+       afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp *from_bgp,
+       struct bgp *to_bgp)
+{
+       struct bgp_path_info *bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
+       struct bgp *bgp_nexthop = NULL;
+       bool nh_valid;
+       afi_t nh_afi;
+       bool is_bgp_static_route;
+
+       is_bgp_static_route = bpi_ultimate->sub_type == BGP_ROUTE_STATIC &&
+                             bpi_ultimate->type == ZEBRA_ROUTE_BGP;
+
+       if (is_bgp_static_route == false && afi == AFI_IP &&
+           CHECK_FLAG(pi->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) &&
+           (pi->attr->nexthop.s_addr == INADDR_ANY ||
+            !ipv4_unicast_valid(&pi->attr->nexthop))) {
+               /* IPv4 nexthop in standard BGP encoding format.
+                * Format of address is not valid (not any, not unicast).
+                * Fallback to the per VRF label.
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return from_bgp->vpn_policy[afi].tovpn_label;
+       }
+
+       if (is_bgp_static_route == false && afi == AFI_IP &&
+           pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4 &&
+           (pi->attr->mp_nexthop_global_in.s_addr == INADDR_ANY ||
+            !ipv4_unicast_valid(&pi->attr->mp_nexthop_global_in))) {
+               /* IPv4 nexthop is in MP-BGP encoding format.
+                * Format of address is not valid (not any, not unicast).
+                * Fallback to the per VRF label.
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return from_bgp->vpn_policy[afi].tovpn_label;
+       }
+
+       if (is_bgp_static_route == false && afi == AFI_IP6 &&
+           (pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL ||
+            pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) &&
+           (IN6_IS_ADDR_UNSPECIFIED(&pi->attr->mp_nexthop_global) ||
+            IN6_IS_ADDR_LOOPBACK(&pi->attr->mp_nexthop_global) ||
+            IN6_IS_ADDR_MULTICAST(&pi->attr->mp_nexthop_global))) {
+               /* IPv6 nexthop is in MP-BGP encoding format.
+                * Format of address is not valid
+                * Fallback to the per VRF label.
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return from_bgp->vpn_policy[afi].tovpn_label;
+       }
+
+       /* Check the next-hop reachability.
+        * Get the bgp instance where the bgp_path_info originates.
+        */
+       if (pi->extra && pi->extra->bgp_orig)
+               bgp_nexthop = pi->extra->bgp_orig;
+       else
+               bgp_nexthop = from_bgp;
+
+       nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
+       nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, safi,
+                                          pi, NULL, 0, NULL);
+
+       if (!nh_valid && is_bgp_static_route &&
+           !CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
+               /* "network" prefixes not routable, but since 'no bgp network
+                * import-check' is configured, they are always valid in the BGP
+                * table. Fallback to the per-vrf label
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return from_bgp->vpn_policy[afi].tovpn_label;
+       }
+
+       if (!nh_valid || !pi->nexthop || pi->nexthop->nexthop_num == 0 ||
+           !pi->nexthop->nexthop) {
+               /* invalid next-hop:
+                * do not send the per-vrf label
+                * otherwise, when the next-hop becomes valid,
+                * we will have 2 BGP updates:
+                * - one with the per-vrf label
+                * - the second with the per-nexthop label
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return MPLS_INVALID_LABEL;
+       }
+
+       if (pi->nexthop->nexthop_num > 1 ||
+           pi->nexthop->nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
+               /* Blackhole or ECMP routes
+                * is not compatible with per-nexthop label.
+                * Fallback to per-vrf label.
+                */
+               bgp_mplsvpn_path_nh_label_unlink(pi);
+               return from_bgp->vpn_policy[afi].tovpn_label;
+       }
+
+       return _vpn_leak_from_vrf_get_per_nexthop_label(pi, to_bgp, from_bgp,
+                                                       afi, safi);
+}
+
 /* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
 void vpn_leak_from_vrf_update(struct bgp *to_bgp,           /* to */
                              struct bgp *from_bgp,        /* from */
@@ -1528,12 +1789,32 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp,            /* to */
                nexthop_self_flag = 1;
        }
 
-       label_val = from_bgp->vpn_policy[afi].tovpn_label;
-       if (label_val == MPLS_LABEL_NONE) {
+       if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+                      BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+               /* per nexthop label mode */
+               label_val = vpn_leak_from_vrf_get_per_nexthop_label(
+                       afi, safi, path_vrf, from_bgp, to_bgp);
+       else
+               /* per VRF label mode */
+               label_val = from_bgp->vpn_policy[afi].tovpn_label;
+
+       if (label_val == MPLS_INVALID_LABEL &&
+           CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
+                      BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP)) {
+               /* no valid label for the moment
+                * when the 'bgp_mplsvpn_get_label_per_nexthop_cb' callback gets
+                * a valid label value, it will call the current function again.
+                */
+               if (debug)
+                       zlog_debug(
+                               "%s: %s skipping: waiting for a valid per-label nexthop.",
+                               __func__, from_bgp->name_pretty);
+               return;
+       }
+       if (label_val == MPLS_LABEL_NONE)
                encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
-       } else {
+       else
                encode_label(label_val, &label);
-       }
 
        /* Set originator ID to "me" */
        SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
@@ -1770,6 +2051,8 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp, struct bgp *from_bgp,
                                                bpi, afi, safi);
                                        bgp_path_info_delete(bn, bpi);
                                        bgp_process(to_bgp, bn, afi, safi);
+                                       bgp_mplsvpn_path_nh_label_unlink(
+                                               bpi->extra->parent);
                                }
                        }
                }
index c832b4abd444da426a2c101415637842f4f82873..75758edcc2bb4085786cd750352f58ccfcef30c5 100644 (file)
@@ -31,6 +31,7 @@
 #define BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH 20
 
 extern void bgp_mplsvpn_init(void);
+extern void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi);
 extern int bgp_nlri_parse_vpn(struct peer *, struct attr *, struct bgp_nlri *);
 extern uint32_t decode_label(mpls_label_t *);
 extern void encode_label(mpls_label_t, mpls_label_t *);
index 1c79d7d03be34ce8bea84cd64517500c71ec3c60..c878512389d7daadef849ca27c82ab0ba8f92ffb 100644 (file)
@@ -31,6 +31,7 @@
 #include "bgpd/bgp_fsm.h"
 #include "bgpd/bgp_vty.h"
 #include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
 
 DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String");
 
@@ -119,6 +120,8 @@ static void bgp_nexthop_cache_reset(struct bgp_nexthop_cache_head *tree)
                while (!LIST_EMPTY(&(bnc->paths))) {
                        struct bgp_path_info *path = LIST_FIRST(&(bnc->paths));
 
+                       bgp_mplsvpn_path_nh_label_unlink(path);
+
                        path_nh_map(path, bnc, false);
                }
 
index a294ebcc63062a5c956e248786adbe1ea7a8d94c..bda163d7a50943d9260973a18fb86868437d6ed2 100644 (file)
@@ -31,6 +31,7 @@
 #include "bgpd/bgp_flowspec_util.h"
 #include "bgpd/bgp_evpn.h"
 #include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
 
 extern struct zclient *zclient;
 
@@ -149,6 +150,8 @@ void bgp_unlink_nexthop(struct bgp_path_info *path)
 {
        struct bgp_nexthop_cache *bnc = path->nexthop;
 
+       bgp_mplsvpn_path_nh_label_unlink(path);
+
        if (!bnc)
                return;
 
@@ -1134,10 +1137,21 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
        }
 
        LIST_FOREACH (path, &(bnc->paths), nh_thread) {
-               if (!(path->type == ZEBRA_ROUTE_BGP
-                     && ((path->sub_type == BGP_ROUTE_NORMAL)
-                         || (path->sub_type == BGP_ROUTE_STATIC)
-                         || (path->sub_type == BGP_ROUTE_IMPORTED))))
+               if (path->type == ZEBRA_ROUTE_BGP &&
+                   (path->sub_type == BGP_ROUTE_NORMAL ||
+                    path->sub_type == BGP_ROUTE_STATIC ||
+                    path->sub_type == BGP_ROUTE_IMPORTED))
+                       /* evaluate the path */
+                       ;
+               else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE) {
+                       /* evaluate the path for redistributed routes
+                        * except those from VNC
+                        */
+                       if ((path->type == ZEBRA_ROUTE_VNC) ||
+                           (path->type == ZEBRA_ROUTE_VNC_DIRECT))
+                               continue;
+               } else
+                       /* don't evaluate the path */
                        continue;
 
                dest = path->net;
@@ -1230,7 +1244,26 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
                        SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
 
                path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
-               if (path_valid != bnc_is_valid_nexthop) {
+               if (path->type == ZEBRA_ROUTE_BGP &&
+                   path->sub_type == BGP_ROUTE_STATIC &&
+                   !CHECK_FLAG(bgp_path->flags, BGP_FLAG_IMPORT_CHECK))
+                       /* static routes with 'no bgp network import-check' are
+                        * always valid. if nht is called with static routes,
+                        * the vpn exportation needs to be triggered
+                        */
+                       vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+                                                path);
+               else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE &&
+                        safi == SAFI_UNICAST &&
+                        (bgp_path->inst_type == BGP_INSTANCE_TYPE_VRF ||
+                         bgp_path->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
+                       /* redistribute routes are always valid
+                        * if nht is called with redistribute routes, the vpn
+                        * exportation needs to be triggered
+                        */
+                       vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
+                                                path);
+               else if (path_valid != bnc_is_valid_nexthop) {
                        if (path_valid) {
                                /* No longer valid, clear flag; also for EVPN
                                 * routes, unimport from VRFs if needed.
@@ -1243,6 +1276,12 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
                                    bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
                                        bgp_evpn_unimport_route(bgp_path,
                                                afi, safi, bgp_dest_get_prefix(dest), path);
+                               if (safi == SAFI_UNICAST &&
+                                   (bgp_path->inst_type !=
+                                    BGP_INSTANCE_TYPE_VIEW))
+                                       vpn_leak_from_vrf_withdraw(
+                                               bgp_get_default(), bgp_path,
+                                               path);
                        } else {
                                /* Path becomes valid, set flag; also for EVPN
                                 * routes, import from VRFs if needed.
@@ -1255,6 +1294,12 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
                                    bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
                                        bgp_evpn_import_route(bgp_path,
                                                afi, safi, bgp_dest_get_prefix(dest), path);
+                               if (safi == SAFI_UNICAST &&
+                                   (bgp_path->inst_type !=
+                                    BGP_INSTANCE_TYPE_VIEW))
+                                       vpn_leak_from_vrf_update(
+                                               bgp_get_default(), bgp_path,
+                                               path);
                        }
                }
 
index c60b55cf0800bf7b45006c37591a8ccc6cc9fc21..eb51bc1fd672333a5ecb3dbb63205af66645e690 100644 (file)
@@ -1466,7 +1466,7 @@ int bgp_evpn_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,
                             struct bgp_path_info *exist, int *paths_eq)
 {
        enum bgp_path_selection_reason reason;
-       char pfx_buf[PREFIX2STR_BUFFER];
+       char pfx_buf[PREFIX2STR_BUFFER] = {};
 
        return bgp_path_info_cmp(bgp, new, exist, paths_eq, NULL, 0, pfx_buf,
                                AFI_L2VPN, SAFI_EVPN, &reason);
@@ -2653,7 +2653,7 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
        struct bgp_path_info *nextpi = NULL;
        int paths_eq, do_mpath, debug;
        struct list mp_list;
-       char pfx_buf[PREFIX2STR_BUFFER];
+       char pfx_buf[PREFIX2STR_BUFFER] = {};
        char path_buf[PATH_ADDPATH_STR_BUFFER];
 
        bgp_mp_list_init(&mp_list);
@@ -7418,7 +7418,7 @@ static bool bgp_aggregate_info_same(struct bgp_path_info *pi, uint8_t origin,
 
        asnotation = bgp_get_asnotation(NULL);
 
-       if (!ae)
+       if (!aspath)
                ae = aspath_empty(asnotation);
 
        if (!pi)
@@ -7477,8 +7477,8 @@ static void bgp_aggregate_install(
                 * If the aggregate information has not changed
                 * no need to re-install it again.
                 */
-               if (bgp_aggregate_info_same(orig, origin, aspath, community,
-                                           ecommunity, lcommunity)) {
+               if (pi && bgp_aggregate_info_same(pi, origin, aspath, community,
+                                                 ecommunity, lcommunity)) {
                        bgp_dest_unlock_node(dest);
 
                        if (aspath)
@@ -8676,12 +8676,16 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
         */
        assert(attr.aspath);
 
+       if (p->family == AF_INET6)
+               UNSET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP));
+
        switch (nhtype) {
        case NEXTHOP_TYPE_IFINDEX:
                switch (p->family) {
                case AF_INET:
                        attr.nexthop.s_addr = INADDR_ANY;
                        attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+                       attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
                        break;
                case AF_INET6:
                        memset(&attr.mp_nexthop_global, 0,
@@ -8694,6 +8698,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
        case NEXTHOP_TYPE_IPV4_IFINDEX:
                attr.nexthop = nexthop->ipv4;
                attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+               attr.mp_nexthop_global_in = nexthop->ipv4;
                break;
        case NEXTHOP_TYPE_IPV6:
        case NEXTHOP_TYPE_IPV6_IFINDEX:
@@ -8705,6 +8710,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
                case AF_INET:
                        attr.nexthop.s_addr = INADDR_ANY;
                        attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+                       attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
                        break;
                case AF_INET6:
                        memset(&attr.mp_nexthop_global, 0,
index a64144b62557fcb1197bd42814ad1ca6194e834e..fbdd5fae7d68a949a0c64d1a85788afd303062e9 100644 (file)
@@ -319,6 +319,12 @@ struct bgp_path_info {
        /* Addpath identifiers */
        uint32_t addpath_rx_id;
        struct bgp_addpath_info_data tx_addpath;
+
+       /* For nexthop per label linked list */
+       LIST_ENTRY(bgp_path_info) label_nh_thread;
+
+       /* Back pointer to the bgp label per nexthop structure */
+       struct bgp_label_per_nexthop_cache *label_nexthop_cache;
 };
 
 /* Structure used in BGP path selection */
index 10fc3ecda4455abb5e9ed8f4f3566dd950baf4de..7db110be933900ad3d16b8c9d15a3817eb2367cc 100644 (file)
@@ -872,6 +872,46 @@ static const struct route_map_rule_cmd
        route_match_ip_next_hop_type_free
 };
 
+/* `match source-protocol` */
+static enum route_map_cmd_result_t
+route_match_source_protocol(void *rule, const struct prefix *prefix,
+                           void *object)
+{
+       struct bgp_path_info *path = object;
+       int *protocol = rule;
+
+       if (!path)
+               return RMAP_NOMATCH;
+
+       if (path->type == *protocol)
+               return RMAP_MATCH;
+
+       return RMAP_NOMATCH;
+}
+
+static void *route_match_source_protocol_compile(const char *arg)
+{
+       int *protocol;
+
+       protocol = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(*protocol));
+       *protocol = proto_name2num(arg);
+
+       return protocol;
+}
+
+static void route_match_source_protocol_free(void *rule)
+{
+       XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+static const struct route_map_rule_cmd route_match_source_protocol_cmd = {
+       "source-protocol",
+       route_match_source_protocol,
+       route_match_source_protocol_compile,
+       route_match_source_protocol_free
+};
+
+
 /* `match ip route-source prefix-list PREFIX_LIST' */
 
 static enum route_map_cmd_result_t
@@ -7177,6 +7217,42 @@ DEFPY_YANG (match_rpki_extcommunity,
        return nb_cli_apply_changes(vty, NULL);
 }
 
+DEFPY_YANG (match_source_protocol,
+            match_source_protocol_cmd,
+           "match source-protocol " FRR_REDIST_STR_ZEBRA "$proto",
+           MATCH_STR
+           "Match protocol via which the route was learnt\n"
+           FRR_REDIST_HELP_STR_ZEBRA)
+{
+       const char *xpath =
+               "./match-condition[condition='frr-bgp-route-map:source-protocol']";
+       char xpath_value[XPATH_MAXLEN];
+
+       nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+       snprintf(xpath_value, sizeof(xpath_value),
+                "%s/rmap-match-condition/frr-bgp-route-map:source-protocol",
+                xpath);
+       nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, proto);
+
+       return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY_YANG (no_match_source_protocol,
+            no_match_source_protocol_cmd,
+           "no match source-protocol [" FRR_REDIST_STR_ZEBRA "]",
+           NO_STR
+           MATCH_STR
+           "Match protocol via which the route was learnt\n"
+           FRR_REDIST_HELP_STR_ZEBRA)
+{
+       const char *xpath =
+               "./match-condition[condition='frr-bgp-route-map:source-protocol']";
+
+       nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+       return nb_cli_apply_changes(vty, NULL);
+}
+
 /* Initialization of route map. */
 void bgp_route_map_init(void)
 {
@@ -7252,6 +7328,7 @@ void bgp_route_map_init(void)
        route_map_install_match(&route_match_ip_address_prefix_list_cmd);
        route_map_install_match(&route_match_ip_next_hop_prefix_list_cmd);
        route_map_install_match(&route_match_ip_next_hop_type_cmd);
+       route_map_install_match(&route_match_source_protocol_cmd);
        route_map_install_match(&route_match_ip_route_source_prefix_list_cmd);
        route_map_install_match(&route_match_aspath_cmd);
        route_map_install_match(&route_match_community_cmd);
@@ -7441,6 +7518,8 @@ void bgp_route_map_init(void)
        install_element(RMAP_NODE, &set_ipv6_nexthop_peer_cmd);
        install_element(RMAP_NODE, &no_set_ipv6_nexthop_peer_cmd);
        install_element(RMAP_NODE, &match_rpki_extcommunity_cmd);
+       install_element(RMAP_NODE, &match_source_protocol_cmd);
+       install_element(RMAP_NODE, &no_match_source_protocol_cmd);
 #ifdef HAVE_SCRIPTING
        install_element(RMAP_NODE, &match_script_cmd);
 #endif
index 6e8439cc26cfe991649d83292aa613db9fbaafc5..282ebe9116a6651a276c8a2b8abf7431d35c0eee 100644 (file)
@@ -74,6 +74,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = {
                                .destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_destroy,
                        }
                },
+           {
+                       .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol",
+                       .cbs = {
+                               .modify = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify,
+                               .destroy = lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy,
+                       }
+               },
                {
                        .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:peer-ipv4-address",
                        .cbs = {
index bcd1e837e852c3cb3fa759ffcf63b807fd86c563..7066fdb4195cabd2f8bcd1b716ddb7cad3f85ba6 100644 (file)
@@ -30,6 +30,10 @@ int lib_route_map_entry_match_condition_rmap_match_condition_rpki_extcommunity_m
        struct nb_cb_modify_args *args);
 int lib_route_map_entry_match_condition_rmap_match_condition_rpki_extcommunity_destroy(
        struct nb_cb_destroy_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+       struct nb_cb_modify_args *args);
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+       struct nb_cb_destroy_args *args);
 int lib_route_map_entry_match_condition_rmap_match_condition_probability_modify(struct nb_cb_modify_args *args);
 int lib_route_map_entry_match_condition_rmap_match_condition_probability_destroy(struct nb_cb_destroy_args *args);
 int lib_route_map_entry_match_condition_rmap_match_condition_source_vrf_modify(struct nb_cb_modify_args *args);
index 938a5ec31b7c662fea9466b15aa9abdb6ec6b78b..02564b00041025c76d8a8725ab9829d639bc4ced 100644 (file)
@@ -367,6 +367,60 @@ lib_route_map_entry_match_condition_rmap_match_condition_rpki_destroy(
        return NB_OK;
 }
 
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:source-protocol
+ */
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_modify(
+       struct nb_cb_modify_args *args)
+{
+       struct routemap_hook_context *rhc;
+       enum rmap_compile_rets ret;
+       const char *proto;
+
+       switch (args->event) {
+       case NB_EV_VALIDATE:
+       case NB_EV_PREPARE:
+       case NB_EV_ABORT:
+               break;
+       case NB_EV_APPLY:
+               /* Add configuration. */
+               rhc = nb_running_get_entry(args->dnode, NULL, true);
+               proto = yang_dnode_get_string(args->dnode, NULL);
+
+               /* Set destroy information. */
+               rhc->rhc_mhook = bgp_route_match_delete;
+               rhc->rhc_rule = "source-protocol";
+               rhc->rhc_event = RMAP_EVENT_MATCH_DELETED;
+
+               ret = bgp_route_match_add(rhc->rhc_rmi, "source-protocol",
+                                         proto, RMAP_EVENT_MATCH_ADDED,
+                                         args->errmsg, args->errmsg_len);
+
+               if (ret != RMAP_COMPILE_SUCCESS) {
+                       rhc->rhc_mhook = NULL;
+                       return NB_ERR_INCONSISTENCY;
+               }
+       }
+
+       return NB_OK;
+}
+
+int lib_route_map_entry_match_condition_rmap_match_condition_source_protocol_destroy(
+       struct nb_cb_destroy_args *args)
+{
+       switch (args->event) {
+       case NB_EV_VALIDATE:
+       case NB_EV_PREPARE:
+       case NB_EV_ABORT:
+               break;
+       case NB_EV_APPLY:
+               return lib_route_map_entry_match_destroy(args);
+       }
+
+       return NB_OK;
+}
+
 /*
  * XPath:
  * /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:rpki-extcommunity
index ccf198c3927260913a02ecda5d8e19185196c5cf..7ef9db9f0dd8e60040399fd386f1316d2cdc91d7 100644 (file)
@@ -9183,6 +9183,63 @@ ALIAS (af_rd_vpn_export,
        "Between current address-family and vpn\n"
        "For routes leaked from current address-family to vpn\n")
 
+DEFPY(af_label_vpn_export_allocation_mode,
+      af_label_vpn_export_allocation_mode_cmd,
+      "[no$no] label vpn export allocation-mode <per-vrf$label_per_vrf|per-nexthop$label_per_nh>",
+      NO_STR
+      "label value for VRF\n"
+      "Between current address-family and vpn\n"
+      "For routes leaked from current address-family to vpn\n"
+      "Label allocation mode\n"
+      "Allocate one label for all BGP updates of the VRF\n"
+      "Allocate a label per connected next-hop in the VRF\n")
+{
+       VTY_DECLVAR_CONTEXT(bgp, bgp);
+       afi_t afi;
+       bool old_per_nexthop, new_per_nexthop;
+
+       afi = vpn_policy_getafi(vty, bgp, false);
+
+       old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags,
+                                      BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+       if (no) {
+               if (old_per_nexthop == false && label_per_nh)
+                       return CMD_ERR_NO_MATCH;
+               if (old_per_nexthop == true && label_per_vrf)
+                       return CMD_ERR_NO_MATCH;
+               new_per_nexthop = false;
+       } else {
+               if (label_per_nh)
+                       new_per_nexthop = true;
+               else
+                       new_per_nexthop = false;
+       }
+
+       /* no change */
+       if (old_per_nexthop == new_per_nexthop)
+               return CMD_SUCCESS;
+
+       /*
+        * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+        */
+       vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+                          bgp);
+
+       if (new_per_nexthop)
+               SET_FLAG(bgp->vpn_policy[afi].flags,
+                        BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+       else
+               UNSET_FLAG(bgp->vpn_policy[afi].flags,
+                          BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
+
+       /* post-change: re-export vpn routes */
+       vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
+                           bgp);
+
+       hook_call(bgp_snmp_update_last_changed, bgp);
+       return CMD_SUCCESS;
+}
+
 DEFPY (af_label_vpn_export,
        af_label_vpn_export_cmd,
        "[no] label vpn export <(0-1048575)$label_val|auto$label_auto>",
@@ -11563,8 +11620,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
                                        &peer->ibuf->count,
                                        memory_order_relaxed);
 
-                               json_object_int_add(json_peer, "tableVersion",
-                                                   peer->version[afi][safi]);
+                               json_object_int_add(
+                                       json_peer, "tableVersion",
+                                       (paf && PAF_SUBGRP(paf))
+                                               ? paf->subgroup->version
+                                               : 0);
                                json_object_int_add(json_peer, "outq",
                                                    outq_count);
                                json_object_int_add(json_peer, "inq",
@@ -11742,8 +11802,10 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
                                        " %9u %9u %8" PRIu64 " %4zu %4zu %8s",
                                        PEER_TOTAL_RX(peer),
                                        PEER_TOTAL_TX(peer),
-                                       peer->version[afi][safi], inq_count,
-                                       outq_count,
+                                       (paf && PAF_SUBGRP(paf))
+                                               ? paf->subgroup->version
+                                               : 0,
+                                       inq_count, outq_count,
                                        peer_uptime(peer->uptime, timebuf,
                                                    BGP_UPTIME_LEN, 0, NULL));
 
@@ -17300,6 +17362,12 @@ static void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
                }
        }
 
+       if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
+                      BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+               vty_out(vty,
+                       "%*slabel vpn export allocation-mode per-nexthop\n",
+                       indent, "");
+
        tovpn_sid_index = bgp->vpn_policy[afi].tovpn_sid_index;
        if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
                       BGP_VPN_POLICY_TOVPN_SID_AUTO)) {
@@ -20473,6 +20541,10 @@ void bgp_vty_init(void)
        install_element(BGP_IPV6_NODE, &af_rd_vpn_export_cmd);
        install_element(BGP_IPV4_NODE, &af_label_vpn_export_cmd);
        install_element(BGP_IPV6_NODE, &af_label_vpn_export_cmd);
+       install_element(BGP_IPV4_NODE,
+                       &af_label_vpn_export_allocation_mode_cmd);
+       install_element(BGP_IPV6_NODE,
+                       &af_label_vpn_export_allocation_mode_cmd);
        install_element(BGP_IPV4_NODE, &af_nexthop_vpn_export_cmd);
        install_element(BGP_IPV6_NODE, &af_nexthop_vpn_export_cmd);
        install_element(BGP_IPV4_NODE, &af_rt_vpn_imexport_cmd);
index 96b1f3e00f4c1776e9f9028b240869b7643a2838..1965cd270471553eeac051adf2def5aae65f00c5 100644 (file)
@@ -3911,3 +3911,32 @@ int bgp_zebra_srv6_manager_release_locator_chunk(const char *name)
 {
        return srv6_manager_release_locator_chunk(zclient, name);
 }
+
+void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+                                 ifindex_t ifindex, vrf_id_t vrf_id,
+                                 enum lsp_types_t ltype, struct prefix *p)
+{
+       struct zapi_labels zl = {};
+       struct zapi_nexthop *znh;
+
+       zl.type = ltype;
+       zl.local_label = label;
+       zl.nexthop_num = 1;
+       znh = &zl.nexthops[0];
+       if (p->family == AF_INET)
+               IPV4_ADDR_COPY(&znh->gate.ipv4, &p->u.prefix4);
+       else
+               IPV6_ADDR_COPY(&znh->gate.ipv6, &p->u.prefix6);
+       if (ifindex == IFINDEX_INTERNAL)
+               znh->type = (p->family == AF_INET) ? NEXTHOP_TYPE_IPV4
+                                                  : NEXTHOP_TYPE_IPV6;
+       else
+               znh->type = (p->family == AF_INET) ? NEXTHOP_TYPE_IPV4_IFINDEX
+                                                  : NEXTHOP_TYPE_IPV6_IFINDEX;
+       znh->ifindex = ifindex;
+       znh->vrf_id = vrf_id;
+       znh->label_num = 0;
+
+       /* vrf_id is DEFAULT_VRF */
+       zebra_send_mpls_labels(zclient, cmd, &zl);
+}
index b09be890e5eef7d09fb430f66594a8a2d3e88343..7c85d86b317a5cf606ad2918900f473d045421cc 100644 (file)
@@ -118,4 +118,8 @@ extern int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
 extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
 extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
 extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
+extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
+                                        ifindex_t index, vrf_id_t vrfid,
+                                        enum lsp_types_t ltype,
+                                        struct prefix *p);
 #endif /* _QUAGGA_BGP_ZEBRA_H */
index f2ad51942fcf2e0249a8b636efd148f2f96ed65b..99c6a46e7cd205b9cba5aa7ef2b0e5facbfdd32c 100644 (file)
@@ -3355,6 +3355,11 @@ static struct bgp *bgp_create(as_t *as, const char *name,
                SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
                         BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
        }
+
+       for (afi = AFI_IP; afi < AFI_MAX; afi++)
+               bgp_label_per_nexthop_cache_init(
+                       &bgp->mpls_labels_per_nexthop[afi]);
+
        if (name)
                bgp->name = XSTRDUP(MTYPE_BGP, name);
 
@@ -8252,6 +8257,8 @@ void bgp_init(unsigned short instance)
 
        bgp_lp_vty_init();
 
+       bgp_label_per_nexthop_init();
+
        cmd_variable_handler_register(bgp_viewvrf_var_handlers);
 }
 
index c3cb6ba91e69fc1969c8bc805579417595610400..9cb1d51088cfc456f344b17b8068f84d382e3751 100644 (file)
@@ -211,6 +211,7 @@ struct vpn_policy {
 #define BGP_VPN_POLICY_TOVPN_RD_SET            (1 << 1)
 #define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET       (1 << 2)
 #define BGP_VPN_POLICY_TOVPN_SID_AUTO          (1 << 3)
+#define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
 
        /*
         * If we are importing another vrf into us keep a list of
@@ -573,6 +574,10 @@ struct bgp {
        /* Allocate MPLS labels */
        uint8_t allocate_mpls_labels[AFI_MAX][SAFI_MAX];
 
+       /* Tree for next-hop lookup cache. */
+       struct bgp_label_per_nexthop_cache_head
+               mpls_labels_per_nexthop[AFI_MAX];
+
        /* Allocate hash entries to store policy routing information
         * The hash are used to host pbr rules somewhere.
         * Actually, pbr will only be used by flowspec
@@ -1114,7 +1119,6 @@ struct peer {
 
        /* BGP peer group.  */
        struct peer_group *group;
-       uint64_t version[AFI_MAX][SAFI_MAX];
 
        /* BGP peer_af structures, per configured AF on this peer */
        struct peer_af *peer_af_array[BGP_AF_MAX];
index 4b8e07a9c4c787b9c2daea8b752d533a3dae9f97..27f7c88d7be19841a443f1183953b10ab50b4d30 100644 (file)
@@ -1939,7 +1939,7 @@ static void rfapiBgpInfoAttachSorted(struct agg_node *rn,
        struct bgp *bgp;
        struct bgp_path_info *prev;
        struct bgp_path_info *next;
-       char pfx_buf[PREFIX2STR_BUFFER];
+       char pfx_buf[PREFIX2STR_BUFFER] = {};
 
 
        bgp = bgp_get_default(); /* assume 1 instance for now */
index b9af7686419325d5076039abda5cda5d16ef1299..0120c517c64a4564ffbb3d16c87f3973896b9978 100644 (file)
@@ -794,6 +794,9 @@ if test "$ac_cv_lib_json_c_json_object_get" = "no"; then
 fi
 ])
 
+AC_ARG_ENABLE([ccls],
+AS_HELP_STRING([--enable-ccls], [Write .ccls config for this build]))
+
 AC_ARG_ENABLE([dev_build],
     AS_HELP_STRING([--enable-dev-build], [build for development]))
 
@@ -2820,6 +2823,42 @@ AC_CONFIG_FILES([tools/frrcommon.sh])
 AC_CONFIG_FILES([tools/frr.service])
 AC_CONFIG_FILES([tools/frr@.service])
 
+# dnl write out a ccls file with our compile configuration
+# dnl have to add -Wno-unused-function otherwise foobar_cmd_magic causes
+# dnl all DEFPY(), et al., macros to flag as errors.
+AS_IF([test "$enable_ccls" = "yes"], [
+    AC_CONFIG_COMMANDS([gen-dot-ccls], [
+        cat > "${srcdir}/.ccls" <<EOF
+clang
+-DHAVE_CONFIG_H
+-I.
+-I./include
+-I./lib
+-I./lib/assert
+-DSYSCONFDIR="${ac_frr_sysconfdir}"
+-DCONFDATE=${ac_frr_confdate}
+EOF
+        if test "$ac_abs_top_builddir" != "$ac_abs_top_srcdir"; then
+            echo "-I${ac_abs_top_builddir}" >> "${srcdir}/.ccls"
+        fi
+        if test -n "$FRR_ALL_CCLS_FLAGS"; then
+            echo ${FRR_ALL_CCLS_FLAGS} | tr ' ' '\n' >> "${srcdir}/.ccls"
+        fi
+        if test -n "$FRR_ALL_CCLS_CFLAGS"; then
+            cat >> "${srcdir}/.ccls" <<EOF
+%c $(echo ${FRR_ALL_CCLS_CFLAGS} | sed -e 's/  */\n%c /g')
+%c -Wno-unused-function
+EOF
+fi
+    ], [
+    FRR_ALL_CCLS_FLAGS="$(echo ${LIBYANG_CFLAGS} ${LUA_INCLUDE} ${SQLITE3_CFLAGS} | sed -e 's/  */ /g')"
+    FRR_ALL_CCLS_CFLAGS="$(echo ${CFLAGS} ${WERROR} ${AC_CFLAGS} ${SAN_FLAGS} | sed -e 's/  */ /g')"
+    ac_frr_confdate="${CONFDATE}"
+    ac_frr_sysconfdir="${sysconfdir}/"
+    ])
+])
+
+
 AS_IF([test "$with_pkg_git_version" = "yes"], [
     AC_CONFIG_COMMANDS([lib/gitversion.h], [
        dst="${ac_abs_top_builddir}/lib/gitversion.h"
diff --git a/doc/developer/building-frr-for-ubuntu2204.rst b/doc/developer/building-frr-for-ubuntu2204.rst
new file mode 100644 (file)
index 0000000..6b941b3
--- /dev/null
@@ -0,0 +1,171 @@
+Ubuntu 22.04 LTS
+================
+
+This document describes installation from source. If you want to build a
+``deb``, see :ref:`packaging-debian`.
+
+Installing Dependencies
+-----------------------
+
+.. code-block:: console
+
+   sudo apt update
+   sudo apt-get install \
+      git autoconf automake libtool make libreadline-dev texinfo \
+      pkg-config libpam0g-dev libjson-c-dev bison flex \
+      libc-ares-dev python3-dev python3-sphinx \
+      install-info build-essential libsnmp-dev perl \
+      libcap-dev python2 libelf-dev libunwind-dev \
+      libyang2 libyang2-dev
+
+.. include:: building-libunwind-note.rst
+
+Note that Ubuntu >= 20 no longer installs python 2.x, so it must be
+installed explicitly. Ensure that your system has a symlink named
+``/usr/bin/python`` pointing at ``/usr/bin/python3``.
+
+.. code-block:: shell
+
+   sudo ln -s /usr/bin/python3 /usr/bin/python
+   python --version
+
+In addition, ``pip`` for python2 must be installed if you wish to run
+the FRR topotests. That version of ``pip`` is not available from the
+ubuntu apt repositories; in order to install it:
+
+.. code-block:: shell
+
+   curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py
+   sudo python2 ./get-pip.py
+
+   # And verify the installation
+   pip2 --version
+
+
+Protobuf
+^^^^^^^^
+This is optional
+
+.. code-block:: console
+
+   sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
+ZeroMQ
+^^^^^^
+This is optional
+
+.. code-block:: console
+
+   sudo apt-get install libzmq5 libzmq3-dev
+
+Building & Installing FRR
+-------------------------
+
+Add FRR user and groups
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+   sudo groupadd -r -g 92 frr
+   sudo groupadd -r -g 85 frrvty
+   sudo adduser --system --ingroup frr --home /var/run/frr/ \
+      --gecos "FRR suite" --shell /sbin/nologin frr
+   sudo usermod -a -G frrvty frr
+
+Compile
+^^^^^^^
+
+.. include:: include-compile.rst
+
+Install FRR configuration files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+   sudo install -m 775 -o frr -g frr -d /var/log/frr
+   sudo install -m 775 -o frr -g frrvty -d /etc/frr
+   sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf
+   sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf
+   sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf
+   sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons
+
+Tweak sysctls
+^^^^^^^^^^^^^
+
+Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and
+MPLS (if supported by your platform). If your platform does not support MPLS,
+skip the MPLS related configuration in this section.
+
+Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the
+other settings):
+
+::
+
+   # Uncomment the next line to enable packet forwarding for IPv4
+   net.ipv4.ip_forward=1
+
+   # Uncomment the next line to enable packet forwarding for IPv6
+   #  Enabling this option disables Stateless Address Autoconfiguration
+   #  based on Router Advertisements for this host
+   net.ipv6.conf.all.forwarding=1
+
+Reboot or use ``sysctl -p`` to apply the same config to the running system.
+
+Add MPLS kernel modules
+"""""""""""""""""""""""
+
+Ubuntu 22.04 ships with kernel 5.15; MPLS modules are present by default.  To
+enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`:
+
+::
+
+   # Load MPLS Kernel Modules
+   mpls_router
+   mpls_iptunnel
+
+
+And load the kernel modules on the running system:
+
+.. code-block:: console
+
+   sudo modprobe mpls-router mpls-iptunnel
+
+If the above command returns an error, you may need to install the appropriate
+or latest linux-modules-extra-<kernel-version>-generic package. For example
+``apt-get install linux-modules-extra-`uname -r`-generic``
+
+Enable MPLS Forwarding
+""""""""""""""""""""""
+
+Edit :file:`/etc/sysctl.conf` and add the following lines. Make sure to add a line
+equal to :file:`net.mpls.conf.eth0.input` for each interface used with MPLS.
+
+::
+
+   # Enable MPLS Label processing on all interfaces
+   net.mpls.conf.eth0.input=1
+   net.mpls.conf.eth1.input=1
+   net.mpls.conf.eth2.input=1
+   net.mpls.platform_labels=100000
+
+Install service files
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+   sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service
+   sudo systemctl enable frr
+
+Enable daemons
+^^^^^^^^^^^^^^
+
+Open :file:`/etc/frr/daemons` with your text editor of choice. Look for the
+section with ``watchfrr_enable=...`` and ``zebra=...`` etc.  Enable the daemons
+as required by changing the value to ``yes``.
+
+Start FRR
+^^^^^^^^^
+
+.. code-block:: shell
+
+   systemctl start frr
index 513cac61797a92962477ca0ae9327d750aa1ad27..b98d237e68a8cff33f8a76638ed2fef82070bd1b 100644 (file)
@@ -17,7 +17,6 @@ obtained by running ``./configure -h``. The options shown below are examples.
        --localstatedir=/var/run/frr \
        --sysconfdir=/etc/frr \
        --with-moduledir=\${prefix}/lib/frr/modules \
-       --with-libyang-pluginsdir=\${prefix}/lib/frr/libyang_plugins \
        --enable-configfile-mask=0640 \
        --enable-logfile-mask=0640 \
        --enable-snmp=agentx \
index 13936e18ed846c0fc2b6f6e6984fe8948c098f75..f44cf9df985d2b30af04dfee489893d92e832c8b 100644 (file)
@@ -17,15 +17,23 @@ Tested with Ubuntu 20.04,Ubuntu 18.04, and Debian 11.
 Instructions are the same for all setups (i.e. ExaBGP is only used for
 BGP tests).
 
+Tshark is only required if you enable any packet captures on test runs.
+
+Valgrind is only required if you enable valgrind on test runs.
+
 Installing Topotest Requirements
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code:: shell
 
-   apt-get install gdb
-   apt-get install iproute2
-   apt-get install net-tools
-   apt-get install python3-pip
+   apt-get install \
+       gdb \
+       iproute2 \
+       net-tools \
+       python3-pip \
+       iputils-ping \
+       tshark \
+       valgrind
    python3 -m pip install wheel
    python3 -m pip install 'pytest>=6.2.4'
    python3 -m pip install 'pytest-xdist>=2.3.0'
@@ -119,6 +127,7 @@ If you prefer to manually build FRR, then use the following suggested config:
        --sysconfdir=/etc/frr \
        --enable-vtysh \
        --enable-pimd \
+       --enable-pim6d \
        --enable-sharpd \
        --enable-multipath=64 \
        --enable-user=frr \
@@ -311,32 +320,6 @@ Test will set exit code which can be used with ``git bisect``.
 
 For the simulated topology, see the description in the python file.
 
-StdErr log from daemos after exit
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To enable the reporting of any messages seen on StdErr after the daemons exit,
-the following env variable can be set::
-
-   export TOPOTESTS_CHECK_STDERR=Yes
-
-(The value doesn't matter at this time. The check is whether the env
-variable exists or not.) There is no pass/fail on this reporting; the
-Output will be reported to the console.
-
-Collect Memory Leak Information
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-FRR processes can report unfreed memory allocations upon exit. To
-enable the reporting of memory leaks, define an environment variable
-``TOPOTESTS_CHECK_MEMLEAK`` with the file prefix, i.e.::
-
-   export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
-
-This will enable the check and output to console and the writing of
-the information to files with the given prefix (followed by testname),
-ie :file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case
-of a memory leak.
-
 Running Topotests with AddressSanitizer
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -562,6 +545,48 @@ Here's an example of launching ``zebra`` and ``bgpd`` inside ``gdb`` on router
           --gdb-breakpoints=nb_config_diff \
           all-protocol-startup
 
+Reporting Memleaks with FRR Memory Statistics
+"""""""""""""""""""""""""""""""""""""""""""""
+
+FRR reports all allocated FRR memory objects on exit to standard error.
+Topotest can be run to report such output as errors in order to check for
+memleaks in FRR memory allocations. Specifying the CLI argument
+``--memleaks`` will enable reporting FRR-based memory allocations at exit as errors.
+
+.. code:: shell
+
+   sudo -E pytest --memleaks all-protocol-startup
+
+
+StdErr log from daemons after exit
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running with ``--memleaks``, to enable the reporting of other,
+non-memory related, messages seen on StdErr after the daemons exit,
+the following env variable can be set::
+
+   export TOPOTESTS_CHECK_STDERR=Yes
+
+(The value doesn't matter at this time. The check is whether the env
+variable exists or not.) There is no pass/fail on this reporting; the
+Output will be reported to the console.
+
+Collect Memory Leak Information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running with ``--memleaks``, FRR processes report unfreed memory
+allocations upon exit. To enable also reporting of memory leaks to a specific
+location, define an environment variable ``TOPOTESTS_CHECK_MEMLEAK`` with the
+file prefix, i.e.::
+
+   export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
+
+For tests that support the TOPOTESTS_CHECK_MEMLEAK environment variable, this
+will enable output of the information to files with the given prefix (followed
+by testname), e.g.,
+:file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case
+of a memory leak.
+
 Detecting Memleaks with Valgrind
 """"""""""""""""""""""""""""""""
 
index ed5d6cb5c649a663659529a71b42dbc069398b7f..f045ca239eef6901b16f199cdcd719eb585da7d1 100644 (file)
@@ -2897,6 +2897,13 @@ address-family:
    extended community values as described in
    :ref:`bgp-extended-communities-attribute`.
 
+.. clicmd:: label vpn export allocation-mode per-vrf|per-nexthop
+
+   Select how labels are allocated in the given VRF. By default, the `per-vrf`
+   mode is selected, and one label is used for all prefixes from the VRF. The
+   `per-nexthop` will use a unique label for all prefixes that are reachable
+   via the same nexthop.
+
 .. clicmd:: label vpn export (0..1048575)|auto
 
    Enables an MPLS label to be attached to a route exported from the current
index 2310d397cd5678cf8b7b614ed8aac8a1f0fb77df..8e8fb246086b89461a1bbd0a36f940c4bb542dce 100644 (file)
@@ -390,6 +390,18 @@ and the configuration files in :file:`/usr/local/etc`. The :file:`/usr/local/`
 installation prefix and other directories may be changed using the following
 options to the configuration script.
 
+.. option:: --enable-ccls
+
+   Enable the creation of a :file:`.ccls` file in the top level source
+   directory.
+
+   Some development environments (e.g., LSP server within emacs, et al.) can
+   utilize :clicmd:`ccls` to provide highly sophisticated IDE features (e.g.,
+   semantically accurate jump-to definition/reference, and even code
+   refactoring). The `--enable-ccls` causes :file:`configure` to generate a
+   configuration for the :clicmd:`ccls` command, based on the configured
+   FRR build environment.
+
 .. option:: --prefix <prefix>
 
    Install architecture-independent files in `prefix` [/usr/local].
index 8dacb9c9dc25bab5e506d0a9ef2f215c5317dd83..2f4c956ffd9059533b338bf124f74b1760e5c861 100644 (file)
@@ -287,6 +287,19 @@ OSPF6 interface
 
    Sets interface's Router Dead Interval. Default value is 40.
 
+.. clicmd:: ipv6 ospf6 graceful-restart hello-delay HELLODELAYINTERVAL
+
+   Set the length of time during which Grace-LSAs are sent at 1-second intervals
+   while coming back up after an unplanned outage. During this time, no hello
+   packets are sent.
+
+   A higher hello delay will increase the chance that all neighbors are notified
+   about the ongoing graceful restart before receiving a hello packet (which is
+   crucial for the graceful restart to succeed). The hello delay shouldn't be set
+   too high, however, otherwise the adjacencies might time out. As a best practice,
+   it's recommended to set the hello delay and hello interval with the same values.
+   The default value is 10 seconds.
+
 .. clicmd:: ipv6 ospf6 retransmit-interval RETRANSMITINTERVAL
 
    Sets interface's Rxmt Interval. Default value is 5.
@@ -343,6 +356,10 @@ Graceful Restart
    To perform a graceful shutdown, the "graceful-restart prepare ipv6 ospf"
    EXEC-level command needs to be issued before restarting the ospf6d daemon.
 
+   When Graceful Restart is enabled and the ospf6d daemon crashes or is killed
+   abruptly (e.g. SIGKILL), it will attempt an unplanned Graceful Restart once
+   it restarts.
+
 .. clicmd:: graceful-restart helper enable [A.B.C.D]
 
 
index 5171832604e5aefb377693d0d519b28d66dcbc0c..effad0fd003d674e723e4784dff82223fb61cfad 100644 (file)
@@ -635,6 +635,19 @@ Interfaces
    :clicmd:`ip ospf dead-interval minimal hello-multiplier (2-20)` is also
    specified for the interface.
 
+.. clicmd:: ip ospf graceful-restart hello-delay (1-1800)
+
+   Set the length of time during which Grace-LSAs are sent at 1-second intervals
+   while coming back up after an unplanned outage. During this time, no hello
+   packets are sent.
+
+   A higher hello delay will increase the chance that all neighbors are notified
+   about the ongoing graceful restart before receiving a hello packet (which is
+   crucial for the graceful restart to succeed). The hello delay shouldn't be set
+   too high, however, otherwise the adjacencies might time out. As a best practice,
+   it's recommended to set the hello delay and hello interval with the same values.
+   The default value is 10 seconds.
+
 .. clicmd:: ip ospf network (broadcast|non-broadcast|point-to-multipoint|point-to-point [dmvpn])
 
    When configuring a point-to-point network on an interface and the interface
@@ -770,6 +783,10 @@ Graceful Restart
    To perform a graceful shutdown, the "graceful-restart prepare ip ospf"
    EXEC-level command needs to be issued before restarting the ospfd daemon.
 
+   When Graceful Restart is enabled and the ospfd daemon crashes or is killed
+   abruptly (e.g. SIGKILL), it will attempt an unplanned Graceful Restart once
+   it restarts.
+
 .. clicmd:: graceful-restart helper enable [A.B.C.D]
 
 
index 67323e61f32db250ebca6ab17d8610cb73779f4b..f9c772430221de0bf212793caaf9fd4503236441 100644 (file)
@@ -159,6 +159,11 @@ RIP Configuration
    If `poisoned-reverse` is also set, the router sends the poisoned routes
    with highest metric back to the sending router.
 
+.. clicmd:: allow-ecmp [1-MULTIPATH_NUM]
+
+   Control how many ECMP paths RIP can inject for the same prefix. If specified
+   without a number, a maximum is taken (compiled with ``--enable-multipath``).
+
 .. _rip-version-control:
 
 RIP Version Control
index b7f53365649d120616b84a89caf986eaf759fd23..bd19ae88e230b9cea9d26514521ce4ca24244969 100644 (file)
@@ -212,7 +212,7 @@ Route Map Match Command
 
 .. clicmd:: match source-protocol PROTOCOL_NAME
 
-  This is a ZEBRA specific match command.  Matches the
+  This is a ZEBRA and BGP specific match command.  Matches the
   originating protocol specified.
 
 .. clicmd:: match source-instance NUMBER
index 05847ba3940940b8ee41522328887ce0e46e9a9f..6d8aca97bbab82160183ad11565bd7487c6f7b4f 100644 (file)
@@ -90,7 +90,7 @@ a static prefix and gateway, with several possible forms.
 Multiple nexthop static route
 =============================
 
-To create multiple nexthops to the same NETWORK, just reenter the same
+To create multiple nexthops to the same NETWORK (also known as a multipath route), just reenter the same
 network statement with different nexthop information.
 
 .. code-block:: frr
@@ -122,7 +122,7 @@ nexthops, if the platform supports this.
    ip route 10.0.0.0/8 null0 255
 
 
-This will install a multihop route via the specified next-hops if they are
+This will install a multipath route via the specified next-hops if they are
 reachable, as well as a high-distance blackhole route, which can be useful to
 prevent traffic destined for a prefix to match less-specific routes (e.g.
 default) should the specified gateways not be reachable. E.g.:
index 07e2eafec59cc449b271c2fec74ab7104c1dd14d..e8900572694ae813393339160a9a05f228449ae9 100644 (file)
@@ -130,6 +130,7 @@ static const struct optspec os_always = {
        "      --limit-fds    Limit number of fds supported\n",
        lo_always};
 
+static bool logging_to_stdout = false; /* set when --log stdout specified */
 
 static const struct option lo_cfg[] = {
        {"config_file", required_argument, NULL, 'f'},
@@ -738,6 +739,11 @@ struct event_loop *frr_init(void)
        while ((log_arg = log_args_pop(di->early_logging))) {
                command_setup_early_logging(log_arg->target,
                                            di->early_loglevel);
+               /* this is a bit of a hack,
+                  but need to notice when
+                  the target is stdout */
+               if (strcmp(log_arg->target, "stdout") == 0)
+                       logging_to_stdout = true;
                XFREE(MTYPE_TMP, log_arg);
        }
 
@@ -1088,9 +1094,15 @@ static void frr_terminal_close(int isexit)
                             "%s: failed to open /dev/null: %s", __func__,
                             safe_strerror(errno));
        } else {
-               dup2(nullfd, 0);
-               dup2(nullfd, 1);
-               dup2(nullfd, 2);
+               int fd;
+               /*
+                * only redirect stdin, stdout, stderr to null when a tty also
+                * don't redirect when stdout is set with --log stdout
+                */
+               for (fd = 2; fd >= 0; fd--)
+                       if (isatty(fd) &&
+                           (fd != STDOUT_FILENO || !logging_to_stdout))
+                               dup2(nullfd, fd);
                close(nullfd);
        }
 }
@@ -1168,9 +1180,16 @@ void frr_run(struct event_loop *master)
                                     "%s: failed to open /dev/null: %s",
                                     __func__, safe_strerror(errno));
                } else {
-                       dup2(nullfd, 0);
-                       dup2(nullfd, 1);
-                       dup2(nullfd, 2);
+                       int fd;
+                       /*
+                        * only redirect stdin, stdout, stderr to null when a
+                        * tty also don't redirect when stdout is set with --log
+                        * stdout
+                        */
+                       for (fd = 2; fd >= 0; fd--)
+                               if (isatty(fd) &&
+                                   (fd != STDOUT_FILENO || !logging_to_stdout))
+                                       dup2(nullfd, fd);
                        close(nullfd);
                }
 
@@ -1194,7 +1213,7 @@ void frr_fini(void)
 {
        FILE *fp;
        char filename[128];
-       int have_leftovers;
+       int have_leftovers = 0;
 
        hook_call(frr_fini);
 
@@ -1220,16 +1239,15 @@ void frr_fini(void)
        /* frrmod_init -> nothing needed / hooks */
        rcu_shutdown();
 
-       if (!debug_memstats_at_exit)
-               return;
-
-       have_leftovers = log_memstats(stderr, di->name);
+       /* also log memstats to stderr when stderr goes to a file */
+       if (debug_memstats_at_exit || !isatty(STDERR_FILENO))
+               have_leftovers = log_memstats(stderr, di->name);
 
        /* in case we decide at runtime that we want exit-memstats for
-        * a daemon, but it has no stderr because it's daemonized
+        * a daemon
         * (only do this if we actually have something to print though)
         */
-       if (!have_leftovers)
+       if (!debug_memstats_at_exit || !have_leftovers)
                return;
 
        snprintf(filename, sizeof(filename), "/tmp/frr-memstats-%s-%llu-%llu",
index 3262534de59ea5e4e644fa5472a089507596cb38..676b563fff70e3e67032f4cccf9df6c0429cf040 100644 (file)
@@ -55,6 +55,7 @@ extern "C" {
 #define OSPF_ROUTER_DEAD_INTERVAL_DEFAULT  40
 #define OSPF_ROUTER_DEAD_INTERVAL_MINIMAL   1
 #define OSPF_HELLO_INTERVAL_DEFAULT        10
+#define OSPF_HELLO_DELAY_DEFAULT           10
 #define OSPF_ROUTER_PRIORITY_DEFAULT        1
 #define OSPF_RETRANSMIT_INTERVAL_DEFAULT    5
 #define OSPF_TRANSMIT_DELAY_DEFAULT         1
index e286a32f9212ba1b699fba7a5a9eaf4df8f0ac0b..d8ce83d219ead532a474fce9c1078f2d120a4433 100644 (file)
@@ -336,6 +336,22 @@ prefix_list_entry_lookup(struct prefix_list *plist, struct prefix *prefix,
        return NULL;
 }
 
+static bool
+prefix_list_entry_lookup_prefix(struct prefix_list *plist,
+                               struct prefix_list_entry *plist_entry)
+{
+       struct prefix_list_entry *pentry = NULL;
+
+       for (pentry = plist->head; pentry; pentry = pentry->next) {
+               if (pentry == plist_entry)
+                       continue;
+               if (prefix_same(&pentry->prefix, &plist_entry->prefix))
+                       return true;
+       }
+
+       return false;
+}
+
 static void trie_walk_affected(size_t validbits, struct pltrie_table *table,
                               uint8_t byte, struct prefix_list_entry *object,
                               void (*fn)(struct prefix_list_entry *object,
@@ -404,12 +420,16 @@ static void prefix_list_trie_del(struct prefix_list *plist,
 
 
 void prefix_list_entry_delete(struct prefix_list *plist,
-                             struct prefix_list_entry *pentry,
-                             int update_list)
+                             struct prefix_list_entry *pentry, int update_list)
 {
+       bool duplicate = false;
+
        if (plist == NULL || pentry == NULL)
                return;
 
+       if (prefix_list_entry_lookup_prefix(plist, pentry))
+               duplicate = true;
+
        prefix_list_trie_del(plist, pentry);
 
        if (pentry->prev)
@@ -421,8 +441,10 @@ void prefix_list_entry_delete(struct prefix_list *plist,
        else
                plist->tail = pentry->prev;
 
-       route_map_notify_pentry_dependencies(plist->name, pentry,
-                                            RMAP_EVENT_PLIST_DELETED);
+       if (!duplicate)
+               route_map_notify_pentry_dependencies(plist->name, pentry,
+                                                    RMAP_EVENT_PLIST_DELETED);
+
        prefix_list_entry_free(pentry);
 
        plist->count--;
@@ -557,11 +579,15 @@ static void prefix_list_entry_add(struct prefix_list *plist,
 void prefix_list_entry_update_start(struct prefix_list_entry *ple)
 {
        struct prefix_list *pl = ple->pl;
+       bool duplicate = false;
 
        /* Not installed, nothing to do. */
        if (!ple->installed)
                return;
 
+       if (prefix_list_entry_lookup_prefix(pl, ple))
+               duplicate = true;
+
        prefix_list_trie_del(pl, ple);
 
        /* List manipulation: shameless copy from `prefix_list_entry_delete`. */
@@ -574,8 +600,9 @@ void prefix_list_entry_update_start(struct prefix_list_entry *ple)
        else
                pl->tail = ple->prev;
 
-       route_map_notify_pentry_dependencies(pl->name, ple,
-                                            RMAP_EVENT_PLIST_DELETED);
+       if (!duplicate)
+               route_map_notify_pentry_dependencies(pl->name, ple,
+                                                    RMAP_EVENT_PLIST_DELETED);
        pl->count--;
 
        route_map_notify_dependencies(pl->name, RMAP_EVENT_PLIST_DELETED);
index 9b2e18b4a780a65cefa9fcd279ddb85f45c8a5d8..7277744dc5b96d0ae310c860f0045b10f3e1cd23 100644 (file)
@@ -259,6 +259,8 @@ DECLARE_QOBJ_TYPE(route_map);
        (strmatch(C, "frr-zebra-route-map:ipv4-next-hop-prefix-length"))
 #define IS_MATCH_SRC_PROTO(C)                                                  \
        (strmatch(C, "frr-zebra-route-map:source-protocol"))
+#define IS_MATCH_BGP_SRC_PROTO(C)                                              \
+       (strmatch(C, "frr-bgp-route-map:source-protocol"))
 #define IS_MATCH_SRC_INSTANCE(C)                                               \
        (strmatch(C, "frr-zebra-route-map:source-instance"))
 /* BGP route-map match conditions */
index 419086c4c6bb92d03fc3f004cb548ca406bf50ec..0ccc78e8380fecbbf6f1756c97b48579f15543a1 100644 (file)
@@ -599,11 +599,14 @@ void route_map_condition_show(struct vty *vty, const struct lyd_node *dnode,
                        yang_dnode_get_string(
                                dnode,
                                "./rmap-match-condition/frr-zebra-route-map:ipv4-prefix-length"));
-       } else if (IS_MATCH_SRC_PROTO(condition)) {
+       } else if (IS_MATCH_SRC_PROTO(condition) ||
+                  IS_MATCH_BGP_SRC_PROTO(condition)) {
                vty_out(vty, " match source-protocol %s\n",
                        yang_dnode_get_string(
                                dnode,
-                               "./rmap-match-condition/frr-zebra-route-map:source-protocol"));
+                               IS_MATCH_SRC_PROTO(condition)
+                                       ? "./rmap-match-condition/frr-zebra-route-map:source-protocol"
+                                       : "./rmap-match-condition/frr-bgp-route-map:source-protocol"));
        } else if (IS_MATCH_SRC_INSTANCE(condition)) {
                vty_out(vty, " match source-instance %s\n",
                        yang_dnode_get_string(
index b5eaf7bff62dfe8d60f6d293accabd94d9d780e6..b8cb5b2b152ec4fa224cfcf78c4fd17dfe0211cc 100644 (file)
@@ -542,7 +542,7 @@ int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
                                                   void *ctx),
                      void *ctx, bool alloc_xp_copy)
 {
-       int ret;
+       int ret = 0;
        char xpath[MGMTD_MAX_XPATH_LEN];
        struct lyd_node *base_dnode = NULL;
        struct lyd_node *node;
index a49718a490bdb47e8313ef99b96ec4c627eb07e3..ab84b1efcf7471a2d0f379d9f7ae20ca954981df 100644 (file)
@@ -35,23 +35,18 @@ DECLARE_DLIST(mgmt_cmt_infos, struct mgmt_cmt_info_t, cmts);
  */
 static struct vty *rollback_vty;
 
-static bool mgmt_history_record_exists(char *file_path)
+static bool file_exists(const char *path)
 {
-       int exist;
-
-       exist = access(file_path, F_OK);
-       if (exist == 0)
-               return true;
-       else
-               return false;
+       return !access(path, F_OK);
 }
 
-static void mgmt_history_remove_file(char *name)
+static void remove_file(const char *path)
 {
-       if (remove(name) == 0)
-               zlog_debug("Old commit info deletion succeeded");
-       else
-               zlog_err("Old commit info deletion failed");
+       if (!file_exists(path))
+               return;
+       if (unlink(path))
+               zlog_err("Failed to remove commit history file %s: %s", path,
+                        safe_strerror(errno));
 }
 
 static struct mgmt_cmt_info_t *mgmt_history_new_cmt_info(void)
@@ -84,7 +79,7 @@ static struct mgmt_cmt_info_t *mgmt_history_create_cmt_rec(void)
                        last_cmt_info = cmt_info;
 
                if (last_cmt_info) {
-                       mgmt_history_remove_file(last_cmt_info->cmt_json_file);
+                       remove_file(last_cmt_info->cmt_json_file);
                        mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
                        XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
                }
@@ -114,20 +109,21 @@ static bool mgmt_history_read_cmt_record_index(void)
        struct mgmt_cmt_info_t *new;
        int cnt = 0;
 
+       if (!file_exists(MGMTD_COMMIT_FILE_PATH))
+               return false;
+
        fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "rb");
        if (!fp) {
-               zlog_err("Failed to open file %s rb mode",
-                        MGMTD_COMMIT_INDEX_FILE_NAME);
+               zlog_err("Failed to open commit history %s for reading: %s",
+                        MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
                return false;
        }
 
        while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
                if (cnt < MGMTD_MAX_COMMIT_LIST) {
-                       if (!mgmt_history_record_exists(
-                                   cmt_info.cmt_json_file)) {
-                               zlog_err(
-                                       "Commit record present in index_file, but commit file %s missing",
-                                       cmt_info.cmt_json_file);
+                       if (!file_exists(cmt_info.cmt_json_file)) {
+                               zlog_err("Commit in index, but file %s missing",
+                                        cmt_info.cmt_json_file);
                                continue;
                        }
 
@@ -136,8 +132,9 @@ static bool mgmt_history_read_cmt_record_index(void)
                        memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
                        mgmt_cmt_infos_add_tail(&mm->cmts, new);
                } else {
-                       zlog_err("More records found in index file %s",
-                                MGMTD_COMMIT_INDEX_FILE_NAME);
+                       zlog_warn(
+                               "More records found in commit history file %s than expected",
+                               MGMTD_COMMIT_INDEX_FILE_NAME);
                        fclose(fp);
                        return false;
                }
@@ -157,11 +154,10 @@ static bool mgmt_history_dump_cmt_record_index(void)
        struct mgmt_cmt_info_t cmt_info_set[10];
        int cnt = 0;
 
-       mgmt_history_remove_file((char *)MGMTD_COMMIT_INDEX_FILE_NAME);
-       fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "ab");
+       fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "wb");
        if (!fp) {
-               zlog_err("Failed to open file %s ab mode",
-                        MGMTD_COMMIT_INDEX_FILE_NAME);
+               zlog_err("Failed to open commit history %s for writing: %s",
+                        MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
                return false;
        }
 
@@ -179,11 +175,11 @@ static bool mgmt_history_dump_cmt_record_index(void)
        ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
        fclose(fp);
        if (ret != cnt) {
-               zlog_err("Write record failed");
+               zlog_err("Failed to write full commit history, removing file");
+               remove_file(MGMTD_COMMIT_INDEX_FILE_NAME);
                return false;
-       } else {
-               return true;
        }
+       return true;
 }
 
 static int mgmt_history_rollback_to_cmt(struct vty *vty,
@@ -281,7 +277,7 @@ int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str)
                        return ret;
                }
 
-               mgmt_history_remove_file(cmt_info->cmt_json_file);
+               remove_file(cmt_info->cmt_json_file);
                mgmt_cmt_infos_del(&mm->cmts, cmt_info);
                XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
        }
@@ -322,7 +318,7 @@ int mgmt_history_rollback_n(struct vty *vty, int num_cmts)
                }
 
                cnt++;
-               mgmt_history_remove_file(cmt_info->cmt_json_file);
+               remove_file(cmt_info->cmt_json_file);
                mgmt_cmt_infos_del(&mm->cmts, cmt_info);
                XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
        }
index 976eb529d7fcaeeaf2b0749907a533c5ed1dc308..3d5d4d259f47e4f39d9583e78a92191f4594b9c2 100644 (file)
@@ -14,6 +14,7 @@
 #include "log.h"
 #include "hook.h"
 #include "printfrr.h"
+#include "lib_errors.h"
 
 #include "ospf6d/ospf6_lsa.h"
 #include "ospf6d/ospf6_lsdb.h"
 #include "ospf6d/ospf6_zebra.h"
 #include "ospf6d/ospf6_message.h"
 #include "ospf6d/ospf6_neighbor.h"
+#include "ospf6d/ospf6_network.h"
 #include "ospf6d/ospf6_flood.h"
 #include "ospf6d/ospf6_intra.h"
 #include "ospf6d/ospf6_spf.h"
 #include "ospf6d/ospf6_gr.h"
 #include "ospf6d/ospf6_gr_clippy.c"
 
-static void ospf6_gr_nvm_delete(struct ospf6 *ospf6);
+static void ospf6_gr_grace_period_expired(struct event *thread);
 
 /* Originate and install Grace-LSA for a given interface. */
-static int ospf6_gr_lsa_originate(struct ospf6_interface *oi)
+static int ospf6_gr_lsa_originate(struct ospf6_interface *oi,
+                                 enum ospf6_gr_restart_reason reason)
 {
-       struct ospf6_gr_info *gr_info = &oi->area->ospf6->gr_info;
+       struct ospf6 *ospf6 = oi->area->ospf6;
+       struct ospf6_gr_info *gr_info = &ospf6->gr_info;
        struct ospf6_lsa_header *lsa_header;
        struct ospf6_grace_lsa *grace_lsa;
        struct ospf6_lsa *lsa;
+       uint16_t lsa_length;
        char buffer[OSPF6_MAX_LSASIZE];
 
        if (IS_OSPF6_DEBUG_ORIGINATE(LINK))
@@ -61,29 +66,59 @@ static int ospf6_gr_lsa_originate(struct ospf6_interface *oi)
        /* Put restart reason. */
        grace_lsa->tlv_reason.header.type = htons(RESTART_REASON_TYPE);
        grace_lsa->tlv_reason.header.length = htons(RESTART_REASON_LENGTH);
-       if (gr_info->restart_support)
-               grace_lsa->tlv_reason.reason = OSPF6_GR_SW_RESTART;
-       else
-               grace_lsa->tlv_reason.reason = OSPF6_GR_UNKNOWN_RESTART;
+       grace_lsa->tlv_reason.reason = reason;
 
        /* Fill LSA Header */
+       lsa_length = sizeof(*lsa_header) + sizeof(*grace_lsa);
        lsa_header->age = 0;
        lsa_header->type = htons(OSPF6_LSTYPE_GRACE_LSA);
        lsa_header->id = htonl(oi->interface->ifindex);
-       lsa_header->adv_router = oi->area->ospf6->router_id;
+       lsa_header->adv_router = ospf6->router_id;
        lsa_header->seqnum =
                ospf6_new_ls_seqnum(lsa_header->type, lsa_header->id,
                                    lsa_header->adv_router, oi->lsdb);
-       lsa_header->length = htons(sizeof(*lsa_header) + sizeof(*grace_lsa));
+       lsa_header->length = htons(lsa_length);
 
        /* LSA checksum */
        ospf6_lsa_checksum(lsa_header);
 
-       /* create LSA */
-       lsa = ospf6_lsa_create(lsa_header);
-
-       /* Originate */
-       ospf6_lsa_originate_interface(lsa, oi);
+       if (reason == OSPF6_GR_UNKNOWN_RESTART) {
+               struct ospf6_header *oh;
+               uint32_t *uv32;
+               int n;
+               uint16_t length = OSPF6_HEADER_SIZE + 4 + lsa_length;
+               struct iovec iovector[2] = {};
+
+               /* Reserve space for OSPFv3 header. */
+               memmove(&buffer[OSPF6_HEADER_SIZE + 4], buffer, lsa_length);
+
+               /* Fill in the OSPFv3 header. */
+               oh = (struct ospf6_header *)buffer;
+               oh->version = OSPFV3_VERSION;
+               oh->type = OSPF6_MESSAGE_TYPE_LSUPDATE;
+               oh->router_id = oi->area->ospf6->router_id;
+               oh->area_id = oi->area->area_id;
+               oh->instance_id = oi->instance_id;
+               oh->reserved = 0;
+               oh->length = htons(length);
+
+               /* Fill LSA header. */
+               uv32 = (uint32_t *)&buffer[sizeof(*oh)];
+               *uv32 = htonl(1);
+
+               /* Send packet. */
+               iovector[0].iov_base = lsa_header;
+               iovector[0].iov_len = length;
+               n = ospf6_sendmsg(oi->linklocal_addr, &allspfrouters6,
+                                 oi->interface->ifindex, iovector, ospf6->fd);
+               if (n != length)
+                       flog_err(EC_LIB_DEVELOPMENT,
+                                "%s: could not send entire message", __func__);
+       } else {
+               /* Create and install LSA. */
+               lsa = ospf6_lsa_create(lsa_header);
+               ospf6_lsa_originate_interface(lsa, oi);
+       }
 
        return 0;
 }
@@ -134,11 +169,10 @@ static void ospf6_gr_restart_exit(struct ospf6 *ospf6, const char *reason)
 
        ospf6->gr_info.restart_in_progress = false;
        ospf6->gr_info.finishing_restart = true;
+       XFREE(MTYPE_TMP, ospf6->gr_info.exit_reason);
+       ospf6->gr_info.exit_reason = XSTRDUP(MTYPE_TMP, reason);
        EVENT_OFF(ospf6->gr_info.t_grace_period);
 
-       /* Record in non-volatile memory that the restart is complete. */
-       ospf6_gr_nvm_delete(ospf6);
-
        for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, onode, area)) {
                struct ospf6_interface *oi;
 
@@ -156,6 +190,14 @@ static void ospf6_gr_restart_exit(struct ospf6 *ospf6, const char *reason)
                OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(area);
 
                for (ALL_LIST_ELEMENTS_RO(area->if_list, anode, oi)) {
+                       /* Disable hello delay. */
+                       if (oi->gr.hello_delay.t_grace_send) {
+                               oi->gr.hello_delay.elapsed_seconds = 0;
+                               EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+                               event_add_event(master, ospf6_hello_send, oi, 0,
+                                               &oi->thread_send_hello);
+                       }
+
                        /* Reoriginate Link-LSA. */
                        if (oi->type != OSPF_IFTYPE_VIRTUALLINK)
                                OSPF6_LINK_LSA_EXECUTE(oi);
@@ -195,6 +237,26 @@ static void ospf6_gr_restart_exit(struct ospf6 *ospf6, const char *reason)
        ospf6_gr_flush_grace_lsas(ospf6);
 }
 
+/* Enter the Graceful Restart mode. */
+void ospf6_gr_restart_enter(struct ospf6 *ospf6,
+                           enum ospf6_gr_restart_reason reason, int timestamp)
+{
+       unsigned long remaining_time;
+
+       ospf6->gr_info.restart_in_progress = true;
+       ospf6->gr_info.reason = reason;
+
+       /* Schedule grace period timeout. */
+       remaining_time = timestamp - time(NULL);
+       if (IS_DEBUG_OSPF6_GR)
+               zlog_debug(
+                       "GR: remaining time until grace period expires: %lu(s)",
+                       remaining_time);
+
+       event_add_timer(master, ospf6_gr_grace_period_expired, ospf6,
+                       remaining_time, &ospf6->gr_info.t_grace_period);
+}
+
 #define RTR_LSA_MISSING 0
 #define RTR_LSA_ADJ_FOUND 1
 #define RTR_LSA_ADJ_NOT_FOUND 2
@@ -466,11 +528,26 @@ static void ospf6_gr_grace_period_expired(struct event *thread)
        ospf6_gr_restart_exit(ospf6, "grace period has expired");
 }
 
+/* Send extra Grace-LSA out the interface (unplanned outages only). */
+void ospf6_gr_iface_send_grace_lsa(struct event *thread)
+{
+       struct ospf6_interface *oi = EVENT_ARG(thread);
+
+       ospf6_gr_lsa_originate(oi, oi->area->ospf6->gr_info.reason);
+
+       if (++oi->gr.hello_delay.elapsed_seconds < oi->gr.hello_delay.interval)
+               event_add_timer(master, ospf6_gr_iface_send_grace_lsa, oi, 1,
+                               &oi->gr.hello_delay.t_grace_send);
+       else
+               event_add_event(master, ospf6_hello_send, oi, 0,
+                               &oi->thread_send_hello);
+}
+
 /*
  * Record in non-volatile memory that the given OSPF instance is attempting to
  * perform a graceful restart.
  */
-static void ospf6_gr_nvm_update(struct ospf6 *ospf6)
+static void ospf6_gr_nvm_update(struct ospf6 *ospf6, bool prepare)
 {
        const char *inst_name;
        json_object *json;
@@ -496,16 +573,18 @@ static void ospf6_gr_nvm_update(struct ospf6 *ospf6)
                                       json_instance);
        }
 
+       json_object_int_add(json_instance, "gracePeriod",
+                           ospf6->gr_info.grace_period);
+
        /*
         * Record not only the grace period, but also a UNIX timestamp
         * corresponding to the end of that period. That way, once ospf6d is
         * restarted, it will be possible to take into account the time that
         * passed while ospf6d wasn't running.
         */
-       json_object_int_add(json_instance, "gracePeriod",
-                           ospf6->gr_info.grace_period);
-       json_object_int_add(json_instance, "timestamp",
-                           time(NULL) + ospf6->gr_info.grace_period);
+       if (prepare)
+               json_object_int_add(json_instance, "timestamp",
+                                   time(NULL) + ospf6->gr_info.grace_period);
 
        json_object_to_file_ext((char *)OSPF6D_GR_STATE, json,
                                JSON_C_TO_STRING_PRETTY);
@@ -516,7 +595,7 @@ static void ospf6_gr_nvm_update(struct ospf6 *ospf6)
  * Delete GR status information about the given OSPF instance from non-volatile
  * memory.
  */
-static void ospf6_gr_nvm_delete(struct ospf6 *ospf6)
+void ospf6_gr_nvm_delete(struct ospf6 *ospf6)
 {
        const char *inst_name;
        json_object *json;
@@ -552,6 +631,7 @@ void ospf6_gr_nvm_read(struct ospf6 *ospf6)
        json_object *json_instances;
        json_object *json_instance;
        json_object *json_timestamp;
+       json_object *json_grace_period;
        time_t timestamp = 0;
 
        inst_name = ospf6->name ? ospf6->name : VRF_DEFAULT_NAME;
@@ -573,29 +653,33 @@ void ospf6_gr_nvm_read(struct ospf6 *ospf6)
                                       json_instance);
        }
 
+       json_object_object_get_ex(json_instance, "gracePeriod",
+                                 &json_grace_period);
        json_object_object_get_ex(json_instance, "timestamp", &json_timestamp);
        if (json_timestamp) {
                time_t now;
-               unsigned long remaining_time;
 
-               /* Check if the grace period has already expired. */
+               /* Planned GR: check if the grace period has already expired. */
                now = time(NULL);
                timestamp = json_object_get_int(json_timestamp);
                if (now > timestamp) {
                        ospf6_gr_restart_exit(
                                ospf6, "grace period has expired already");
-               } else {
-                       /* Schedule grace period timeout. */
-                       ospf6->gr_info.restart_in_progress = true;
-                       remaining_time = timestamp - time(NULL);
-                       if (IS_DEBUG_OSPF6_GR)
-                               zlog_debug(
-                                       "GR: remaining time until grace period expires: %lu(s)",
-                                       remaining_time);
-                       event_add_timer(master, ospf6_gr_grace_period_expired,
-                                       ospf6, remaining_time,
-                                       &ospf6->gr_info.t_grace_period);
-               }
+               } else
+                       ospf6_gr_restart_enter(ospf6, OSPF6_GR_SW_RESTART,
+                                              timestamp);
+       } else if (json_grace_period) {
+               uint32_t grace_period;
+
+               /*
+                * Unplanned GR: the Grace-LSAs will be sent later as soon as
+                * the interfaces are operational.
+                */
+               grace_period = json_object_get_int(json_grace_period);
+               ospf6->gr_info.grace_period = grace_period;
+               ospf6_gr_restart_enter(ospf6, OSPF6_GR_UNKNOWN_RESTART,
+                                      time(NULL) +
+                                              ospf6->gr_info.grace_period);
        }
 
        json_object_object_del(json_instances, inst_name);
@@ -605,6 +689,24 @@ void ospf6_gr_nvm_read(struct ospf6 *ospf6)
        json_object_free(json);
 }
 
+void ospf6_gr_unplanned_start_interface(struct ospf6_interface *oi)
+{
+       /*
+        * Can't check OSPF interface state as the OSPF instance might not be
+        * enabled yet.
+        */
+       if (!if_is_operative(oi->interface) || if_is_loopback(oi->interface))
+               return;
+
+       /* Send Grace-LSA. */
+       ospf6_gr_lsa_originate(oi, oi->area->ospf6->gr_info.reason);
+
+       /* Start GR hello-delay interval. */
+       oi->gr.hello_delay.elapsed_seconds = 0;
+       event_add_timer(master, ospf6_gr_iface_send_grace_lsa, oi, 1,
+                       &oi->gr.hello_delay.t_grace_send);
+}
+
 /* Prepare to start a Graceful Restart. */
 static void ospf6_gr_prepare(void)
 {
@@ -625,25 +727,17 @@ static void ospf6_gr_prepare(void)
                                ospf6->gr_info.grace_period,
                                ospf6_vrf_id_to_name(ospf6->vrf_id));
 
-               /* Freeze OSPF routes in the RIB. */
-               if (ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period)) {
-                       zlog_warn(
-                               "%s: failed to activate graceful restart: not connected to zebra",
-                               __func__);
-                       continue;
-               }
-
                /* Send a Grace-LSA to all neighbors. */
                for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, anode, area)) {
                        for (ALL_LIST_ELEMENTS_RO(area->if_list, inode, oi)) {
                                if (oi->state < OSPF6_INTERFACE_POINTTOPOINT)
                                        continue;
-                               ospf6_gr_lsa_originate(oi);
+                               ospf6_gr_lsa_originate(oi, OSPF6_GR_SW_RESTART);
                        }
                }
 
                /* Record end of the grace period in non-volatile memory. */
-               ospf6_gr_nvm_update(ospf6);
+               ospf6_gr_nvm_update(ospf6, true);
 
                /*
                 * Mark that a Graceful Restart preparation is in progress, to
@@ -714,6 +808,12 @@ DEFPY(ospf6_graceful_restart, ospf6_graceful_restart_cmd,
        ospf6->gr_info.restart_support = true;
        ospf6->gr_info.grace_period = grace_period;
 
+       /* Freeze OSPF routes in the RIB. */
+       (void)ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
+
+       /* Record that GR is enabled in non-volatile memory. */
+       ospf6_gr_nvm_update(ospf6, false);
+
        return CMD_SUCCESS;
 }
 
@@ -736,6 +836,8 @@ DEFPY(ospf6_no_graceful_restart, ospf6_no_graceful_restart_cmd,
 
        ospf6->gr_info.restart_support = false;
        ospf6->gr_info.grace_period = OSPF6_DFLT_GRACE_INTERVAL;
+       ospf6_gr_nvm_delete(ospf6);
+       ospf6_zebra_gr_disable(ospf6);
 
        return CMD_SUCCESS;
 }
index 2c7e8b341c0c1edc2a658df372491d4d42574ef4..e6566a609872de63bb6dbd92321e40c0cbecb851 100644 (file)
@@ -155,9 +155,15 @@ extern int config_write_ospf6_gr(struct vty *vty, struct ospf6 *ospf6);
 extern int config_write_ospf6_gr_helper(struct vty *vty, struct ospf6 *ospf6);
 extern int config_write_ospf6_debug_gr_helper(struct vty *vty);
 
+extern void ospf6_gr_iface_send_grace_lsa(struct event *thread);
+extern void ospf6_gr_restart_enter(struct ospf6 *ospf6,
+                                  enum ospf6_gr_restart_reason reason,
+                                  int timestamp);
 extern void ospf6_gr_check_lsdb_consistency(struct ospf6 *ospf,
                                            struct ospf6_area *area);
 extern void ospf6_gr_nvm_read(struct ospf6 *ospf);
+extern void ospf6_gr_nvm_delete(struct ospf6 *ospf6);
+extern void ospf6_gr_unplanned_start_interface(struct ospf6_interface *oi);
 extern void ospf6_gr_init(void);
 
 #endif /* OSPF6_GR_H */
index e7148d66ba9a84f4b3fe4a65fb31b3f1f3b236a5..ea059c4be6ca541d4f2f896bca66dedf2c1d0113 100644 (file)
@@ -35,6 +35,7 @@
 #include "ospf6_proto.h"
 #include "lib/keychain.h"
 #include "ospf6_auth_trailer.h"
+#include "ospf6d/ospf6_interface_clippy.c"
 
 DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_IF, "OSPF6 interface");
 DEFINE_MTYPE(OSPF6D, OSPF6_AUTH_KEYCHAIN, "OSPF6 auth keychain");
@@ -202,6 +203,7 @@ struct ospf6_interface *ospf6_interface_create(struct interface *ifp)
        oi->priority = OSPF6_INTERFACE_PRIORITY;
 
        oi->hello_interval = OSPF_HELLO_INTERVAL_DEFAULT;
+       oi->gr.hello_delay.interval = OSPF_HELLO_DELAY_DEFAULT;
        oi->dead_interval = OSPF_ROUTER_DEAD_INTERVAL_DEFAULT;
        oi->rxmt_interval = OSPF_RETRANSMIT_INTERVAL_DEFAULT;
        oi->type = ospf6_default_iftype(ifp);
@@ -324,6 +326,9 @@ void ospf6_interface_disable(struct ospf6_interface *oi)
        EVENT_OFF(oi->thread_intra_prefix_lsa);
        EVENT_OFF(oi->thread_as_extern_lsa);
        EVENT_OFF(oi->thread_wait_timer);
+
+       oi->gr.hello_delay.elapsed_seconds = 0;
+       EVENT_OFF(oi->gr.hello_delay.t_grace_send);
 }
 
 static struct in6_addr *
@@ -772,6 +777,17 @@ void interface_up(struct event *thread)
                return;
        }
 
+       /*
+        * RFC 3623 - Section 5 ("Unplanned Outages"):
+        * "The grace-LSAs are encapsulated in Link State Update Packets
+        * and sent out to all interfaces, even though the restarted
+        * router has no adjacencies and no knowledge of previous
+        * adjacencies".
+        */
+       if (oi->area->ospf6->gr_info.restart_in_progress &&
+           oi->area->ospf6->gr_info.reason == OSPF6_GR_UNKNOWN_RESTART)
+               ospf6_gr_unplanned_start_interface(oi);
+
 #ifdef __FreeBSD__
        /*
         * There's a delay in FreeBSD between issuing a command to leave a
@@ -1180,6 +1196,9 @@ static int ospf6_interface_show(struct vty *vty, struct interface *ifp,
                                json_arr, json_object_new_string(lsa->name));
                json_object_object_add(json_obj, "pendingLsaLsAck", json_arr);
 
+               if (oi->gr.hello_delay.interval != 0)
+                       json_object_int_add(json_obj, "grHelloDelaySecs",
+                                           oi->gr.hello_delay.interval);
        } else {
                timerclear(&res);
                if (event_is_scheduled(oi->thread_send_lsupdate))
@@ -1205,6 +1224,10 @@ static int ospf6_interface_show(struct vty *vty, struct interface *ifp,
                                                                   : "off"));
                for (ALL_LSDB(oi->lsack_list, lsa, lsanext))
                        vty_out(vty, "      %s\n", lsa->name);
+
+               if (oi->gr.hello_delay.interval != 0)
+                       vty_out(vty, "  Graceful Restart hello delay: %us\n",
+                               oi->gr.hello_delay.interval);
        }
 
        /* BFD specific. */
@@ -2157,6 +2180,50 @@ ALIAS (ipv6_ospf6_deadinterval,
        "Interval time after which a neighbor is declared down\n"
        SECONDS_STR)
 
+DEFPY(ipv6_ospf6_gr_hdelay, ipv6_ospf6_gr_hdelay_cmd,
+      "ipv6 ospf6 graceful-restart hello-delay (1-1800)",
+      IP6_STR
+      OSPF6_STR
+      "Graceful Restart parameters\n"
+      "Delay the sending of the first hello packets.\n"
+      "Delay in seconds\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct ospf6_interface *oi;
+
+       oi = ifp->info;
+       if (oi == NULL)
+               oi = ospf6_interface_create(ifp);
+
+       /* Note: new or updated value won't affect ongoing graceful restart. */
+       oi->gr.hello_delay.interval = hello_delay;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(no_ipv6_ospf6_gr_hdelay, no_ipv6_ospf6_gr_hdelay_cmd,
+      "no ipv6 ospf6 graceful-restart hello-delay [(1-1800)]",
+      NO_STR
+      IP6_STR
+      OSPF6_STR
+      "Graceful Restart parameters\n"
+      "Delay the sending of the first hello packets.\n"
+      "Delay in seconds\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct ospf6_interface *oi;
+
+       oi = ifp->info;
+       if (oi == NULL)
+               oi = ospf6_interface_create(ifp);
+
+       oi->gr.hello_delay.interval = OSPF_HELLO_DELAY_DEFAULT;
+       oi->gr.hello_delay.elapsed_seconds = 0;
+       EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+
+       return CMD_SUCCESS;
+}
+
 /* interface variable set command */
 DEFUN (ipv6_ospf6_transmitdelay,
        ipv6_ospf6_transmitdelay_cmd,
@@ -2624,6 +2691,11 @@ static int config_write_ospf6_interface(struct vty *vty, struct vrf *vrf)
                else if (oi->type_cfg && oi->type == OSPF_IFTYPE_BROADCAST)
                        vty_out(vty, " ipv6 ospf6 network broadcast\n");
 
+               if (oi->gr.hello_delay.interval != OSPF_HELLO_DELAY_DEFAULT)
+                       vty_out(vty,
+                               " ipv6 ospf6 graceful-restart hello-delay %u\n",
+                               oi->gr.hello_delay.interval);
+
                ospf6_bfd_write_config(vty, oi);
 
                ospf6_auth_write_config(vty, &oi->at_data);
@@ -2722,12 +2794,14 @@ void ospf6_interface_init(void)
 
        install_element(INTERFACE_NODE, &ipv6_ospf6_deadinterval_cmd);
        install_element(INTERFACE_NODE, &ipv6_ospf6_hellointerval_cmd);
+       install_element(INTERFACE_NODE, &ipv6_ospf6_gr_hdelay_cmd);
        install_element(INTERFACE_NODE, &ipv6_ospf6_priority_cmd);
        install_element(INTERFACE_NODE, &ipv6_ospf6_retransmitinterval_cmd);
        install_element(INTERFACE_NODE, &ipv6_ospf6_transmitdelay_cmd);
        install_element(INTERFACE_NODE, &ipv6_ospf6_instance_cmd);
        install_element(INTERFACE_NODE, &no_ipv6_ospf6_deadinterval_cmd);
        install_element(INTERFACE_NODE, &no_ipv6_ospf6_hellointerval_cmd);
+       install_element(INTERFACE_NODE, &no_ipv6_ospf6_gr_hdelay_cmd);
        install_element(INTERFACE_NODE, &no_ipv6_ospf6_priority_cmd);
        install_element(INTERFACE_NODE, &no_ipv6_ospf6_retransmitinterval_cmd);
        install_element(INTERFACE_NODE, &no_ipv6_ospf6_transmitdelay_cmd);
index ae0744b25da21edfe320613bdcef16abf64d61eb..5942df0ab5df3e6ad36da75a9c323a67d5fb5077 100644 (file)
@@ -74,6 +74,15 @@ struct ospf6_interface {
        uint16_t dead_interval;
        uint32_t rxmt_interval;
 
+       /* Graceful-Restart data. */
+       struct {
+               struct {
+                       uint16_t interval;
+                       uint16_t elapsed_seconds;
+                       struct event *t_grace_send;
+               } hello_delay;
+       } gr;
+
        uint32_t state_change;
 
        /* Cost */
index 14b02dac7969e54e727fecc295605c7469fe82ee..032988a91f36c48c61db565686d58b1f57faeb58 100644 (file)
@@ -2244,6 +2244,10 @@ void ospf6_hello_send(struct event *thread)
 
        oi = (struct ospf6_interface *)EVENT_ARG(thread);
 
+       /* Check if the GR hello-delay is active. */
+       if (oi->gr.hello_delay.t_grace_send)
+               return;
+
        if (oi->state <= OSPF6_INTERFACE_DOWN) {
                if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND_HDR))
                        zlog_debug("Unable to send Hello on down interface %s",
index 0990b14307f2254156c406ae36cf118d29d440f9..e39ae504a23853e907988347019fcf5a178a1875 100644 (file)
@@ -1262,6 +1262,7 @@ static void ospf6_ase_calculate_timer(struct event *t)
                 * no longer valid.
                 */
                ospf6_zebra_gr_disable(ospf6);
+               ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
                ospf6->gr_info.finishing_restart = false;
        }
 }
index c2aa3abeedc667476a9f056076a58473d306cebb..01c962194c45ca6057fc05bf90d401c3c0d6b5dc 100644 (file)
@@ -455,6 +455,13 @@ struct ospf6 *ospf6_instance_create(const char *name)
        if (ospf6->router_id == 0)
                ospf6_router_id_update(ospf6, true);
        ospf6_add(ospf6);
+
+       /*
+        * Read from non-volatile memory whether this instance is performing a
+        * graceful restart or not.
+        */
+       ospf6_gr_nvm_read(ospf6);
+
        if (ospf6->vrf_id != VRF_UNKNOWN) {
                vrf = vrf_lookup_by_id(ospf6->vrf_id);
                FOR_ALL_INTERFACES (vrf, ifp) {
@@ -465,12 +472,6 @@ struct ospf6 *ospf6_instance_create(const char *name)
        if (ospf6->fd < 0)
                return ospf6;
 
-       /*
-        * Read from non-volatile memory whether this instance is performing a
-        * graceful restart or not.
-        */
-       ospf6_gr_nvm_read(ospf6);
-
        event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
                       &ospf6->t_ospf6_receive);
 
@@ -490,6 +491,7 @@ void ospf6_delete(struct ospf6 *o)
        ospf6_gr_helper_deinit(o);
        if (!o->gr_info.prepare_in_progress)
                ospf6_flush_self_originated_lsas_now(o);
+       XFREE(MTYPE_TMP, o->gr_info.exit_reason);
        ospf6_disable(o);
        ospf6_del(o);
 
@@ -693,6 +695,9 @@ DEFUN(no_router_ospf6, no_router_ospf6_cmd, "no router ospf6 [vrf NAME]",
        if (ospf6 == NULL)
                vty_out(vty, "OSPFv3 is not configured\n");
        else {
+               if (ospf6->gr_info.restart_support)
+                       ospf6_gr_nvm_delete(ospf6);
+
                ospf6_delete(ospf6);
                ospf6 = NULL;
        }
index 8fdd29112242c82f1d1a1631d63c58a87d6eddbc..a38dad8fcedd0370857de69204a9fadc3f095965 100644 (file)
@@ -51,6 +51,8 @@ struct ospf6_gr_info {
        bool prepare_in_progress;
        bool finishing_restart;
        uint32_t grace_period;
+       int reason;
+       char *exit_reason;
        struct event *t_grace_period;
 };
 
index 6b3d4955da0593ca019ac1704b2805990802e186..0f631c4d01f154fc36910fed00d8bbb43fa6a9f3 100644 (file)
@@ -239,12 +239,18 @@ static int ospf6_zebra_gr_update(struct ospf6 *ospf6, int command,
 
 int ospf6_zebra_gr_enable(struct ospf6 *ospf6, uint32_t stale_time)
 {
+       if (IS_DEBUG_OSPF6_GR)
+               zlog_debug("Zebra enable GR [stale time %u]", stale_time);
+
        return ospf6_zebra_gr_update(ospf6, ZEBRA_CLIENT_GR_CAPABILITIES,
                                     stale_time);
 }
 
 int ospf6_zebra_gr_disable(struct ospf6 *ospf6)
 {
+       if (IS_DEBUG_OSPF6_GR)
+               zlog_debug("Zebra disable GR");
+
        return ospf6_zebra_gr_update(ospf6, ZEBRA_CLIENT_GR_DISABLE, 0);
 }
 
@@ -735,10 +741,20 @@ uint8_t ospf6_distance_apply(struct prefix_ipv6 *p, struct ospf6_route * or,
 
 static void ospf6_zebra_connected(struct zclient *zclient)
 {
+       struct ospf6 *ospf6;
+       struct listnode *node;
+
        /* Send the client registration */
        bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
 
        zclient_send_reg_requests(zclient, VRF_DEFAULT);
+
+       /* Activate graceful restart if configured. */
+       for (ALL_LIST_ELEMENTS_RO(om6->ospf6, node, ospf6)) {
+               if (!ospf6->gr_info.restart_support)
+                       continue;
+               (void)ospf6_zebra_gr_enable(ospf6, ospf6->gr_info.grace_period);
+       }
 }
 
 static zclient_handler *const ospf6_handlers[] = {
index 3dff03956ceaa421520afdb753e47414f46c4d48..c34db3012da5d9bfec2fb1d98955979f0976c1fe 100644 (file)
@@ -78,6 +78,7 @@ clippy_scan += \
        ospf6d/ospf6_top.c \
        ospf6d/ospf6_area.c \
        ospf6d/ospf6_asbr.c \
+       ospf6d/ospf6_interface.c \
        ospf6d/ospf6_lsa.c \
        ospf6d/ospf6_gr_helper.c \
        ospf6d/ospf6_gr.c \
index 1aacc341aa336c80f9db165b9cd871280177313e..34e59c545b7eed24a5714ef9a2f2474dd136c2bb 100644 (file)
@@ -1764,6 +1764,12 @@ int ospf_apiserver_originate1(struct ospf_lsa *lsa, struct ospf_lsa *old)
                 * session. Dump it, but increment past it's seqnum.
                 */
                assert(!ospf_opaque_is_owned(old));
+               if (IS_DEBUG_OSPF_CLIENT_API)
+                       zlog_debug(
+                               "LSA[Type%d:%pI4]: OSPF API Server Originate LSA Old Seq: 0x%x Age: %d",
+                               old->data->type, &old->data->id,
+                               ntohl(old->data->ls_seqnum),
+                               ntohl(old->data->ls_age));
                if (IS_LSA_MAX_SEQ(old)) {
                        flog_warn(EC_OSPF_LSA_INSTALL_FAILURE,
                                  "%s: old LSA at maxseq", __func__);
@@ -1772,6 +1778,11 @@ int ospf_apiserver_originate1(struct ospf_lsa *lsa, struct ospf_lsa *old)
                lsa->data->ls_seqnum = lsa_seqnum_increment(old);
                ospf_discard_from_db(ospf, old->lsdb, old);
        }
+       if (IS_DEBUG_OSPF_CLIENT_API)
+               zlog_debug(
+                       "LSA[Type%d:%pI4]: OSPF API Server Originate LSA New Seq: 0x%x Age: %d",
+                       lsa->data->type, &lsa->data->id,
+                       ntohl(lsa->data->ls_seqnum), ntohl(lsa->data->ls_age));
 
        /* Install this LSA into LSDB. */
        if (ospf_lsa_install(ospf, lsa->oi, lsa) == NULL) {
@@ -1846,6 +1857,11 @@ struct ospf_lsa *ospf_apiserver_lsa_refresher(struct ospf_lsa *lsa)
        ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
        assert(ospf);
 
+       if (IS_DEBUG_OSPF(lsa, LSA_GENERATE)) {
+               zlog_debug("LSA[Type%d:%pI4]: OSPF API Server LSA Refresher",
+                          lsa->data->type, &lsa->data->id);
+       }
+
        apiserv = lookup_apiserver_by_lsa(lsa);
        if (!apiserv) {
                zlog_warn("%s: LSA[%s]: No apiserver?", __func__,
@@ -1855,14 +1871,14 @@ struct ospf_lsa *ospf_apiserver_lsa_refresher(struct ospf_lsa *lsa)
                goto out;
        }
 
-       if (IS_LSA_MAXAGE(lsa)) {
-               ospf_opaque_lsa_flush_schedule(lsa);
-               goto out;
-       }
-
        /* Check if updated version of LSA instance has already prepared. */
        new = ospf_lsdb_lookup(&apiserv->reserve, lsa);
        if (!new) {
+               if (IS_LSA_MAXAGE(lsa)) {
+                       ospf_opaque_lsa_flush_schedule(lsa);
+                       goto out;
+               }
+
                /* This is a periodic refresh, driven by core OSPF mechanism. */
                new = ospf_apiserver_opaque_lsa_new(lsa->area, lsa->oi,
                                                    lsa->data);
index cc2110d43331d208a090a2d3a829af817b6fe47a..610b5fc08e92640856c78875f23b99e57f759032 100644 (file)
@@ -615,6 +615,7 @@ static void ospf_ase_calculate_timer(struct event *t)
         */
        if (ospf->gr_info.finishing_restart) {
                ospf_zebra_gr_disable(ospf);
+               ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
                ospf->gr_info.finishing_restart = false;
        }
 }
index f60f0e863e2670fd5bbe483a9ce1d16687c33769..6999b3f623a6584ddcd38d18984d8502e6b3c9e9 100644 (file)
@@ -33,7 +33,7 @@
 #include "ospfd/ospf_dump.h"
 #include "ospfd/ospf_gr_clippy.c"
 
-static void ospf_gr_nvm_delete(struct ospf *ospf);
+static void ospf_gr_grace_period_expired(struct event *thread);
 
 /* Lookup self-originated Grace-LSA in the LSDB. */
 static struct ospf_lsa *ospf_gr_lsa_lookup(struct ospf *ospf,
@@ -53,7 +53,9 @@ static struct ospf_lsa *ospf_gr_lsa_lookup(struct ospf *ospf,
 
 /* Fill in fields of the Grace-LSA that is being originated. */
 static void ospf_gr_lsa_body_set(struct ospf_gr_info *gr_info,
-                                struct ospf_interface *oi, struct stream *s)
+                                struct ospf_interface *oi,
+                                enum ospf_gr_restart_reason reason,
+                                struct stream *s)
 {
        struct grace_tlv_graceperiod tlv_period = {};
        struct grace_tlv_restart_reason tlv_reason = {};
@@ -68,10 +70,7 @@ static void ospf_gr_lsa_body_set(struct ospf_gr_info *gr_info,
        /* Put restart reason. */
        tlv_reason.header.type = htons(RESTART_REASON_TYPE);
        tlv_reason.header.length = htons(RESTART_REASON_LENGTH);
-       if (gr_info->restart_support)
-               tlv_reason.reason = OSPF_GR_SW_RESTART;
-       else
-               tlv_reason.reason = OSPF_GR_UNKNOWN_RESTART;
+       tlv_reason.reason = reason;
        stream_put(s, &tlv_reason, sizeof(tlv_reason));
 
        /* Put IP address. */
@@ -85,7 +84,8 @@ static void ospf_gr_lsa_body_set(struct ospf_gr_info *gr_info,
 }
 
 /* Generate Grace-LSA for a given interface. */
-static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi)
+static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi,
+                                       enum ospf_gr_restart_reason reason)
 {
        struct stream *s;
        struct lsa_header *lsah;
@@ -112,7 +112,7 @@ static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi)
        lsa_header_set(s, options, lsa_type, lsa_id, oi->ospf->router_id);
 
        /* Set opaque-LSA body fields. */
-       ospf_gr_lsa_body_set(&oi->ospf->gr_info, oi, s);
+       ospf_gr_lsa_body_set(&oi->ospf->gr_info, oi, reason, s);
 
        /* Set length. */
        length = stream_get_endp(s);
@@ -135,15 +135,20 @@ static struct ospf_lsa *ospf_gr_lsa_new(struct ospf_interface *oi)
 }
 
 /* Originate and install Grace-LSA for a given interface. */
-static void ospf_gr_lsa_originate(struct ospf_interface *oi, bool maxage)
+static void ospf_gr_lsa_originate(struct ospf_interface *oi,
+                                 enum ospf_gr_restart_reason reason,
+                                 bool maxage)
 {
        struct ospf_lsa *lsa, *old;
 
-       if (ospf_interface_neighbor_count(oi) == 0)
+       /* Skip originating a Grace-LSA when not necessary. */
+       if (!if_is_operative(oi->ifp) || if_is_loopback(oi->ifp) ||
+           (reason != OSPF_GR_UNKNOWN_RESTART &&
+            ospf_interface_neighbor_count(oi) == 0))
                return;
 
        /* Create new Grace-LSA. */
-       lsa = ospf_gr_lsa_new(oi);
+       lsa = ospf_gr_lsa_new(oi, reason);
        if (!lsa) {
                zlog_warn("%s: ospf_gr_lsa_new() failed", __func__);
                return;
@@ -157,18 +162,36 @@ static void ospf_gr_lsa_originate(struct ospf_interface *oi, bool maxage)
        if (old)
                lsa->data->ls_seqnum = lsa_seqnum_increment(old);
 
-       /* Install this LSA into LSDB. */
-       if (ospf_lsa_install(oi->ospf, oi, lsa) == NULL) {
-               zlog_warn("%s: ospf_lsa_install() failed", __func__);
-               ospf_lsa_unlock(&lsa);
-               return;
+       if (!maxage && reason == OSPF_GR_UNKNOWN_RESTART) {
+               struct list *update;
+               struct in_addr addr;
+
+               /*
+                * When performing an unplanned restart, send a handcrafted
+                * Grace-LSA since the interface isn't fully initialized yet.
+                */
+               ospf_lsa_checksum(lsa->data);
+               ospf_lsa_lock(lsa);
+               update = list_new();
+               listnode_add(update, lsa);
+               addr.s_addr = htonl(OSPF_ALLSPFROUTERS);
+               ospf_ls_upd_queue_send(oi, update, addr, true);
+               list_delete(&update);
+               ospf_lsa_discard(lsa);
+       } else {
+               /* Install this LSA into LSDB. */
+               if (ospf_lsa_install(oi->ospf, oi, lsa) == NULL) {
+                       zlog_warn("%s: ospf_lsa_install() failed", __func__);
+                       ospf_lsa_unlock(&lsa);
+                       return;
+               }
+
+               /* Flood the LSA through out the interface */
+               ospf_flood_through_interface(oi, NULL, lsa);
        }
 
        /* Update new LSA origination count. */
        oi->ospf->lsa_originate_count++;
-
-       /* Flood the LSA through out the interface */
-       ospf_flood_through_interface(oi, NULL, lsa);
 }
 
 /* Flush all self-originated Grace-LSAs. */
@@ -181,13 +204,14 @@ static void ospf_gr_flush_grace_lsas(struct ospf *ospf)
                struct ospf_interface *oi;
                struct listnode *inode;
 
-               if (IS_DEBUG_OSPF_GR)
-                       zlog_debug(
-                               "GR: flushing self-originated Grace-LSAs [area %pI4]",
-                               &area->area_id);
+               for (ALL_LIST_ELEMENTS_RO(area->oiflist, inode, oi)) {
+                       if (IS_DEBUG_OSPF_GR)
+                               zlog_debug(
+                                       "GR: flushing self-originated Grace-LSA [area %pI4] [interface %s]",
+                                       &area->area_id, oi->ifp->name);
 
-               for (ALL_LIST_ELEMENTS_RO(area->oiflist, inode, oi))
-                       ospf_gr_lsa_originate(oi, true);
+                       ospf_gr_lsa_originate(oi, ospf->gr_info.reason, true);
+               }
        }
 }
 
@@ -203,9 +227,6 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
        ospf->gr_info.restart_in_progress = false;
        EVENT_OFF(ospf->gr_info.t_grace_period);
 
-       /* Record in non-volatile memory that the restart is complete. */
-       ospf_gr_nvm_delete(ospf);
-
        for (ALL_LIST_ELEMENTS_RO(ospf->areas, onode, area)) {
                struct ospf_interface *oi;
 
@@ -216,13 +237,22 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
                 */
                ospf_router_lsa_update_area(area);
 
-               /*
-                * 2) The router should reoriginate network-LSAs on all segments
-                *    where it is the Designated Router.
-                */
-               for (ALL_LIST_ELEMENTS_RO(area->oiflist, anode, oi))
+               for (ALL_LIST_ELEMENTS_RO(area->oiflist, anode, oi)) {
+                       /* Disable hello delay. */
+                       if (oi->gr.hello_delay.t_grace_send) {
+                               oi->gr.hello_delay.elapsed_seconds = 0;
+                               EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+                               OSPF_ISM_TIMER_MSEC_ON(oi->t_hello,
+                                                      ospf_hello_timer, 1);
+                       }
+
+                       /*
+                        * 2) The router should reoriginate network-LSAs on all
+                        * segments where it is the Designated Router.
+                        */
                        if (oi->state == ISM_DR)
                                ospf_network_lsa_update(oi);
+               }
        }
 
        /*
@@ -242,12 +272,34 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
         *    should be removed.
         */
        ospf->gr_info.finishing_restart = true;
+       XFREE(MTYPE_TMP, ospf->gr_info.exit_reason);
+       ospf->gr_info.exit_reason = XSTRDUP(MTYPE_TMP, reason);
        ospf_spf_calculate_schedule(ospf, SPF_FLAG_GR_FINISH);
 
        /* 6) Any grace-LSAs that the router originated should be flushed. */
        ospf_gr_flush_grace_lsas(ospf);
 }
 
+/* Enter the Graceful Restart mode. */
+void ospf_gr_restart_enter(struct ospf *ospf,
+                          enum ospf_gr_restart_reason reason, int timestamp)
+{
+       unsigned long remaining_time;
+
+       ospf->gr_info.restart_in_progress = true;
+       ospf->gr_info.reason = reason;
+
+       /* Schedule grace period timeout. */
+       remaining_time = timestamp - time(NULL);
+       if (IS_DEBUG_OSPF_GR)
+               zlog_debug(
+                       "GR: remaining time until grace period expires: %lu(s)",
+                       remaining_time);
+
+       event_add_timer(master, ospf_gr_grace_period_expired, ospf,
+                       remaining_time, &ospf->gr_info.t_grace_period);
+}
+
 /* Check if a Router-LSA contains a given link. */
 static bool ospf_router_lsa_contains_adj(struct ospf_lsa *lsa,
                                         struct in_addr *id)
@@ -518,11 +570,26 @@ static char *ospf_gr_nvm_filepath(struct ospf *ospf)
        return filepath;
 }
 
+/* Send extra Grace-LSA out the interface (unplanned outages only). */
+void ospf_gr_iface_send_grace_lsa(struct event *thread)
+{
+       struct ospf_interface *oi = EVENT_ARG(thread);
+       struct ospf_if_params *params = IF_DEF_PARAMS(oi->ifp);
+
+       ospf_gr_lsa_originate(oi, oi->ospf->gr_info.reason, false);
+
+       if (++oi->gr.hello_delay.elapsed_seconds < params->v_gr_hello_delay)
+               event_add_timer(master, ospf_gr_iface_send_grace_lsa, oi, 1,
+                               &oi->gr.hello_delay.t_grace_send);
+       else
+               OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
+}
+
 /*
  * Record in non-volatile memory that the given OSPF instance is attempting to
  * perform a graceful restart.
  */
-static void ospf_gr_nvm_update(struct ospf *ospf)
+static void ospf_gr_nvm_update(struct ospf *ospf, bool prepare)
 {
        char *filepath;
        const char *inst_name;
@@ -550,16 +617,18 @@ static void ospf_gr_nvm_update(struct ospf *ospf)
                                       json_instance);
        }
 
+       json_object_int_add(json_instance, "gracePeriod",
+                           ospf->gr_info.grace_period);
+
        /*
         * Record not only the grace period, but also a UNIX timestamp
         * corresponding to the end of that period. That way, once ospfd is
         * restarted, it will be possible to take into account the time that
         * passed while ospfd wasn't running.
         */
-       json_object_int_add(json_instance, "gracePeriod",
-                           ospf->gr_info.grace_period);
-       json_object_int_add(json_instance, "timestamp",
-                           time(NULL) + ospf->gr_info.grace_period);
+       if (prepare)
+               json_object_int_add(json_instance, "timestamp",
+                                   time(NULL) + ospf->gr_info.grace_period);
 
        json_object_to_file_ext(filepath, json, JSON_C_TO_STRING_PRETTY);
        json_object_free(json);
@@ -569,7 +638,7 @@ static void ospf_gr_nvm_update(struct ospf *ospf)
  * Delete GR status information about the given OSPF instance from non-volatile
  * memory.
  */
-static void ospf_gr_nvm_delete(struct ospf *ospf)
+void ospf_gr_nvm_delete(struct ospf *ospf)
 {
        char *filepath;
        const char *inst_name;
@@ -607,6 +676,7 @@ void ospf_gr_nvm_read(struct ospf *ospf)
        json_object *json_instances;
        json_object *json_instance;
        json_object *json_timestamp;
+       json_object *json_grace_period;
        time_t timestamp = 0;
 
        filepath = ospf_gr_nvm_filepath(ospf);
@@ -629,29 +699,33 @@ void ospf_gr_nvm_read(struct ospf *ospf)
                                       json_instance);
        }
 
+       json_object_object_get_ex(json_instance, "gracePeriod",
+                                 &json_grace_period);
        json_object_object_get_ex(json_instance, "timestamp", &json_timestamp);
+
        if (json_timestamp) {
                time_t now;
-               unsigned long remaining_time;
 
-               /* Check if the grace period has already expired. */
+               /* Planned GR: check if the grace period has already expired. */
                now = time(NULL);
                timestamp = json_object_get_int(json_timestamp);
                if (now > timestamp) {
                        ospf_gr_restart_exit(
                                ospf, "grace period has expired already");
-               } else {
-                       /* Schedule grace period timeout. */
-                       ospf->gr_info.restart_in_progress = true;
-                       remaining_time = timestamp - time(NULL);
-                       if (IS_DEBUG_OSPF_GR)
-                               zlog_debug(
-                                       "GR: remaining time until grace period expires: %lu(s)",
-                                       remaining_time);
-                       event_add_timer(master, ospf_gr_grace_period_expired,
-                                       ospf, remaining_time,
-                                       &ospf->gr_info.t_grace_period);
-               }
+               } else
+                       ospf_gr_restart_enter(ospf, OSPF_GR_SW_RESTART,
+                                             timestamp);
+       } else if (json_grace_period) {
+               uint32_t grace_period;
+
+               /*
+                * Unplanned GR: the Grace-LSAs will be sent later as soon as
+                * the interfaces are operational.
+                */
+               grace_period = json_object_get_int(json_grace_period);
+               ospf->gr_info.grace_period = grace_period;
+               ospf_gr_restart_enter(ospf, OSPF_GR_UNKNOWN_RESTART,
+                                     time(NULL) + ospf->gr_info.grace_period);
        }
 
        json_object_object_del(json_instances, inst_name);
@@ -660,6 +734,17 @@ void ospf_gr_nvm_read(struct ospf *ospf)
        json_object_free(json);
 }
 
+void ospf_gr_unplanned_start_interface(struct ospf_interface *oi)
+{
+       /* Send Grace-LSA. */
+       ospf_gr_lsa_originate(oi, oi->ospf->gr_info.reason, false);
+
+       /* Start GR hello-delay interval. */
+       oi->gr.hello_delay.elapsed_seconds = 0;
+       event_add_timer(master, ospf_gr_iface_send_grace_lsa, oi, 1,
+                       &oi->gr.hello_delay.t_grace_send);
+}
+
 /* Prepare to start a Graceful Restart. */
 static void ospf_gr_prepare(void)
 {
@@ -687,20 +772,12 @@ static void ospf_gr_prepare(void)
                        continue;
                }
 
-               /* Freeze OSPF routes in the RIB. */
-               if (ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period)) {
-                       zlog_warn(
-                               "%s: failed to activate graceful restart: not connected to zebra",
-                               __func__);
-                       continue;
-               }
-
                /* Send a Grace-LSA to all neighbors. */
                for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, inode, oi))
-                       ospf_gr_lsa_originate(oi, false);
+                       ospf_gr_lsa_originate(oi, OSPF_GR_SW_RESTART, false);
 
                /* Record end of the grace period in non-volatile memory. */
-               ospf_gr_nvm_update(ospf);
+               ospf_gr_nvm_update(ospf, true);
 
                /*
                 * Mark that a Graceful Restart preparation is in progress, to
@@ -749,6 +826,12 @@ DEFPY(graceful_restart, graceful_restart_cmd,
        ospf->gr_info.restart_support = true;
        ospf->gr_info.grace_period = grace_period;
 
+       /* Freeze OSPF routes in the RIB. */
+       (void)ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
+
+       /* Record that GR is enabled in non-volatile memory. */
+       ospf_gr_nvm_update(ospf, false);
+
        return CMD_SUCCESS;
 }
 
@@ -771,6 +854,8 @@ DEFPY(no_graceful_restart, no_graceful_restart_cmd,
 
        ospf->gr_info.restart_support = false;
        ospf->gr_info.grace_period = OSPF_DFLT_GRACE_INTERVAL;
+       ospf_gr_nvm_delete(ospf);
+       ospf_zebra_gr_disable(ospf);
 
        return CMD_SUCCESS;
 }
index 9760bb17282e3f667ffcfa13f59070b686ccd629..750d77381d85b1f14dc6072fc0a9a5b23ff05f08 100644 (file)
@@ -166,11 +166,16 @@ extern void ospf_gr_helper_supported_gracetime_set(struct ospf *ospf,
                                                   uint32_t interval);
 extern void ospf_gr_helper_set_supported_planned_only_restart(struct ospf *ospf,
                                                             bool planned_only);
-
+extern void ospf_gr_iface_send_grace_lsa(struct event *thread);
+extern void ospf_gr_restart_enter(struct ospf *ospf,
+                                 enum ospf_gr_restart_reason reason,
+                                 int timestamp);
 extern void ospf_gr_check_lsdb_consistency(struct ospf *ospf,
                                                  struct ospf_area *area);
 extern void ospf_gr_check_adjs(struct ospf *ospf);
 extern void ospf_gr_nvm_read(struct ospf *ospf);
+extern void ospf_gr_nvm_delete(struct ospf *ospf);
+extern void ospf_gr_unplanned_start_interface(struct ospf_interface *oi);
 extern void ospf_gr_init(void);
 
 #endif /* _ZEBRA_OSPF_GR_H */
index 5742ece1f7d0ede078a80665e130c37588c25d7f..8da982aed8181b683ec30391d7e01c2ce9267493 100644 (file)
@@ -101,6 +101,9 @@ int ospf_if_get_output_cost(struct ospf_interface *oi)
                        cost = 1;
                else if (cost > 65535)
                        cost = 65535;
+
+               if (if_is_loopback(oi->ifp))
+                       cost = 0;
        }
 
        return cost;
@@ -527,6 +530,7 @@ static struct ospf_if_params *ospf_new_if_params(void)
        UNSET_IF_PARAM(oip, passive_interface);
        UNSET_IF_PARAM(oip, v_hello);
        UNSET_IF_PARAM(oip, fast_hello);
+       UNSET_IF_PARAM(oip, v_gr_hello_delay);
        UNSET_IF_PARAM(oip, v_wait);
        UNSET_IF_PARAM(oip, priority);
        UNSET_IF_PARAM(oip, type);
@@ -676,6 +680,9 @@ int ospf_if_new_hook(struct interface *ifp)
        SET_IF_PARAM(IF_DEF_PARAMS(ifp), fast_hello);
        IF_DEF_PARAMS(ifp)->fast_hello = OSPF_FAST_HELLO_DEFAULT;
 
+       SET_IF_PARAM(IF_DEF_PARAMS(ifp), v_gr_hello_delay);
+       IF_DEF_PARAMS(ifp)->v_gr_hello_delay = OSPF_HELLO_DELAY_DEFAULT;
+
        SET_IF_PARAM(IF_DEF_PARAMS(ifp), v_wait);
        IF_DEF_PARAMS(ifp)->v_wait = OSPF_ROUTER_DEAD_INTERVAL_DEFAULT;
 
index 649df437a4f162d4c3d90bf7db9f1f15cd778355..24768b9ab49d3ffa9fbbb85c48de600a80070aaa 100644 (file)
@@ -72,6 +72,9 @@ struct ospf_if_params {
        DECLARE_IF_PARAM(uint32_t, v_wait);  /* Router Dead Interval */
        bool is_v_wait_set;                  /* Check for Dead Interval set */
 
+       /* GR Hello Delay Interval */
+       DECLARE_IF_PARAM(uint16_t, v_gr_hello_delay);
+
        /* MTU mismatch check (see RFC2328, chap 10.6) */
        DECLARE_IF_PARAM(uint8_t, mtu_ignore);
 
@@ -214,6 +217,14 @@ struct ospf_interface {
        /* List of configured NBMA neighbor. */
        struct list *nbr_nbma;
 
+       /* Graceful-Restart data. */
+       struct {
+               struct {
+                       uint16_t elapsed_seconds;
+                       struct event *t_grace_send;
+               } hello_delay;
+       } gr;
+
        /* self-originated LSAs. */
        struct ospf_lsa *network_lsa_self; /* network-LSA. */
        struct list *opaque_lsa_self;      /* Type-9 Opaque-LSAs */
index 9f795ea91850329e442cf44a7979d07864315a78..2516fa75db43c377c6e2b0c17d3c1dc6f34992dc 100644 (file)
@@ -244,6 +244,10 @@ void ospf_hello_timer(struct event *thread)
        oi = EVENT_ARG(thread);
        oi->t_hello = NULL;
 
+       /* Check if the GR hello-delay is active. */
+       if (oi->gr.hello_delay.t_grace_send)
+               return;
+
        if (IS_DEBUG_OSPF(ism, ISM_TIMERS))
                zlog_debug("ISM[%s]: Timer (Hello timer expire)", IF_NAME(oi));
 
@@ -282,6 +286,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                EVENT_OFF(oi->t_hello);
                EVENT_OFF(oi->t_wait);
                EVENT_OFF(oi->t_ls_ack);
+               EVENT_OFF(oi->gr.hello_delay.t_grace_send);
                break;
        case ISM_Loopback:
                /* In this state, the interface may be looped back and will be
@@ -289,6 +294,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                EVENT_OFF(oi->t_hello);
                EVENT_OFF(oi->t_wait);
                EVENT_OFF(oi->t_ls_ack);
+               EVENT_OFF(oi->gr.hello_delay.t_grace_send);
                break;
        case ISM_Waiting:
                /* The router is trying to determine the identity of DRouter and
index 452a3ba3747e597b504905ca2c610853dc5190c4..67f1faf8a9abf948afbec5595f63bf2446f4b21e 100644 (file)
@@ -608,7 +608,8 @@ static int lsa_link_loopback_set(struct stream **s, struct ospf_interface *oi)
 
        mask.s_addr = 0xffffffff;
        id.s_addr = oi->address->u.prefix4.s_addr;
-       return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0, 0);
+       return link_info_set(s, id, mask, LSA_LINK_TYPE_STUB, 0,
+                            oi->output_cost);
 }
 
 /* Describe Virtual Link. */
index c2b40af1c4706161b93bfd35a39466c2786de044..6894c6a009c49e779857cf36b81d1442fe5f83f0 100644 (file)
@@ -2120,14 +2120,21 @@ void ospf_opaque_self_originated_lsa_received(struct ospf_neighbor *nbr,
                        lsa->data->type, &lsa->data->id);
 
        /*
-        * Since these LSA entries are not yet installed into corresponding
-        * LSDB, just flush them without calling ospf_ls_maxage() afterward.
+        * Install the stale LSA into the Link State Database, add it to the
+        * MaxAge list, and flush it from the OSPF routing domain. For other
+        * LSA types, the installation is done in the refresh function. It is
+        * done inline here since the opaque refresh function is dynamically
+        * registered when opaque LSAs are originated (which is not the case
+        * for stale LSAs).
         */
        lsa->data->ls_age = htons(OSPF_LSA_MAXAGE);
+       ospf_lsa_install(
+               top, (lsa->data->type == OSPF_OPAQUE_LINK_LSA) ? nbr->oi : NULL,
+               lsa);
+       ospf_lsa_maxage(top, lsa);
+
        switch (lsa->data->type) {
        case OSPF_OPAQUE_LINK_LSA:
-               ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
-               break;
        case OSPF_OPAQUE_AREA_LSA:
                ospf_flood_through_area(nbr->oi->area, NULL /*inbr*/, lsa);
                break;
@@ -2139,7 +2146,6 @@ void ospf_opaque_self_originated_lsa_received(struct ospf_neighbor *nbr,
                          __func__, lsa->data->type);
                return;
        }
-       ospf_lsa_discard(lsa); /* List "lsas" will be deleted by caller. */
 }
 
 /*------------------------------------------------------------------------*
index 552acfd6d32be08f5c5424f5614b928828c8afd0..d010b8b6e60bd820d9b5aee3ef020d34e1f6aaeb 100644 (file)
@@ -2031,7 +2031,7 @@ static void ospf_ls_upd(struct ospf *ospf, struct ip *iph,
                        if (current == NULL) {
                                if (IS_DEBUG_OSPF_EVENT)
                                        zlog_debug(
-                                               "LSA[%s]: Previously originated Opaque-LSA,not found in the LSDB.",
+                                               "LSA[%s]: Previously originated Opaque-LSA, not found in the LSDB.",
                                                dump_lsa_key(lsa));
 
                                SET_FLAG(lsa->flags, OSPF_LSA_SELF);
@@ -4040,9 +4040,8 @@ static struct ospf_packet *ospf_ls_upd_packet_new(struct list *update,
        return ospf_packet_new(size - sizeof(struct ip));
 }
 
-static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
-                                  struct list *update, struct in_addr addr,
-                                  int send_lsupd_now)
+void ospf_ls_upd_queue_send(struct ospf_interface *oi, struct list *update,
+                           struct in_addr addr, int send_lsupd_now)
 {
        struct ospf_packet *op;
        uint16_t length = OSPF_HEADER_SIZE;
index 4003e2add6412624b00c5c2d076d0e9b6da96777..234738979e95852213ec6204b18a1cec21f4ac39 100644 (file)
@@ -132,6 +132,9 @@ extern void ospf_ls_req_send(struct ospf_neighbor *);
 extern void ospf_ls_upd_send_lsa(struct ospf_neighbor *, struct ospf_lsa *,
                                 int);
 extern void ospf_ls_upd_send(struct ospf_neighbor *, struct list *, int, int);
+extern void ospf_ls_upd_queue_send(struct ospf_interface *oi,
+                                  struct list *update, struct in_addr addr,
+                                  int send_lsupd_now);
 extern void ospf_ls_ack_send(struct ospf_neighbor *, struct ospf_lsa *);
 extern void ospf_ls_ack_send_delayed(struct ospf_interface *);
 extern void ospf_ls_retransmit(struct ospf_interface *, struct ospf_lsa *);
index da9428aba63fb35e358dafa27bd71290b4e265f2..07fc503947825ee9d4e7150d6fbcd47e6cf40153 100644 (file)
@@ -1078,6 +1078,7 @@ void ospf_ti_lfa_free_p_spaces(struct ospf_area *area)
 
                q_spaces_fini(p_space->q_spaces);
                XFREE(MTYPE_OSPF_Q_SPACE, p_space->q_spaces);
+               XFREE(MTYPE_OSPF_P_SPACE, p_space);
        }
 
        p_spaces_fini(area->p_spaces);
index 1b4d26ef3d07f8f72d2558a497006f9ae750a8ba..f92be3fca550d4e0bb28e324f2711d729a01f053 100644 (file)
@@ -232,9 +232,12 @@ DEFUN (no_router_ospf,
                return CMD_NOT_MY_INSTANCE;
 
        ospf = ospf_lookup(instance, vrf_name);
-       if (ospf)
+       if (ospf) {
+               if (ospf->gr_info.restart_support)
+                       ospf_gr_nvm_delete(ospf);
+
                ospf_finish(ospf);
-       else
+       } else
                ret = CMD_WARNING_CONFIG_FAILED;
 
        return ret;
@@ -3617,6 +3620,7 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
        struct ospf_neighbor *nbr;
        struct route_node *rn;
        uint32_t bandwidth = ifp->bandwidth ? ifp->bandwidth : ifp->speed;
+       struct ospf_if_params *params;
 
        /* Is interface up? */
        if (use_json) {
@@ -3936,6 +3940,20 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
                                ospf_nbr_count(oi, 0),
                                ospf_nbr_count(oi, NSM_Full));
 
+
+               params = IF_DEF_PARAMS(ifp);
+               if (params &&
+                   OSPF_IF_PARAM_CONFIGURED(params, v_gr_hello_delay)) {
+                       if (use_json) {
+                               json_object_int_add(json_interface_sub,
+                                                   "grHelloDelaySecs",
+                                                   params->v_gr_hello_delay);
+                       } else
+                               vty_out(vty,
+                                       "  Graceful Restart hello delay: %us\n",
+                                       params->v_gr_hello_delay);
+               }
+
                ospf_interface_bfd_show(vty, ifp, json_interface_sub);
 
                /* OSPF Authentication information */
@@ -8639,6 +8657,59 @@ DEFUN_HIDDEN (no_ospf_retransmit_interval,
        return no_ip_ospf_retransmit_interval(self, vty, argc, argv);
 }
 
+DEFPY (ip_ospf_gr_hdelay,
+       ip_ospf_gr_hdelay_cmd,
+       "ip ospf graceful-restart hello-delay (1-1800)",
+       IP_STR
+       "OSPF interface commands\n"
+       "Graceful Restart parameters\n"
+       "Delay the sending of the first hello packets.\n"
+       "Delay in seconds\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct ospf_if_params *params;
+
+       params = IF_DEF_PARAMS(ifp);
+
+       /* Note: new or updated value won't affect ongoing graceful restart. */
+       SET_IF_PARAM(params, v_gr_hello_delay);
+       params->v_gr_hello_delay = hello_delay;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (no_ip_ospf_gr_hdelay,
+       no_ip_ospf_gr_hdelay_cmd,
+       "no ip ospf graceful-restart hello-delay [(1-1800)]",
+       NO_STR
+       IP_STR
+       "OSPF interface commands\n"
+       "Graceful Restart parameters\n"
+       "Delay the sending of the first hello packets.\n"
+       "Delay in seconds\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct ospf_if_params *params;
+       struct route_node *rn;
+
+       params = IF_DEF_PARAMS(ifp);
+       UNSET_IF_PARAM(params, v_gr_hello_delay);
+       params->v_gr_hello_delay = OSPF_HELLO_DELAY_DEFAULT;
+
+       for (rn = route_top(IF_OIFS(ifp)); rn; rn = route_next(rn)) {
+               struct ospf_interface *oi;
+
+               oi = rn->info;
+               if (!oi)
+                       continue;
+
+               oi->gr.hello_delay.elapsed_seconds = 0;
+               EVENT_OFF(oi->gr.hello_delay.t_grace_send);
+       }
+
+       return CMD_SUCCESS;
+}
+
 DEFUN (ip_ospf_transmit_delay,
        ip_ospf_transmit_delay_addr_cmd,
        "ip ospf transmit-delay (1-65535) [A.B.C.D]",
@@ -11831,6 +11902,15 @@ static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
                                vty_out(vty, "\n");
                        }
 
+                       /* Hello Graceful-Restart Delay print. */
+                       if (OSPF_IF_PARAM_CONFIGURED(params,
+                                                    v_gr_hello_delay) &&
+                           params->v_gr_hello_delay !=
+                                   OSPF_HELLO_DELAY_DEFAULT)
+                               vty_out(vty,
+                                       " ip ospf graceful-restart hello-delay %u\n",
+                                       params->v_gr_hello_delay);
+
                        /* Router Priority print. */
                        if (OSPF_IF_PARAM_CONFIGURED(params, priority)
                            && params->priority
index 0b770a8364787c8679326798e9f2e3981cceb6ba..27d74cd4fcd9199543b635720b447f1e22292aef 100644 (file)
@@ -1252,12 +1252,18 @@ static int ospf_zebra_gr_update(struct ospf *ospf, int command,
 
 int ospf_zebra_gr_enable(struct ospf *ospf, uint32_t stale_time)
 {
+       if (IS_DEBUG_OSPF_GR)
+               zlog_debug("Zebra enable GR [stale time %u]", stale_time);
+
        return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_CAPABILITIES,
                                    stale_time);
 }
 
 int ospf_zebra_gr_disable(struct ospf *ospf)
 {
+       if (IS_DEBUG_OSPF_GR)
+               zlog_debug("Zebra disable GR");
+
        return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_DISABLE, 0);
 }
 
@@ -2120,10 +2126,20 @@ int ospf_zebra_label_manager_connect(void)
 
 static void ospf_zebra_connected(struct zclient *zclient)
 {
+       struct ospf *ospf;
+       struct listnode *node;
+
        /* Send the client registration */
        bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
 
        zclient_send_reg_requests(zclient, VRF_DEFAULT);
+
+       /* Activate graceful restart if configured. */
+       for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
+               if (!ospf->gr_info.restart_support)
+                       continue;
+               (void)ospf_zebra_gr_enable(ospf, ospf->gr_info.grace_period);
+       }
 }
 
 /*
index 7e83714c0ae18d99b5b113ad6d5739876d93e427..eae1f301a234611f4774c82aa6dca84f06bb50bb 100644 (file)
@@ -720,6 +720,7 @@ static void ospf_finish_final(struct ospf *ospf)
 
        if (!ospf->gr_info.prepare_in_progress)
                ospf_flush_self_originated_lsas_now(ospf);
+       XFREE(MTYPE_TMP, ospf->gr_info.exit_reason);
 
        /* Unregister redistribution */
        for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
@@ -1131,6 +1132,17 @@ struct ospf_interface *add_ospf_interface(struct connected *co,
            && if_is_operative(co->ifp))
                ospf_if_up(oi);
 
+       /*
+        * RFC 3623 - Section 5 ("Unplanned Outages"):
+        * "The grace-LSAs are encapsulated in Link State Update Packets
+        * and sent out to all interfaces, even though the restarted
+        * router has no adjacencies and no knowledge of previous
+        * adjacencies".
+        */
+       if (oi->ospf->gr_info.restart_in_progress &&
+           oi->ospf->gr_info.reason == OSPF_GR_UNKNOWN_RESTART)
+               ospf_gr_unplanned_start_interface(oi);
+
        return oi;
 }
 
index 1f8d1a32e6b7a2ff2d514aaaa75b3599d9dd2dd0..36936b16f4ab7f82a27ff33ab59bd2e179d5335c 100644 (file)
@@ -148,6 +148,8 @@ struct ospf_gr_info {
        bool prepare_in_progress;
        bool finishing_restart;
        uint32_t grace_period;
+       int reason;
+       char *exit_reason;
        struct event *t_grace_period;
 };
 
index b053e422ec7f7a34f510505be16a6c368ff0c441..b1beb456303ad7ef4d50b1a8e792315ea61064a4 100644 (file)
@@ -979,13 +979,13 @@ int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term)
        }
 
        if (ifp->ifindex < 0) {
-               zlog_warn("%s: ifindex=%d < 1 on interface %s", __func__,
+               zlog_warn("%s: ifindex=%d < 0 on interface %s", __func__,
                          ifp->ifindex, ifp->name);
                return -2;
-       } else if ((ifp->ifindex == 0) &&
+       } else if ((ifp->ifindex == PIM_OIF_PIM_REGISTER_VIF) &&
                   ((strncmp(ifp->name, "pimreg", 6)) &&
                    (strncmp(ifp->name, "pim6reg", 7)))) {
-               zlog_warn("%s: ifindex=%d == 0 on interface %s", __func__,
+               zlog_warn("%s: ifindex=%d on interface %s", __func__,
                          ifp->ifindex, ifp->name);
                return -2;
        }
index ac9fc4b1a818ba651e60f633bdbc5dee2bd0ddb5..6f45bb5d9eeacbde2d68f9654a03e085e73b5b71 100644 (file)
@@ -85,14 +85,33 @@ void cli_show_router_rip(struct vty *vty, const struct lyd_node *dnode,
 /*
  * XPath: /frr-ripd:ripd/instance/allow-ecmp
  */
-DEFPY_YANG (rip_allow_ecmp,
+DEFUN_YANG (rip_allow_ecmp,
        rip_allow_ecmp_cmd,
-       "[no] allow-ecmp",
+       "allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
+       "Allow Equal Cost MultiPath\n"
+       "Number of paths\n")
+{
+       int idx_number = 1;
+       char mpaths[3] = {};
+       uint32_t paths = MULTIPATH_NUM;
+
+       if (argv[idx_number])
+               paths = strtol(argv[idx_number]->arg, NULL, 10);
+       snprintf(mpaths, sizeof(mpaths), "%u", paths);
+
+       nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, mpaths);
+
+       return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN_YANG (no_rip_allow_ecmp,
+       no_rip_allow_ecmp_cmd,
+       "no allow-ecmp [" CMD_RANGE_STR(1, MULTIPATH_NUM) "]",
        NO_STR
-       "Allow Equal Cost MultiPath\n")
+       "Allow Equal Cost MultiPath\n"
+       "Number of paths\n")
 {
-       nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY,
-                             no ? "false" : "true");
+       nb_cli_enqueue_change(vty, "./allow-ecmp", NB_OP_MODIFY, 0);
 
        return nb_cli_apply_changes(vty, NULL);
 }
@@ -100,10 +119,14 @@ DEFPY_YANG (rip_allow_ecmp,
 void cli_show_rip_allow_ecmp(struct vty *vty, const struct lyd_node *dnode,
                             bool show_defaults)
 {
-       if (!yang_dnode_get_bool(dnode, NULL))
-               vty_out(vty, " no");
+       uint8_t paths;
+
+       paths = yang_dnode_get_uint8(dnode, NULL);
 
-       vty_out(vty, " allow-ecmp\n");
+       if (!paths)
+               vty_out(vty, " no allow-ecmp\n");
+       else
+               vty_out(vty, " allow-ecmp %d\n", paths);
 }
 
 /*
@@ -1156,6 +1179,7 @@ void rip_cli_init(void)
        install_element(RIP_NODE, &rip_no_distribute_list_cmd);
 
        install_element(RIP_NODE, &rip_allow_ecmp_cmd);
+       install_element(RIP_NODE, &no_rip_allow_ecmp_cmd);
        install_element(RIP_NODE, &rip_default_information_originate_cmd);
        install_element(RIP_NODE, &rip_default_metric_cmd);
        install_element(RIP_NODE, &no_rip_default_metric_cmd);
index 5c7bd0fb8623425169152882fb5eb63083c0c793..1117ec9ff7a5112c6b1831e9cf9491554ee0f4aa 100644 (file)
@@ -99,9 +99,13 @@ int ripd_instance_allow_ecmp_modify(struct nb_cb_modify_args *args)
                return NB_OK;
 
        rip = nb_running_get_entry(args->dnode, NULL, true);
-       rip->ecmp = yang_dnode_get_bool(args->dnode, NULL);
-       if (!rip->ecmp)
+       rip->ecmp = yang_dnode_get_uint8(args->dnode, NULL);
+       if (!rip->ecmp) {
                rip_ecmp_disable(rip);
+               return NB_OK;
+       }
+
+       rip_ecmp_change(rip);
 
        return NB_OK;
 }
index 698dcb982d8f5b9affdaa59a542228d71fb5ea96..834c7d2d6519a96b87ac29fb0131da32aed64589 100644 (file)
@@ -20,6 +20,7 @@
 
 /* All information about zebra. */
 struct zclient *zclient = NULL;
+uint32_t zebra_ecmp_count = MULTIPATH_NUM;
 
 /* Send ECMP routes to zebra. */
 static void rip_zebra_ipv4_send(struct rip *rip, struct route_node *rp,
@@ -30,7 +31,7 @@ static void rip_zebra_ipv4_send(struct rip *rip, struct route_node *rp,
        struct zapi_nexthop *api_nh;
        struct listnode *listnode = NULL;
        struct rip_info *rinfo = NULL;
-       int count = 0;
+       uint32_t count = 0;
 
        memset(&api, 0, sizeof(api));
        api.vrf_id = rip->vrf->vrf_id;
@@ -39,7 +40,7 @@ static void rip_zebra_ipv4_send(struct rip *rip, struct route_node *rp,
 
        SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
        for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
-               if (count >= MULTIPATH_NUM)
+               if (count >= zebra_ecmp_count)
                        break;
                api_nh = &api.nexthops[count];
                api_nh->vrf_id = rip->vrf->vrf_id;
@@ -227,6 +228,11 @@ zclient_handler *const rip_handlers[] = {
        [ZEBRA_REDISTRIBUTE_ROUTE_DEL] = rip_zebra_read_route,
 };
 
+static void rip_zebra_capabilities(struct zclient_capabilities *cap)
+{
+       zebra_ecmp_count = MIN(cap->ecmp, zebra_ecmp_count);
+}
+
 void rip_zclient_init(struct event_loop *master)
 {
        /* Set default value to the zebra client structure. */
@@ -234,6 +240,7 @@ void rip_zclient_init(struct event_loop *master)
                              array_size(rip_handlers));
        zclient_init(zclient, ZEBRA_ROUTE_RIP, 0, &ripd_privs);
        zclient->zebra_connected = rip_zebra_connected;
+       zclient->zebra_capabilities = rip_zebra_capabilities;
 }
 
 void rip_zclient_stop(void)
index 339b033fb1b7e486ef23bfc847f2fee13ad2e2a5..04a8cad560fe25ae06be0cde144b5dfd9e8e3017 100644 (file)
@@ -156,7 +156,10 @@ struct rip_info *rip_ecmp_add(struct rip *rip, struct rip_info *rinfo_new)
 {
        struct route_node *rp = rinfo_new->rp;
        struct rip_info *rinfo = NULL;
+       struct rip_info *rinfo_exist = NULL;
        struct list *list = NULL;
+       struct listnode *node = NULL;
+       struct listnode *nnode = NULL;
 
        if (rp->info == NULL)
                rp->info = list_new();
@@ -167,6 +170,33 @@ struct rip_info *rip_ecmp_add(struct rip *rip, struct rip_info *rinfo_new)
        if (listcount(list) && !rip->ecmp)
                return NULL;
 
+       /* Add or replace an existing ECMP path with lower neighbor IP */
+       if (listcount(list) && listcount(list) >= rip->ecmp) {
+               struct rip_info *from_highest = NULL;
+
+               /* Find the rip_info struct that has the highest nexthop IP */
+               for (ALL_LIST_ELEMENTS(list, node, nnode, rinfo_exist))
+                       if (!from_highest ||
+                           (from_highest &&
+                            IPV4_ADDR_CMP(&rinfo_exist->from,
+                                          &from_highest->from) > 0)) {
+                               from_highest = rinfo_exist;
+                       }
+
+               /* If we have a route in ECMP group, delete the old
+                * one that has a higher next-hop address. Lower IP is
+                * preferred.
+                */
+               if (rip->ecmp > 1 && from_highest &&
+                   IPV4_ADDR_CMP(&from_highest->from, &rinfo_new->from) > 0) {
+                       rip_ecmp_delete(rip, from_highest);
+                       goto add_or_replace;
+               }
+
+               return NULL;
+       }
+
+add_or_replace:
        rinfo = rip_info_new();
        memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
        listnode_add(list, rinfo);
@@ -2632,6 +2662,36 @@ struct rip *rip_lookup_by_vrf_name(const char *vrf_name)
        return RB_FIND(rip_instance_head, &rip_instances, &rip);
 }
 
+/* Update ECMP routes to zebra when `allow-ecmp` changed. */
+void rip_ecmp_change(struct rip *rip)
+{
+       struct route_node *rp;
+       struct rip_info *rinfo;
+       struct list *list;
+       struct listnode *node, *nextnode;
+
+       for (rp = route_top(rip->table); rp; rp = route_next(rp)) {
+               list = rp->info;
+               if (list && listcount(list) > 1) {
+                       while (listcount(list) > rip->ecmp) {
+                               struct rip_info *from_highest = NULL;
+
+                               for (ALL_LIST_ELEMENTS(list, node, nextnode,
+                                                      rinfo)) {
+                                       if (!from_highest ||
+                                           (from_highest &&
+                                            IPV4_ADDR_CMP(
+                                                    &rinfo->from,
+                                                    &from_highest->from) > 0))
+                                               from_highest = rinfo;
+                               }
+
+                               rip_ecmp_delete(rip, from_highest);
+                       }
+               }
+       }
+}
+
 /* Create new RIP instance and set it to global variable. */
 struct rip *rip_create(const char *vrf_name, struct vrf *vrf, int socket)
 {
@@ -2641,7 +2701,7 @@ struct rip *rip_create(const char *vrf_name, struct vrf *vrf, int socket)
        rip->vrf_name = XSTRDUP(MTYPE_RIP_VRF_NAME, vrf_name);
 
        /* Set initial value. */
-       rip->ecmp = yang_get_default_bool("%s/allow-ecmp", RIP_INSTANCE);
+       rip->ecmp = yang_get_default_uint8("%s/allow-ecmp", RIP_INSTANCE);
        rip->default_metric =
                yang_get_default_uint8("%s/default-metric", RIP_INSTANCE);
        rip->distance =
index bba3c280697b9a080625371a8ff5c9ab52b83c60..2db1e5a6b0ee62bb956c03b5fc28374f7da2e569 100644 (file)
@@ -141,7 +141,7 @@ struct rip {
        struct route_table *distance_table;
 
        /* RIP ECMP flag */
-       bool ecmp;
+       uint8_t ecmp;
 
        /* Are we in passive-interface default mode? */
        bool passive_default;
@@ -537,4 +537,6 @@ extern struct event_loop *master;
 DECLARE_HOOK(rip_ifaddr_add, (struct connected * ifc), (ifc));
 DECLARE_HOOK(rip_ifaddr_del, (struct connected * ifc), (ifc));
 
+extern void rip_ecmp_change(struct rip *rip);
+
 #endif /* _ZEBRA_RIP_H */
index cb63da47153c352cf86f8a0c8d06e7eef784c273..7e28f04e1c91a2a293900443b13b6e451bfa23da 100644 (file)
@@ -10,6 +10,7 @@ r1-eth0 is up
   Timer intervals configured, Hello 1s, Dead 5s, Wait 5s, Retransmit 5
     Hello due in XX.XXXs
   Neighbor Count is 0, Adjacent neighbor count is 0
+  Graceful Restart hello delay: 10s
 r1-eth3 is up
   ifindex X, MTU 1500 bytes, BW XX Mbit <UP,BROADCAST,RUNNING,MULTICAST>
   Internet Address 192.168.3.1/26, Broadcast 192.168.3.63, Area 0.0.0.0
@@ -22,3 +23,4 @@ r1-eth3 is up
   Timer intervals configured, Hello 1s, Dead 5s, Wait 5s, Retransmit 5
     Hello due in XX.XXXs
   Neighbor Count is 0, Adjacent neighbor count is 0
+  Graceful Restart hello delay: 10s
index 92bb99c8f24e182b05e98e9c2f621b661ce5baca..4b7c4de806189a061f3a7b1a4ebbe6b629d94fc5 100644 (file)
@@ -198,6 +198,12 @@ def test_error_messages_daemons():
     if fatal_error != "":
         pytest.skip(fatal_error)
 
+    if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+        print(
+            "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+        )
+        pytest.skip("Skipping test for Stderr output")
+
     print("\n\n** Check for error messages in daemons")
     print("******************************************\n")
 
index 360c9cf1e9e165c7bc94bb771e549ee69826fa89..9c9bfda1edc5a5629f6dfeeec34a0c77bf4ca0f6 100755 (executable)
@@ -161,6 +161,8 @@ def main():
             logging.critical('No "/tmp/topotests" directory to save')
             sys.exit(1)
         subprocess.run(["mv", "/tmp/topotests", args.results])
+        if "SUDO_USER" in os.environ:
+            subprocess.run(["chown", "-R", os.environ["SUDO_USER"], args.results])
         # # Old location for results
         # if os.path.exists("/tmp/topotests.xml", args.results):
         #     subprocess.run(["mv", "/tmp/topotests.xml", args.results])
index 566d391f7ab0ec7234db8489f5cbdf9c08ba10b2..9d47106c072fe1a68a2576caa80ec13c5d9c9982 100644 (file)
@@ -158,8 +158,8 @@ def setup_module(mod):
     # For all registered routers, load the zebra configuration file
     for rname, router in router_list.items():
         router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
-        router.load_config(TopoRouter.RD_OSPF)
-        router.load_config(TopoRouter.RD_BGP)
+        router.load_config(TopoRouter.RD_OSPF, "")
+        router.load_config(TopoRouter.RD_BGP, "")
 
     # After copying the configurations, this function loads configured daemons.
     tgen.start_router()
index 0e9942a22725ea98a17fcb09b4dacd3768f6bf36..6b920367270f4a45d10a47e7902cbca58a3f899e 100644 (file)
@@ -158,8 +158,8 @@ def setup_module(mod):
     # For all registered routers, load the zebra configuration file
     for rname, router in router_list.items():
         router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
-        router.load_config(TopoRouter.RD_OSPF)
-        router.load_config(TopoRouter.RD_BGP)
+        router.load_config(TopoRouter.RD_OSPF, "")
+        router.load_config(TopoRouter.RD_BGP, "")
 
     # After copying the configurations, this function loads configured daemons.
     tgen.start_router()
index 99a8953b3fc316b6701d5a6a8f9e661feece1449..2237c6b1b6dc65614c0f4fd880fa8da136fd11d0 100644 (file)
@@ -157,8 +157,8 @@ def setup_module(mod):
     # For all registered routers, load the zebra configuration file
     for rname, router in router_list.items():
         router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
-        router.load_config(TopoRouter.RD_OSPF)
-        router.load_config(TopoRouter.RD_BGP)
+        router.load_config(TopoRouter.RD_OSPF, "")
+        router.load_config(TopoRouter.RD_BGP, "")
 
     # After copying the configurations, this function loads configured daemons.
     tgen.start_router()
index dffef0eef70079c62d7c23596c3abdee74d211ad..d6fe42504bc28dc40d0de4df81bd7cfc842f32fb 100644 (file)
@@ -157,8 +157,8 @@ def setup_module(mod):
     # For all registered routers, load the zebra configuration file
     for rname, router in router_list.items():
         router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
-        router.load_config(TopoRouter.RD_OSPF)
-        router.load_config(TopoRouter.RD_BGP)
+        router.load_config(TopoRouter.RD_OSPF, "")
+        router.load_config(TopoRouter.RD_BGP, "")
 
     # After copying the configurations, this function loads configured daemons.
     tgen.start_router()
index af1353e0e01bf8342ac1d33d134a1cce715b97c0..44b009e9cad4da184d4c8cf4dcd0d191ffd6857b 100644 (file)
@@ -1,6 +1,6 @@
 !
 router bgp 65001
- timers 3 10
+ timers bgp 3 10
  no bgp ebgp-requires-policy
  neighbor 192.168.1.2 remote-as external
  neighbor 192.168.1.2 timers connect 5
index db68e554d478953e74094e380a600c67390517e2..8274e3f96d82beb8a7dae2e27091571cc920aa60 100644 (file)
@@ -1,5 +1,5 @@
 router bgp 65002
- timers 3 10
+ timers bgp 3 10
  no bgp ebgp-requires-policy
  neighbor 192.168.1.1 remote-as external
  neighbor 192.168.1.1 timers connect 5
index 3ac6a08e474459e4e41e05f7ca5d3ae6451610cb..98eb2e1711c13865dd1655d53c867972cf630c2a 100644 (file)
@@ -1,5 +1,5 @@
 router bgp 65003
- timers 3 10
+ timers bgp 3 10
  no bgp ebgp-requires-policy
  neighbor 192.168.2.2 remote-as external
  neighbor 192.168.2.2 timers connect 5
index 8ab405fbd8d5e11f3bffbc30daac86679872bc1b..68245c4a21f3ad93ce57458887eaa94da9d5d0b1 100644 (file)
@@ -1,5 +1,5 @@
 router bgp 65004
- timers 3 10
+ timers bgp 3 10
  no bgp ebgp-requires-policy
  neighbor 192.168.2.2 remote-as external
  neighbor 192.168.2.2 timers connect 5
index c0a5a1905d747e4235b30f5fd0d816fdf34d33b2..e033a0f00593f89ac79318c01d644ea0edd71bf5 100644 (file)
@@ -176,6 +176,19 @@ def test_bgp_convergence():
     # tgen.mininet_cli()
 
 
+def get_shut_msg_count(tgen):
+    shuts = {}
+    for rtrNum in [2, 4]:
+        shutmsg = tgen.net["r{}".format(rtrNum)].cmd_nostatus(
+            'grep -c "NOTIFICATION.*Cease/Administrative Shutdown" bgpd.log', warn=False
+        )
+        try:
+            shuts[rtrNum] = int(shutmsg.strip())
+        except ValueError:
+            shuts[rtrNum] = 0
+    return shuts
+
+
 def test_bgp_shutdown():
     "Test BGP instance shutdown"
 
@@ -185,6 +198,8 @@ def test_bgp_shutdown():
     if tgen.routers_have_failure():
         pytest.skip(tgen.errors)
 
+    shuts_before = get_shut_msg_count(tgen)
+
     tgen.net["r1"].cmd(
         'vtysh -c "conf t" -c "router bgp 65000" -c "bgp shutdown message ABCDabcd"'
     )
@@ -208,6 +223,11 @@ def test_bgp_shutdown():
         )
         assert res is None, assertmsg
 
+    shuts_after = get_shut_msg_count(tgen)
+
+    for k in shuts_before:
+        assert shuts_before[k] + 1 == shuts_after[k]
+
 
 def test_bgp_shutdown_message():
     "Test BGP Peer Shutdown Message"
@@ -222,18 +242,11 @@ def test_bgp_shutdown_message():
         logger.info("Checking BGP shutdown received on router r{}".format(rtrNum))
 
         shut_message = tgen.net["r{}".format(rtrNum)].cmd(
-            'tail bgpd.log | grep "NOTIFICATION.*Cease/Administrative Shutdown"'
+            'grep -e "NOTIFICATION.*Cease/Administrative Shutdown.*ABCDabcd" bgpd.log'
         )
         assertmsg = "BGP shutdown message not received on router R{}".format(rtrNum)
         assert shut_message != "", assertmsg
 
-        assertmsg = "Incorrect BGP shutdown message received on router R{}".format(
-            rtrNum
-        )
-        assert "ABCDabcd" in shut_message, assertmsg
-
-    # tgen.mininet_cli()
-
 
 def test_bgp_no_shutdown():
     "Test BGP instance no shutdown"
diff --git a/tests/topotests/bgp_prefix_list_topo1/prefix_modify.json b/tests/topotests/bgp_prefix_list_topo1/prefix_modify.json
new file mode 100644 (file)
index 0000000..4a066ae
--- /dev/null
@@ -0,0 +1,120 @@
+{
+   "address_types": ["ipv4"],
+   "ipv4base":"192.120.1.0",
+   "ipv4mask":24,
+   "link_ip_start":{"ipv4":"192.120.1.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+   "routers":{
+      "r1":{
+         "links":{
+            "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+            "r2":{"ipv4":"auto"},
+            "r3":{"ipv4":"auto"}
+         },
+         "bgp":{
+            "local_as":"100",
+            "address_family": {
+               "ipv4": {
+                  "unicast": {
+                     "neighbor": {
+                        "r2": {
+                           "dest_link": {
+                              "r1": {}
+                           }
+                        },
+                        "r3": {
+                           "dest_link": {
+                              "r1": {}
+                           }
+                        }
+                     }
+                  }
+               }
+            }
+         }
+      },
+      "r2":{
+         "links":{
+            "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+            "r1":{"ipv4":"auto", "ipv6":"auto"},
+            "r3":{"ipv4":"auto", "ipv6":"auto"}
+         },
+         "bgp":{
+            "local_as":"100",
+            "address_family": {
+               "ipv4": {
+                  "unicast": {
+                     "neighbor": {
+                        "r1": {
+                           "dest_link": {
+                              "r2": {}
+                           }
+                        },
+                        "r3": {
+                           "dest_link": {
+                              "r2": {}
+                           }
+                        }
+                     }
+                  }
+               }
+            }
+         }
+      },
+      "r3":{
+         "links":{
+            "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+            "r1":{"ipv4":"auto"},
+            "r2":{"ipv4":"auto"},
+            "r4":{"ipv4":"auto"}
+         },
+         "bgp":{
+            "local_as":"100",
+            "address_family": {
+               "ipv4": {
+                  "unicast": {
+                     "neighbor": {
+                        "r1": {
+                           "dest_link": {
+                              "r3": {}
+                           }
+                        },
+                        "r2": {
+                           "dest_link": {
+                              "r3": {}
+                           }
+                        },
+                        "r4": {
+                           "dest_link": {
+                              "r3": {}
+                           }
+                        }
+                     }
+                  }
+               }
+            }
+         }
+      },
+      "r4":{
+         "links":{
+            "lo": {"ipv4": "auto", "type": "loopback", "add_static_route":"yes"},
+            "r3":{"ipv4":"auto"}
+         },
+         "bgp":{
+            "local_as":"200",
+            "address_family": {
+               "ipv4": {
+                  "unicast": {
+                     "neighbor": {
+                        "r3": {
+                           "dest_link": {
+                              "r4": {}
+                           }
+                        }
+                     }
+                  }
+               }
+            }
+         }
+      }
+   }
+}
diff --git a/tests/topotests/bgp_prefix_list_topo1/test_prefix_modify.py b/tests/topotests/bgp_prefix_list_topo1/test_prefix_modify.py
new file mode 100644 (file)
index 0000000..541b9de
--- /dev/null
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+
+"""
+Following tests are covered to test prefix-list functionality:
+
+Test steps
+- Create topology (setup module)
+  Creating 4 routers topology, r1, r2, r3 are in IBGP and
+  r3, r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+
+IP prefix-list tests
+- Test modify prefix-list action
+"""
+
+import sys
+import time
+import os
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    reset_config_on_routers,
+    verify_rib,
+    create_static_routes,
+    create_prefix_lists,
+    step,
+    create_route_maps,
+    check_router_status,
+)
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
+
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd]
+
+
+# Global variables
+bgp_convergence = False
+
+IPV4_PF3 = "192.168.0.0/18"
+IPV4_PF4 = "192.150.10.0/24"
+IPV4_PF5 = "192.168.10.1/32"
+IPV4_PF6 = "192.168.10.10/32"
+IPV4_PF7 = "192.168.10.0/24"
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/prefix_lists.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    # Checking BGP convergence
+    global BGP_CONVERGENCE
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Api call verify whether BGP is converged
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+        BGP_CONVERGENCE
+    )
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+    """
+    Teardown the pytest environment
+
+    * `mod`: module name
+    """
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop toplogy and Remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#   Tests starting
+#
+#####################################################
+
+
+def test_bug_prefix_lists_deny_to_permit_p1(request):
+    """
+    Verify modification of prefix-list action
+    """
+
+    tgen = get_topogen()
+    if BGP_CONVERGENCE is not True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    # base config
+    step("Configure IPV4  and IPv6 IBGP and EBGP session as mentioned in setup")
+    step("Configure static routes on R2 with Null 0 nexthop")
+    input_dict_1 = {
+        "r2": {
+            "static_routes": [
+                {"network": IPV4_PF7, "no_of_ip": 1, "next_hop": "Null0"},
+                {"network": IPV4_PF6, "no_of_ip": 1, "next_hop": "Null0"},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Advertise static route in BGP using redistribute static command")
+    input_dict_4 = {
+        "r2": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    }
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_dict_4)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "All the static route advertised in R4 as BGP "
+        "routes verify using 'show ip  bgp'and 'show bgp'"
+    )
+    dut = "r4"
+    protocol = "bgp"
+
+    input_dict_route = {
+        "r4": {"static_routes": [{"network": IPV4_PF7}, {"network": IPV4_PF6}]}
+    }
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_route)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Configure IPv4 and IPv6 prefix-list")
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1_ipv4": [
+                        {"seqid": "5", "network": IPV4_PF7, "action": "deny"}
+                    ],
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": [
+                        {"seqid": "10", "network": IPV4_PF7, "action": "permit"}
+                    ]
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "configure route-map seq to permit IPV4 prefix list and seq"
+        "2 to permit IPV6 prefix list and apply it to out direction on R3"
+    )
+
+    input_dict_3 = {
+        "r3": {
+            "route_maps": {
+                "rmap_match_pf_1": [
+                    {
+                        "action": "permit",
+                        "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
+                    }
+                ]
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_7 = {
+        "r3": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r4": {
+                                    "dest_link": {
+                                        "r3": {
+                                            "route_maps": [
+                                                {
+                                                    "name": "rmap_match_pf_1",
+                                                    "direction": "out",
+                                                }
+                                            ]
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    result = create_router_bgp(tgen, topo, input_dict_7)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify on R4 should not have any IPv4 and IPv6 BGP routes using "
+        "show ip bgp show bgp"
+    )
+
+    dut = "r4"
+    protocol = "bgp"
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict_route, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n" "Error : Routes are still present \n {}".format(
+        tc_name, result
+    )
+
+    step("Modify IPv4/IPv6 prefix-list sequence 5 to another value on R3")
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": [
+                        {"seqid": "5", "network": IPV4_PF4, "action": "deny"}
+                    ],
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify  /24 and /120 routes present on"
+        "R4 BGP table using show ip bgp show bgp"
+    )
+    input_dict = {"r4": {"static_routes": [{"network": IPV4_PF7}]}}
+
+    dut = "r4"
+    protocol = "bgp"
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Change prefix-list to same as original on R3")
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": [
+                        {"seqid": "5", "network": IPV4_PF7, "action": "deny"}
+                    ],
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify  /24 and /120 routes removed on"
+        "R4 BGP table using show ip bgp show bgp"
+    )
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n" "Error : Routes are still present \n {}".format(
+        tc_name, result
+    )
+
+    step("Modify IPv4/IPv6 prefix-list sequence 5 to another value")
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": [
+                        {"seqid": "5", "network": IPV4_PF4, "action": "deny"}
+                    ],
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Clear BGP on R3 and verify the routes")
+    clear_bgp(tgen, "ipv4", "r3")
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("On R3 add prefix-list permit any for IPv4 and IPv6 seq 15")
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": [
+                        {"seqid": "15", "network": "any", "action": "permit"}
+                    ],
+                }
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("verify /24 and /32 /120 and /128 routes are present on R4")
+    result = verify_rib(tgen, "ipv4", dut, input_dict_route)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 11342374471194a912c3987940af42fb9d1026ac..e48f81c53dd8eee14b5f7434594e1acac4a7e964 100644 (file)
@@ -409,8 +409,6 @@ def test_bgp_remove_private_as():
         # the old flag after each iteration so we only test the flags we expect.
         _change_remove_type(rmv_type, "del")
 
-    return True
-
 
 if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/bgp_route_map_match_source_protocol/__init__.py b/tests/topotests/bgp_route_map_match_source_protocol/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_route_map_match_source_protocol/r1/frr.conf b/tests/topotests/bgp_route_map_match_source_protocol/r1/frr.conf
new file mode 100644 (file)
index 0000000..7c3efee
--- /dev/null
@@ -0,0 +1,32 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
+ip route 10.10.10.10/32 192.168.2.2
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ address-family ipv4
+  redistribute connected
+  redistribute static
+  neighbor 192.168.1.2 route-map r2 out
+  neighbor 192.168.2.2 route-map r3 out
+ exit-address-family
+!
+route-map r2 permit 10
+ match source-protocol static
+route-map r3 permit 10
+ match source-protocol connected
+!
diff --git a/tests/topotests/bgp_route_map_match_source_protocol/r2/frr.conf b/tests/topotests/bgp_route_map_match_source_protocol/r2/frr.conf
new file mode 100644 (file)
index 0000000..7213975
--- /dev/null
@@ -0,0 +1,10 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_route_map_match_source_protocol/r3/frr.conf b/tests/topotests/bgp_route_map_match_source_protocol/r3/frr.conf
new file mode 100644 (file)
index 0000000..4a1d830
--- /dev/null
@@ -0,0 +1,10 @@
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_route_map_match_source_protocol/test_bgp_route_map_match_source_protocol.py b/tests/topotests/bgp_route_map_match_source_protocol/test_bgp_route_map_match_source_protocol.py
new file mode 100644 (file)
index 0000000..2828796
--- /dev/null
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if r1 can announce only static routes to r2, and only connected
+routes to r3 using `match source-protocol` with route-maps.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    """Create routers r1-r3 and wire r1<->r2 via switch s1, r1<->r3 via s2."""
+    # range() upper bound is exclusive: this creates r1, r2 and r3.
+    for routern in range(1, 4):
+        tgen.add_router("r{}".format(routern))
+
+    # s1 carries the r1-r2 link (192.168.1.0/24 per the frr.conf files).
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    # s2 carries the r1-r3 link (192.168.2.0/24 per the frr.conf files).
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    """Stop all routers and tear the topology down after the module's tests."""
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_route_map_match_source_protocol():
+    """Verify r1's outbound route-maps filter by route source protocol.
+
+    r1 applies ``match source-protocol static`` toward r2 and
+    ``match source-protocol connected`` toward r3 (see r1/frr.conf), so r2
+    must receive only the static route and r3 only the connected routes.
+    """
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_check_advertised_routes_r2():
+        # r2 peer: only the static route 10.10.10.10/32 may be advertised;
+        # totalPrefixCounter == 1 rejects any leaked connected prefixes.
+        output = json.loads(
+            tgen.gears["r1"].vtysh_cmd(
+                "show bgp ipv4 unicast neighbors 192.168.1.2 advertised-routes json"
+            )
+        )
+        expected = {
+            "advertisedRoutes": {
+                "10.10.10.10/32": {
+                    "valid": True,
+                }
+            },
+            "totalPrefixCounter": 1,
+        }
+        return topotest.json_cmp(output, expected)
+
+    # Poll up to 30s: BGP convergence after startup is asynchronous.
+    test_func = functools.partial(_bgp_check_advertised_routes_r2)
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "Failed to filter routes by source-protocol for r2"
+
+    def _bgp_check_advertised_routes_r3():
+        # r3 peer: exactly the three connected prefixes of r1 (two LAN
+        # subnets plus the loopback /32); the static route must be absent.
+        output = json.loads(
+            tgen.gears["r1"].vtysh_cmd(
+                "show bgp ipv4 unicast neighbors 192.168.2.2 advertised-routes json"
+            )
+        )
+        expected = {
+            "advertisedRoutes": {
+                "192.168.1.0/24": {
+                    "valid": True,
+                },
+                "192.168.2.0/24": {
+                    "valid": True,
+                },
+                "172.16.255.1/32": {
+                    "valid": True,
+                },
+            },
+            "totalPrefixCounter": 3,
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_check_advertised_routes_r3)
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "Failed to filter routes by source-protocol for r3"
+
+
+if __name__ == "__main__":
+    # Support standalone execution; "-s" keeps pytest from capturing stdout
+    # so router/daemon output is visible while the topotest runs.
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/__init__.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgp_ipv4_routes_vrf1.json b/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgp_ipv4_routes_vrf1.json
new file mode 100644 (file)
index 0000000..31a1f3d
--- /dev/null
@@ -0,0 +1,143 @@
+{
+     "vrfName": "vrf1",
+     "localAS": 65500,
+     "routes":
+         {
+            "10.200.0.0/24": [
+                {
+                    "valid": true,
+                    "bestpath": true,
+                    "prefix": "10.200.0.0",
+                    "prefixLen": 24,
+                    "network": "10.200.0.0\/24",
+                    "nexthops": [
+                        {
+                            "ip": "192.168.0.2",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+                
+            ],
+             "172.31.0.11/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.11",
+                     "prefixLen":32,
+                     "network":"172.31.0.11/32",
+                     "peerId":"192.0.2.100",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.11",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.12/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.12",
+                     "prefixLen":32,
+                     "network":"172.31.0.12/32",
+                     "peerId":"192.0.2.100",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.12",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.13/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.13",
+                     "prefixLen":32,
+                     "network":"172.31.0.13/32",
+                     "peerId":"192.168.255.13",
+                     "nexthops":[
+                         {
+                             "ip":"192.168.255.13",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.14/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.14",
+                     "prefixLen":32,
+                     "network":"172.31.0.14/32",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.14",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.15/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.15",
+                     "prefixLen":32,
+                     "network":"172.31.0.15/32",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.12",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.20/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.20",
+                     "prefixLen":32,
+                     "network":"172.31.0.20/32",
+                     "peerId":"192.0.2.100",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.11",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172.31.0.111/32": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172.31.0.111",
+                     "prefixLen":32,
+                     "network":"172.31.0.111/32",
+                     "peerId":"192.0.2.100",
+                     "nexthops":[
+                         {
+                             "ip":"192.0.2.11",
+                             "afi":"ipv4",
+                             "used":true
+                         }
+                     ]
+                 }
+             ]
+         }
+}
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..35fb2ec
--- /dev/null
@@ -0,0 +1,30 @@
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65501
+ address-family ipv4 unicast
+  no neighbor 192.168.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 192.168.0.2 activate
+  neighbor 192.168.0.2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192.0.2.100 remote-as 65500
+ neighbor 192.168.255.13 remote-as 65500
+ address-family ipv4 unicast
+  redistribute connected
+  redistribute static
+  label vpn export allocation-mode per-nexthop
+  label vpn export auto
+  rd vpn export 444:1
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/ipv4_routes.json b/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/ipv4_routes.json
new file mode 100644 (file)
index 0000000..da7d281
--- /dev/null
@@ -0,0 +1,50 @@
+{
+    "10.200.0.0/24": [
+        {
+            "prefix": "10.200.0.0/24",
+            "prefixLen": 24,
+            "protocol": "bgp",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 20,
+            "metric": 0,
+            "nexthops": [
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "ip": "10.125.0.2",
+                    "afi": "ipv4",
+                    "interfaceName": "r1-eth0",
+                    "vrf": "default",
+                    "active": true,
+                   "labels":[
+                       102
+                   ]
+                }
+            ]
+        }
+    ],
+    "10.201.0.0/24": [
+        {
+            "prefix": "10.201.0.0/24",
+            "prefixLen": 24,
+            "protocol": "connected",
+            "vrfName": "vrf1",
+            "selected": true,
+            "destSelected": true,
+            "distance": 0,
+            "metric": 0,
+            "installed": true,
+            "nexthops":[
+                {
+                    "flags": 3,
+                    "fib": true,
+                    "directlyConnected": true,
+                    "interfaceName": "r1-eth1",
+                    "active": true
+                }
+            ]
+        }
+    ]
+}
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r1/zebra.conf
new file mode 100644 (file)
index 0000000..2618595
--- /dev/null
@@ -0,0 +1,18 @@
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ip address 192.0.2.1/24
+!
+interface r1-eth2 vrf vrf1
+ ip address 192.168.255.1/24
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+vrf vrf1
+ ip route 172.31.0.14/32 192.0.2.14
+ ip route 172.31.0.15/32 192.0.2.12
+exit-vrf
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r11/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r11/bgpd.conf
new file mode 100644 (file)
index 0000000..5da9151
--- /dev/null
@@ -0,0 +1,11 @@
+router bgp 65500
+ bgp router-id 192.0.2.11
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+  network 172.31.0.11/32
+  network 172.31.0.111/32
+  network 172.31.0.20/32
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r11/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r11/zebra.conf
new file mode 100644 (file)
index 0000000..a080757
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r11-eth0
+ ip address 192.0.2.11/24
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r12/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r12/bgpd.conf
new file mode 100644 (file)
index 0000000..d3889f5
--- /dev/null
@@ -0,0 +1,9 @@
+router bgp 65500
+ bgp router-id 192.0.2.12
+ no bgp network import-check
+ neighbor 192.0.2.100 remote-as 65500
+ address-family ipv4 unicast
+  network 172.31.0.12/32
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r12/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r12/zebra.conf
new file mode 100644 (file)
index 0000000..9ce3aba
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r12-eth0
+ ip address 192.0.2.12/24
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r13/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r13/bgpd.conf
new file mode 100644 (file)
index 0000000..21dbb58
--- /dev/null
@@ -0,0 +1,9 @@
+router bgp 65500
+ bgp router-id 192.168.255.13
+ no bgp network import-check
+ address-family ipv4 unicast
+  neighbor 192.168.255.1 remote-as 65500
+  network 172.31.0.13/32
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r13/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r13/zebra.conf
new file mode 100644 (file)
index 0000000..4d78b5f
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r13-eth0
+ ip address 192.168.255.13/24
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_ipv4_routes.json
new file mode 100644 (file)
index 0000000..3407925
--- /dev/null
@@ -0,0 +1,38 @@
+{
+     "vrfName": "vrf1",
+     "localAS": 65501,
+     "routes":
+         {
+            "10.201.0.0/24": [
+                {
+                    "prefix": "10.201.0.0",
+                    "prefixLen": 24,
+                    "network": "10.201.0.0\/24",
+                    "nhVrfName": "default",
+                    "nexthops": [
+                        {
+                            "ip": "192.168.0.1",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ],
+            "10.200.0.0/24": [
+                {
+                    "valid": true,
+                    "bestpath": true,
+                    "prefix": "10.200.0.0",
+                    "prefixLen": 24,
+                    "network": "10.200.0.0\/24",
+                    "nexthops": [
+                        {
+                            "ip": "0.0.0.0",
+                            "afi": "ipv4",
+                            "used": true
+                        }
+                    ]
+                }
+            ]
+         }
+}
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_vpnv4_routes.json b/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgp_vpnv4_routes.json
new file mode 100644 (file)
index 0000000..46f4a18
--- /dev/null
@@ -0,0 +1,187 @@
+{
+    "vrfName": "default",
+    "localAS": 65501,
+    "routes":
+    {
+        "routeDistinguishers":
+        {
+            "444:1":
+            {
+                "172.31.0.11/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.11",
+                        "prefixLen": 32,
+                        "network": "172.31.0.11\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.12/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.12",
+                        "prefixLen": 32,
+                        "network": "172.31.0.12\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.13/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.13",
+                        "prefixLen": 32,
+                        "network": "172.31.0.13\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.14/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.14",
+                        "prefixLen": 32,
+                        "network": "172.31.0.14\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.15/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.15",
+                        "prefixLen": 32,
+                        "network": "172.31.0.15\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.20/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.20",
+                        "prefixLen": 32,
+                        "network": "172.31.0.20\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172.31.0.111/32": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172.31.0.111",
+                        "prefixLen": 32,
+                        "network": "172.31.0.111\/32",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "192.0.2.0/24": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "192.0.2.0",
+                        "prefixLen": 24,
+                        "network": "192.0.2.0\/24",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "192.168.255.0/24": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "192.168.255.0",
+                        "prefixLen": 24,
+                        "network": "192.168.255.0\/24",
+                        "peerId": "192.168.0.1",
+                        "nexthops": [
+                            {
+                                "ip": "192.168.0.1",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ]
+            },
+            "444:2":
+            {
+                "10.200.0.0/24": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "10.200.0.0",
+                        "prefixLen": 24,
+                        "network": "10.200.0.0\/24",
+                        "peerId": "(unspec)",
+                        "nhVrfName": "vrf1",
+                        "nexthops": [
+                            {
+                                "ip": "0.0.0.0",
+                                "afi": "ipv4",
+                                "used": true
+                            }
+                        ]
+                    }
+                ]
+            }
+        }
+    }
+}
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..5fb7902
--- /dev/null
@@ -0,0 +1,25 @@
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192.168.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 192.168.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:2
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/r2/zebra.conf
new file mode 100644 (file)
index 0000000..b7283a3
--- /dev/null
@@ -0,0 +1,7 @@
+log stdout
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/rr/bgpd.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/rr/bgpd.conf
new file mode 100644 (file)
index 0000000..ff32314
--- /dev/null
@@ -0,0 +1,13 @@
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.11 remote-as 65500
+ neighbor 192.0.2.12 remote-as 65500
+ address-family ipv4 unicast
+  neighbor 192.0.2.1 route-reflector-client
+  neighbor 192.0.2.11 route-reflector-client
+  neighbor 192.0.2.12 route-reflector-client
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/rr/zebra.conf b/tests/topotests/bgp_vpnv4_per_nexthop_label/rr/zebra.conf
new file mode 100644 (file)
index 0000000..315c22a
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface rr-eth0
+ ip address 192.0.2.100/24
+!
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
new file mode 100644 (file)
index 0000000..966b717
--- /dev/null
@@ -0,0 +1,795 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv4_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv4_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
+ Let us exchange VPNv4 updates between both devices
+ Updates from r1 will originate from the same RD, but will have separate
+ label values.
+
+     +----------+
+     |   r11    |
+     |192.0.2.11+---+
+     |          |   |                   +----+--------+              +----------+
+     +----------+   |         192.0.2.1 |vrf | r1     |192.168.0.0/24|    r2    |
+                    +-------------------+    |       1+--------------+          |
+     +----------+   |                   |VRF1|AS65500 |              | AS65501  |
+     |   r12    |   |     +-------------+    |   VPNV4|              |VPNV4     |
+     |192.0.2.12+---+     |192.168.255.1+-+--+--------+              +----------+
+     |          |         |
+     +----------+         |
+                          |
+     +----------+         |
+     |   r13    |         |
+     |192.168.  +---------+
+     | 255.13   |
+     +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172.31.0.11/32", "172.31.0.20/32", "172.31.0.111/32"]
+PREFIXES_R12 = ["172.31.0.12/32", "172.31.0.15/32"]
+PREFIXES_R13 = ["172.31.0.13/32"]
+PREFIXES_REDIST = ["172.31.0.14/32"]
+PREFIXES_CONNECTED = ["192.168.255.0/24", "192.0.2.0/24"]
+
+
+def build_topo(tgen):
+    "Build function"
+
+    # Create 2 routers.
+    tgen.add_router("r1")
+    tgen.add_router("r2")
+    tgen.add_router("r11")
+    tgen.add_router("r12")
+    tgen.add_router("r13")
+    tgen.add_router("r14")
+    tgen.add_router("rr")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r11"])
+    switch.add_link(tgen.gears["r12"])
+    switch.add_link(tgen.gears["rr"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s4")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r13"])
+
+    switch = tgen.add_switch("s5")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
+    tgen = get_topogen()
+    cmds_list = [
+        "ip link add vrf1 type vrf table 10",
+        "echo 100000 > /proc/sys/net/mpls/platform_labels",
+        "ip link set dev vrf1 up",
+        "ip link set dev {0}-eth1 master vrf1",
+        "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+    ]
+    cmds_list_plus = [
+        "ip link set dev {0}-eth2 master vrf1",
+    ]
+
+    for cmd in cmds_list:
+        input = cmd.format("r1")
+        logger.info("input: " + cmd)
+        output = tgen.net["r1"].cmd(cmd.format("r1"))
+        logger.info("output: " + output)
+
+    for cmd in cmds_list_plus:
+        input = cmd.format("r1")
+        logger.info("input: " + cmd)
+        output = tgen.net["r1"].cmd(cmd.format("r1"))
+        logger.info("output: " + output)
+
+    for cmd in cmds_list:
+        input = cmd.format("r2")
+        logger.info("input: " + cmd)
+        output = tgen.net["r2"].cmd(cmd.format("r2"))
+        logger.info("output: " + output)
+
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    _populate_iface()
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    tgen.stop_topology()
+
+
+def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None):
+    """
+    Dump and check that vpnv4 entries have the same MPLS label value
+    * 'router': the router to check
+    * 'group': the list of prefixes to check. a single label value for the group has to be found
+    * 'label_list': check that the label values are not present in the vpnv4 entries
+    *              that list is updated with the present label value
+    * 'label_value_expected': check that the mpls label read is the same as that value
+    """
+
+    stored_label_inited = False
+    for prefix in group:
+        dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+        assert dump, "{0}, {1}, route distinguisher not present".format(
+            router.name, prefix
+        )
+        for rd, pathes in dump.items():
+            for path in pathes["paths"]:
+                assert (
+                    "remoteLabel" in path.keys()
+                ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+                logger.info(
+                    "{0}, {1}, label value is {2}".format(
+                        router.name, prefix, path["remoteLabel"]
+                    )
+                )
+                if stored_label_inited:
+                    assert (
+                        path["remoteLabel"] == stored_label
+                    ), "{0}, {1}, label value not expected one (expected {2}, observed {3}".format(
+                        router.name, prefix, stored_label, path["remoteLabel"]
+                    )
+                else:
+                    stored_label = path["remoteLabel"]
+                    stored_label_inited = True
+                    if label_list is not None:
+                        assert (
+                            stored_label not in label_list
+                        ), "{0}, {1}, label already detected in a previous prefix".format(
+                            router.name, prefix
+                        )
+                        label_list.add(stored_label)
+
+                if label_value_expected:
+                    assert (
+                        path["remoteLabel"] == label_value_expected
+                    ), "{0}, {1}, label value not expected (expected {2}, observed {3}".format(
+                        router.name, prefix, label_value_expected, path["remoteLabel"]
+                    )
+
+
+def bgp_vpnv4_table_check_all(router, label_list=None, same=False):
+    """
+    Dump and check that vpnv4 entries are correctly configured with specific label values
+    * 'router': the router to check
+    * 'label_list': check that the label values are not present in the vpnv4 entries
+    *              that list is updated with the present label value found.
+    * 'same': by default, set to False. Address groups are classified by address.
+    *         if set to True, all entries of all groups should have a unique label value
+    """
+    if same:
+        bgp_vpnv4_table_check(
+            router,
+            group=PREFIXES_R11
+            + PREFIXES_R12
+            + PREFIXES_R13
+            + PREFIXES_REDIST
+            + PREFIXES_CONNECTED,
+            label_list=label_list,
+        )
+    else:
+        for group in (
+            PREFIXES_R11,
+            PREFIXES_R12,
+            PREFIXES_R13,
+            PREFIXES_REDIST,
+            PREFIXES_CONNECTED,
+        ):
+            bgp_vpnv4_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+    """
+    Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+    * 'router': the router to check
+    * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+    * 'label_list': the list of labels that should be in inLabel value
+    * 'whitelist': the list of nexthops (IP or interface) that should be on output
+    """
+    nexthop_list = []
+    if blacklist:
+        nexthop_list.append(blacklist)
+    logger.info("Checking MPLS labels on {}".format(router.name))
+    dump = router.vtysh_cmd("show mpls table json", isjson=True)
+    for in_label, label_info in dump.items():
+        if label_list is not None:
+            label_list.add(in_label)
+        for nh in label_info["nexthops"]:
+            assert (
+                nh["installed"] == True and nh["type"] == "BGP"
+            ), "{}, show mpls table, nexthop is not installed".format(router.name)
+            if "nexthop" in nh.keys():
+                assert (
+                    nh["nexthop"] not in nexthop_list
+                ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+                    router.name
+                )
+                nexthop_list.append(nh["nexthop"])
+            elif "interface" in nh.keys():
+                assert (
+                    nh["interface"] not in nexthop_list
+                ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+                    router.name
+                )
+                nexthop_list.append(nh["interface"])
+            else:
+                assert (
+                    0
+                ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+                    router.name
+                )
+
+    if whitelist:
+        for entry in whitelist:
+            assert (
+                entry in nexthop_list
+            ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+                router.name, entry
+            )
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+    output = json.loads(
+        router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+    )
+    if label:
+        expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+    else:
+        expected = {rd: {"prefix": prefix}}
+    ret = topotest.json_cmp(output, expected)
+    if ret is None:
+        return "not good"
+    return None
+
+
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+    output = json.loads(
+        router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+    )
+    expected = {rd: {"prefix": prefix}}
+    return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+    expected = {
+        "inLabel": inlabel,
+        "installed": True,
+        "nexthops": [{"interface": interface}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+    expected = {"inlabel": inlabel, "installed": True}
+    ret = topotest.json_cmp(output, expected)
+    if ret is None:
+        return "not good"
+    return None
+
+
+def mpls_entry_get_interface(router, label):
+    """
+    Assert that the label is in MPLS table
+    Assert an outgoing interface is programmed
+    return the outgoing interface
+    """
+    outgoing_interface = None
+
+    logger.info("Checking MPLS labels on {}".format(router.name))
+    dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+    assert dump, "{0}, label {1} not present".format(router.name, label)
+
+    for nh in dump["nexthops"]:
+        assert (
+            "interface" in nh.keys()
+        ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+            router.name, label
+        )
+
+        outgoing_interface = nh["interface"]
+
+    return outgoing_interface
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged,
+    as the subsequent tests depend on the converged statuses.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Check BGP IPv4 routing tables on VRF1 of r1
+    logger.info("Checking BGP IPv4 routes for convergence on r1 VRF1")
+    router = tgen.gears["r1"]
+    json_file = "{}/{}/bgp_ipv4_routes_vrf1.json".format(CWD, router.name)
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp vrf vrf1 ipv4 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    logger.info("Checking BGP VPNv4 routes for convergence on r2")
+    router = tgen.gears["r2"]
+    json_file = "{}/{}/bgp_vpnv4_routes.json".format(CWD, router.name)
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp ipv4 vpn json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check BGP labels received on r2
+    logger.info("Checking BGP VPNv4 labels on r2")
+    label_list = set()
+    bgp_vpnv4_table_check_all(tgen.gears["r2"], label_list)
+
+    # Check MPLS labels received on r1
+    mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+    """
+    Turn down a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Unpeering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nno neighbor 192.0.2.100\n",
+        isjson=False,
+    )
+
+    def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+        output = json.loads(
+            router.vtysh_cmd(
+                "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+            )
+        )
+        expected = {"prefix": prefix}
+        ret = topotest.json_cmp(output, expected)
+        if ret is None:
+            return "not good"
+        return None
+
+    # Check prefix from r11 is not present
+    test_func = functools.partial(
+        _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv4", "172.31.0.11/32"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r1, prefix 172.31.0.11/32 from r11 did not disappear. r11 still connected to rr ?"
+
+    # Check BGP updated received on r2 are not from r11
+    logger.info("Checking BGP VPNv4 labels on r2")
+    for entry in PREFIXES_R11:
+        dump = tgen.gears["r2"].vtysh_cmd(
+            "show bgp ipv4 vpn {} json".format(entry), isjson=True
+        )
+        for rd in dump:
+            assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+    mpls_table_check(tgen.gears["r1"], blacklist=["192.0.2.11"])
+
+
+def test_flapping_bgp_vrf_up():
+    """
+    Turn up a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Peering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nneighbor 192.0.2.100 remote-as 65500\n",
+        isjson=False,
+    )
+
+    # Check r2 gets prefix 172.31.0.11/32
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.11/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, prefix 172.31.0.11/32 from r11 not present. r11 still disconnected from rr ?"
+    bgp_vpnv4_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+    """
+    Test static recursive route redistributed over BGP
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nip route 172.31.0.30/32 172.31.0.20\n",
+        isjson=False,
+    )
+    logger.info("Checking BGP VPNv4 labels on r2")
+
+    # Check r2 received vpnv4 update with 172.31.0.30
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.30/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.30 not found"
+
+    bgp_vpnv4_table_check(tgen.gears["r2"], group=PREFIXES_R11 + ["172.31.0.30/32"])
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+    logger.info("Dumping nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+    logger.info("Disabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nno ip route 172.31.0.30/32 172.31.0.20\n",
+        isjson=False,
+    )
+    logger.info("Checking BGP VPNv4 labels on r2")
+
+    # Check r2 removed 172.31.0.30 vpnv4 update
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.30/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.30 still present"
+
+
+def test_prefix_changes_interface():
+    """
+    Test BGP update for a given prefix learnt on different interface
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling a 172.31.0.50/32 prefix for r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+        isjson=False,
+    )
+
+    # Check r2 received vpnv4 update with 172.31.0.50
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    label_list = set()
+    bgp_vpnv4_table_check(
+        tgen.gears["r2"],
+        group=["172.31.0.11/32", "172.31.0.111/32", "172.31.0.50/32"],
+        label_list=label_list,
+    )
+
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r11 found"
+
+    oldlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+    old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            oldlabel, old_outgoing_interface
+        )
+    )
+
+    logger.info("Moving the 172.31.0.50/32 prefix from r11 to r13")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+        isjson=False,
+    )
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
+        isjson=False,
+    )
+
+    # Check r2 removed 172.31.0.50 vpnv4 update with old label
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+        label=oldlabel,
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, vpnv4 update 172.31.0.50 with old label {0} still present".format(oldlabel)
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    # Check r2 received new 172.31.0.50 vpnv4 update
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv4",
+        "172.31.0.50/32",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv4 update 172.31.0.50 not found"
+
+    label_list = set()
+    bgp_vpnv4_table_check(
+        tgen.gears["r2"],
+        group=PREFIXES_R13 + ["172.31.0.50/32"],
+        label_list=label_list,
+    )
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r13 found"
+
+    newlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+    new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            newlabel, new_outgoing_interface
+        )
+    )
+    if old_outgoing_interface == new_outgoing_interface:
+        assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+    logger.info("Restoring state by removing the 172.31.0.50/32 prefix from r13")
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
+        isjson=False,
+    )
+
+
+def test_changing_default_label_value():
+    """
+    Change the MPLS default value
+    Check that r1 VPNv4 entries have the 222 label value
+    Check that MPLS entry with old label value is no more present
+    Check that MPLS entry for local traffic has inLabel set to 222
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears["r1"]
+
+    # counting the number of labels used in the VPNv4 table
+    label_list = set()
+    logger.info("r1, vpnv4 table, check the number of labels used before modification")
+    bgp_vpnv4_table_check_all(router, label_list)
+    old_len = len(label_list)
+    assert (
+        old_len != 1
+    ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)
+
+    logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export 222\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 222 not found"
+
+    # check label repartition is ok
+    logger.info("r1, vpnv4 table, check the number of labels used after modification")
+    label_list = set()
+    bgp_vpnv4_table_check_all(router, label_list)
+    new_len = len(label_list)
+    assert (
+        old_len == new_len
+    ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+        new_len, old_len
+    )
+
+    logger.info(
+        "r1, vpnv4 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+    )
+    bgp_vpnv4_table_check(
+        router, group=["192.168.255.0/24", "192.0.2.0/24"], label_value_expected=222
+    )
+
+
+def test_unconfigure_allocation_mode_nexthop():
+    """
+    Test unconfiguring allocation mode per nexthop
+    Check that show mpls table has no entry with label 17 (previously used)
+    Check that all VPN updates on r1 should have label value moved to 222
+    Check that show mpls table will only have 222 label value
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Unconfiguring allocation mode per nexthop")
+    router = tgen.gears["r1"]
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nno label vpn export allocation-mode per-nexthop\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_not_found, router, 17
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 17 still present"
+
+    # Check vpnv4 routes from r1
+    logger.info("Checking vpnv4 routes on r1")
+    label_list = set()
+    bgp_vpnv4_table_check_all(router, label_list=label_list, same=True)
+    assert len(label_list) == 1, "r1, multiple Label values found for vpnv4 updates"
+
+    new_label = label_list.pop()
+    assert (
+        new_label == 222
+    ), "r1, wrong label value in VPNv4 table, expected 222, observed {}".format(
+        new_label
+    )
+
+    # Check mpls table with 222 value
+    logger.info("Checking MPLS values on show mpls table of r1")
+    label_list = set()
+    label_list.add(222)
+    mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+    """
+    Test re-configuring allocation mode per nexthop
+    Check that show mpls table has no entry with label 17
+    Check that all VPN updates on r1 should have multiple label values and not only 222
+    Check that show mpls table will have multiple label values and not only 222
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Reconfiguring allocation mode per nexthop")
+    router = tgen.gears["r1"]
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+        isjson=False,
+    )
+
+    # Check that show mpls table has no entry with label 17
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is present"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_not_found, router, 17
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 17 still present"
+
+    # Check vpnv4 routes from r1
+    logger.info("Checking vpnv4 routes on r1")
+    label_list = set()
+    bgp_vpnv4_table_check_all(router, label_list=label_list)
+    assert len(label_list) != 1, "r1, only 1 label values found for vpnv4 updates"
+
+    # Check mpls table with all values
+    logger.info("Checking MPLS values on show mpls table of r1")
+    mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/__init__.py b/tests/topotests/bgp_vpnv6_per_nexthop_label/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgp_ipv6_routes_vrf1.json b/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgp_ipv6_routes_vrf1.json
new file mode 100644 (file)
index 0000000..39ba7dd
--- /dev/null
@@ -0,0 +1,186 @@
+{
+     "vrfName": "vrf1",
+     "localAS": 65500,
+     "routes":
+         {
+            "10:200::/64": [
+                {
+                    "valid": true,
+                    "bestpath": true,
+                    "prefix": "10:200::",
+                    "prefixLen": 64,
+                    "network": "10:200::/64",
+                    "nexthops": [
+                        {
+                            "ip": "192:168::2",
+                            "afi": "ipv6",
+                            "used": true
+                        }
+                    ]
+                }
+            ],
+             "172:31::11/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::11",
+                     "prefixLen":128,
+                     "network":"172:31::11/128",
+                     "peerId":"192:2::100",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::11",
+                             "afi":"ipv6",
+                             "scope":"global"
+                         }
+                     ]
+                 }
+             ],
+             "172:31::12/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::12",
+                     "prefixLen":128,
+                     "network":"172:31::12/128",
+                     "peerId":"192:2::100",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::12",
+                             "afi":"ipv6",
+                             "scope":"global",
+                             "used":true
+                         },
+                         {
+                             "scope": "link-local"
+                         }
+                     ]
+                 }
+             ],
+             "172:31::13/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::13",
+                     "prefixLen":128,
+                     "network":"172:31::13/128",
+                     "peerId":"192:168::255:13",
+                     "nexthops":[
+                         {
+                             "ip":"192:168::255:13",
+                             "afi":"ipv6",
+                             "scope": "global",
+                             "used":true
+                         },
+                         {
+                             "scope": "link-local"
+                         }
+                     ]
+                 }
+             ],
+             "172:31::14/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::14",
+                     "prefixLen":128,
+                     "network":"172:31::14/128",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::14",
+                             "afi":"ipv6",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172:31::15/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::15",
+                     "prefixLen":128,
+                     "network":"172:31::15/128",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::12",
+                             "afi":"ipv6",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172:31::20/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::20",
+                     "prefixLen":128,
+                     "network":"172:31::20/128",
+                     "peerId":"192:2::100",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::11",
+                             "afi":"ipv6",
+                             "scope":"global",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "172:31::111/128": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"172:31::111",
+                     "prefixLen":128,
+                     "network":"172:31::111/128",
+                     "peerId":"192:2::100",
+                     "nexthops":[
+                         {
+                             "ip":"192:2::11",
+                             "afi":"ipv6",
+                             "scope":"global",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "192:2::/64": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"192:2::",
+                     "prefixLen":64,
+                     "network":"192:2::/64",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"::",
+                             "afi":"ipv6",
+                             "used":true
+                         }
+                     ]
+                 }
+             ],
+             "192:168::255:0/112": [
+                 {
+                     "valid":true,
+                     "bestpath":true,
+                     "prefix":"192:168::255:0",
+                     "prefixLen":112,
+                     "network":"192:168::255:0/112",
+                     "peerId":"(unspec)",
+                     "nexthops":[
+                         {
+                             "ip":"::",
+                             "afi":"ipv6",
+                             "used":true
+                         }
+                     ]
+                 }
+             ]
+         }
+}
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..6bb0a88
--- /dev/null
@@ -0,0 +1,45 @@
+debug bgp vpn leak-from-vrf
+debug bgp vpn label
+debug bgp nht
+debug bgp updates out
+router bgp 65500
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ neighbor 192:168::2 remote-as 65501
+ address-family ipv4 unicast
+  no neighbor 192:168::2 activate
+ exit-address-family
+ address-family ipv6 vpn
+  neighbor 192:168::2 activate
+  neighbor 192:168::2 soft-reconfiguration inbound
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.168.0.1
+ neighbor 192:2::100 remote-as 65500
+ neighbor 192:168::255:13 remote-as 65500
+ address-family ipv6 unicast
+  neighbor 192:2::100 activate
+  neighbor 192:2::100 route-map rmap in
+  neighbor 192:168::255:13 activate
+  neighbor 192:168::255:13 route-map rmap in
+  redistribute connected
+  redistribute static route-map rmap
+  label vpn export allocation-mode per-nexthop
+  label vpn export auto
+  rd vpn export 444:1
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r1-eth0
+ mpls bgp forwarding
+!
+bgp community-list 1 seq 5 permit 10:10
+!
+route-map rmap permit 1
+ set ipv6 next-hop prefer-global
+!
+route-map rmap permit 2
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r1/zebra.conf
new file mode 100644 (file)
index 0000000..bdad9ee
--- /dev/null
@@ -0,0 +1,18 @@
+log stdout
+debug zebra nht
+!debug zebra kernel msgdump recv
+!debug zebra dplane detailed
+!debug zebra packet recv
+interface r1-eth1 vrf vrf1
+ ipv6 address 192:2::1/64
+!
+interface r1-eth2 vrf vrf1
+ ipv6 address 192:168::255:1/112
+!
+interface r1-eth0
+ ip address 192:168::1/112
+!
+vrf vrf1
+ ipv6 route 172:31::14/128 192:2::14
+ ipv6 route 172:31::15/128 192:2::12
+exit-vrf
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r11/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r11/bgpd.conf
new file mode 100644 (file)
index 0000000..cb653d6
--- /dev/null
@@ -0,0 +1,15 @@
+router bgp 65500
+ bgp router-id 11.11.11.11
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+  neighbor 192:2::100 activate
+  network 172:31::11/128
+  network 172:31::111/128
+  network 172:31::20/128
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r11/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r11/zebra.conf
new file mode 100644 (file)
index 0000000..a76080d
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r11-eth0
+ ipv6 address 192:2::11/64
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r12/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r12/bgpd.conf
new file mode 100644 (file)
index 0000000..d41fb18
--- /dev/null
@@ -0,0 +1,13 @@
+router bgp 65500
+ bgp router-id 12.12.12.12
+ no bgp network import-check
+ neighbor 192:2::100 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192:2::100 activate
+ !
+ address-family ipv6 unicast
+  neighbor 192:2::100 activate
+  network 172:31::12/128
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r12/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r12/zebra.conf
new file mode 100644 (file)
index 0000000..df9cae4
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r12-eth0
+ ipv6 address 192:2::12/64
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r13/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r13/bgpd.conf
new file mode 100644 (file)
index 0000000..0437882
--- /dev/null
@@ -0,0 +1,12 @@
+router bgp 65500
+ bgp router-id 13.13.13.13
+ no bgp network import-check
+ neighbor 192:168::255:1 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192:168::255:1 activate
+ exit-address-family
+ address-family ipv6 unicast
+  neighbor 192:168::255:1 activate
+  network 172:31::0:13/128
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r13/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r13/zebra.conf
new file mode 100644 (file)
index 0000000..dfe5994
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface r13-eth0
+ ipv6 address 192:168::255:13/112
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgp_vpnv6_routes.json b/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgp_vpnv6_routes.json
new file mode 100644 (file)
index 0000000..bb7d5c0
--- /dev/null
@@ -0,0 +1,187 @@
+{
+    "vrfName": "default",
+    "localAS": 65501,
+    "routes":
+    {
+        "routeDistinguishers":
+        {
+            "444:1":
+            {
+                "172:31::11/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::11",
+                        "prefixLen": 128,
+                        "network": "172:31::11/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::12/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::12",
+                        "prefixLen": 128,
+                        "network": "172:31::12/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::13/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::13",
+                        "prefixLen": 128,
+                        "network": "172:31::13/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::14/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::14",
+                        "prefixLen": 128,
+                        "network": "172:31::14/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::15/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::15",
+                        "prefixLen": 128,
+                        "network": "172:31::15/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::20/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::20",
+                        "prefixLen": 128,
+                        "network": "172:31::20/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "172:31::111/128": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "172:31::111",
+                        "prefixLen": 128,
+                        "network": "172:31::111/128",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "192:2::/64": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "192:2::",
+                        "prefixLen": 64,
+                        "network": "192:2::/64",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ],
+                "192:168::255:0/112": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "192:168::255:0",
+                        "prefixLen": 112,
+                        "network": "192:168::255:0/112",
+                        "peerId": "192:168::1",
+                        "nexthops": [
+                            {
+                                "ip": "192:168::1",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ]
+            },
+            "444:2":
+            {
+                "10:200::/64": [
+                    {
+                        "valid": true,
+                        "bestpath": true,
+                        "prefix": "10:200::",
+                        "prefixLen": 64,
+                        "network": "10:200::/64",
+                        "peerId": "(unspec)",
+                        "nhVrfName": "vrf1",
+                        "nexthops": [
+                            {
+                                "ip": "::",
+                                "afi": "ipv6",
+                                "used": true
+                            }
+                        ]
+                    }
+                ]
+            }
+        }
+    }
+}
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..30e9959
--- /dev/null
@@ -0,0 +1,25 @@
+router bgp 65501
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ neighbor 192:168::1 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192:168::1 activate
+ exit-address-family
+ address-family ipv6 vpn
+  neighbor 192:168::1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.168.0.2
+ address-family ipv6 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:2
+  rt vpn both 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+interface r2-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/r2/zebra.conf
new file mode 100644 (file)
index 0000000..47cee95
--- /dev/null
@@ -0,0 +1,7 @@
+log stdout
+interface r2-eth1 vrf vrf1
+ ipv6 address 10:200::2/64
+!
+interface r2-eth0
+ ipv6 address 192:168::2/112
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/rr/bgpd.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/rr/bgpd.conf
new file mode 100644 (file)
index 0000000..8c7664b
--- /dev/null
@@ -0,0 +1,24 @@
+router bgp 65500
+ bgp router-id 100.100.100.100
+ no bgp network import-check
+ neighbor 192:2::1 remote-as 65500
+ neighbor 192:2::11 remote-as 65500
+ neighbor 192:2::12 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 192:2::1 activate
+  no neighbor 192:2::11 activate
+  no neighbor 192:2::12 activate
+ !
+ address-family ipv6 unicast
+  neighbor 192:2::1 activate
+  neighbor 192:2::1 route-reflector-client
+  neighbor 192:2::1 nexthop-local unchanged
+  neighbor 192:2::11 activate
+  neighbor 192:2::11 route-reflector-client
+  neighbor 192:2::11 nexthop-local unchanged
+  neighbor 192:2::12 activate
+  neighbor 192:2::12 route-reflector-client
+  neighbor 192:2::12 nexthop-local unchanged
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/rr/zebra.conf b/tests/topotests/bgp_vpnv6_per_nexthop_label/rr/zebra.conf
new file mode 100644 (file)
index 0000000..94b82dc
--- /dev/null
@@ -0,0 +1,4 @@
+log stdout
+interface rr-eth0
+ ipv6 address 192:2::100/64
+!
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py
new file mode 100644 (file)
index 0000000..719ba80
--- /dev/null
@@ -0,0 +1,798 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# test_bgp_vpnv6_per_nexthop_label.py
+#
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+ test_bgp_vpnv6_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
+ Let us exchange VPNv6 updates between both devices
+ Updates from r1 will originate from the same RD, but will have separate
+ label values.
+
+     +----------+
+     |   r11    |
+     |192:2::11 +---+
+     |          |   |                   +----+--------+              +----------+
+     +----------+   |          192:2::1 |vrf | r1     |192:168::/112 |    r2    |
+                    +-------------------+    |       1+--------------+          |
+     +----------+   |                   |VRF1|AS65500 |              | AS65501  |
+     |   r12    |   |    +--------------+    |   VPNV6|              |VPNV6     |
+     |192:2::12 +---+    |192:168::255:1+-+--+--------+              +----------+
+     |          |        |
+     +----------+        |
+                         |
+     +----------+        |
+     |   r13    |        |
+     |192:168:: +--------+
+     | 255:13   |
+     +----------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+pytestmark = [pytest.mark.bgpd]
+
+PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"]
+PREFIXES_R12 = ["172:31::12/128", "172:31::15/128"]
+PREFIXES_REDIST_R14 = ["172:31::14/128"]
+PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"]
+
+
+def build_topo(tgen):
+    "Build function"
+
+    # Create 2 routers.
+    tgen.add_router("r1")
+    tgen.add_router("r2")
+    tgen.add_router("r11")
+    tgen.add_router("r12")
+    tgen.add_router("r13")
+    tgen.add_router("r14")
+    tgen.add_router("rr")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r11"])
+    switch.add_link(tgen.gears["r12"])
+    switch.add_link(tgen.gears["rr"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s4")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r13"])
+
+    switch = tgen.add_switch("s5")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r14"])
+
+
+def _populate_iface():
+    tgen = get_topogen()
+    cmds_list = [
+        "ip link add vrf1 type vrf table 10",
+        "echo 100000 > /proc/sys/net/mpls/platform_labels",
+        "ip link set dev vrf1 up",
+        "ip link set dev {0}-eth1 master vrf1",
+        "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+    ]
+    cmds_list_plus = [
+        "ip link set dev {0}-eth2 master vrf1",
+    ]
+
+    for cmd in cmds_list:
+        input = cmd.format("r1")
+        logger.info("input: " + cmd)
+        output = tgen.net["r1"].cmd(cmd.format("r1"))
+        logger.info("output: " + output)
+
+    for cmd in cmds_list_plus:
+        input = cmd.format("r1")
+        logger.info("input: " + cmd)
+        output = tgen.net["r1"].cmd(cmd.format("r1"))
+        logger.info("output: " + output)
+
+    for cmd in cmds_list:
+        input = cmd.format("r2")
+        logger.info("input: " + cmd)
+        output = tgen.net["r2"].cmd(cmd.format("r2"))
+        logger.info("output: " + output)
+
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    _populate_iface()
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    tgen.stop_topology()
+
+
+def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None):
+    """
+    Dump and check that vpnv6 entries have the same MPLS label value
+    * 'router': the router to check
+    * 'group': the list of prefixes to check. a single label value for the group has to be found
+    * 'label_list': check that the label values are not present in the vpnv6 entries
+    *              that list is updated with the present label value
+    * 'label_value_expected': check that the mpls label read is the same as that value
+    """
+
+    stored_label_inited = False
+    for prefix in group:
+        dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
+        for rd, pathes in dump.items():
+            for path in pathes["paths"]:
+                assert (
+                    "remoteLabel" in path.keys()
+                ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
+                logger.info(
+                    "{0}, {1}, label value is {2}".format(
+                        router.name, prefix, path["remoteLabel"]
+                    )
+                )
+                if stored_label_inited:
+                    assert (
+                        path["remoteLabel"] == stored_label
+                    ), "{0}, {1}, label value not expected one (expected {2}, observed {3}".format(
+                        router.name, prefix, stored_label, path["remoteLabel"]
+                    )
+                else:
+                    stored_label = path["remoteLabel"]
+                    stored_label_inited = True
+                    if label_list is not None:
+                        assert (
+                            stored_label not in label_list
+                        ), "{0}, {1}, label already detected in a previous prefix".format(
+                            router.name, prefix
+                        )
+                        label_list.add(stored_label)
+
+                if label_value_expected:
+                    assert (
+                        path["remoteLabel"] == label_value_expected
+                    ), "{0}, {1}, label value not expected (expected {2}, observed {3}".format(
+                        router.name, prefix, label_value_expected, path["remoteLabel"]
+                    )
+
+
+def bgp_vpnv6_table_check_all(router, label_list=None, same=False):
+    """
+    Dump and check that vpnv6 entries are correctly configured with specific label values
+    * 'router': the router to check
+    * 'label_list': check that the label values are not present in the vpnv6 entries
+    *              that list is updated with the present label value found.
+    * 'same': by default, set to False. Addresses groups are classified by addresses.
+    *         if set to True, all entries of all groups should have a unique label value
+    """
+    if same:
+        bgp_vpnv6_table_check(
+            router,
+            group=PREFIXES_R11
+            + PREFIXES_R12
+            + PREFIXES_REDIST_R14
+            + PREFIXES_CONNECTED,
+            label_list=label_list,
+        )
+    else:
+        for group in (
+            PREFIXES_R11,
+            PREFIXES_R12,
+            PREFIXES_REDIST_R14,
+            PREFIXES_CONNECTED,
+        ):
+            bgp_vpnv6_table_check(router, group=group, label_list=label_list)
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+    """
+    Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+    * 'router': the router to check
+    * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+    * 'label_list': the list of labels that should be in inLabel value
+    * 'whitelist': the list of nexthops (IP or interface) that should be on output
+    """
+    nexthop_list = []
+    if blacklist:
+        nexthop_list.append(blacklist)
+    logger.info("Checking MPLS labels on {}".format(router.name))
+    dump = router.vtysh_cmd("show mpls table json", isjson=True)
+    for in_label, label_info in dump.items():
+        if label_list is not None:
+            label_list.add(in_label)
+        for nh in label_info["nexthops"]:
+            assert (
+                nh["installed"] == True and nh["type"] == "BGP"
+            ), "{}, show mpls table, nexthop is not installed".format(router.name)
+            if "nexthop" in nh.keys():
+                assert (
+                    nh["nexthop"] not in nexthop_list
+                ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+                    router.name
+                )
+                nexthop_list.append(nh["nexthop"])
+            elif "interface" in nh.keys():
+                assert (
+                    nh["interface"] not in nexthop_list
+                ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+                    router.name
+                )
+                nexthop_list.append(nh["interface"])
+            else:
+                assert (
+                    0
+                ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+                    router.name
+                )
+
+    if whitelist:
+        for entry in whitelist:
+            assert (
+                entry in nexthop_list
+            ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+                router.name, entry
+            )
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+    output = json.loads(
+        router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+    )
+    if label:
+        expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+    else:
+        expected = {rd: {"prefix": prefix}}
+    ret = topotest.json_cmp(output, expected)
+    if ret is None:
+        return "not good"
+    return None
+
+
+def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
+    output = json.loads(
+        router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+    )
+    expected = {rd: {"prefix": prefix}}
+    return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_found(router, inlabel, interface):
+    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+    expected = {
+        "inLabel": inlabel,
+        "installed": True,
+        "nexthops": [{"interface": interface}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+    expected = {"inLabel": inlabel, "installed": True}
+    ret = topotest.json_cmp(output, expected)
+    if ret is None:
+        return "not good"
+    return None
+
+
+def mpls_entry_get_interface(router, label):
+    """
+    Assert that the label is in MPLS table
+    Assert an outgoing interface is programmed
+    return the outgoing interface
+    """
+    outgoing_interface = None
+
+    logger.info("Checking MPLS labels on {}".format(router.name))
+    dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
+    assert dump, "{}, show mpls table, inLabel {} not found".format(router.name, label)
+
+    for nh in dump["nexthops"]:
+        assert (
+            "interface" in nh.keys()
+        ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
+            router.name, label
+        )
+
+        outgoing_interface = nh["interface"]
+
+    return outgoing_interface
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged
+    statuses as they depend on it.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Check BGP IPv6 routing tables on VRF1 of r1
+    logger.info("Checking BGP IPv6 routes for convergence on r1 VRF1")
+    router = tgen.gears["r1"]
+    json_file = "{}/{}/bgp_ipv6_routes_vrf1.json".format(CWD, router.name)
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp vrf vrf1 ipv6 json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    logger.info("Checking BGP VPNv6 routes for convergence on r2")
+    router = tgen.gears["r2"]
+    json_file = "{}/{}/bgp_vpnv6_routes.json".format(CWD, router.name)
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp ipv6 vpn json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check BGP labels received on r2
+    logger.info("Checking BGP VPNv6 labels on r2")
+    label_list = set()
+    bgp_vpnv6_table_check_all(tgen.gears["r2"], label_list)
+
+    # Check MPLS labels received on r1
+    mpls_table_check(tgen.gears["r1"], label_list)
+
+
+def test_flapping_bgp_vrf_down():
+    """
+    Turn down a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Unpeering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nno neighbor 192:2::100\n",
+        isjson=False,
+    )
+
+    def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
+        output = json.loads(
+            router.vtysh_cmd(
+                "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
+            )
+        )
+        expected = {"prefix": prefix}
+        ret = topotest.json_cmp(output, expected)
+        if ret is None:
+            return "not good"
+        return None
+
+    # Check prefix from r11 is not present
+    test_func = functools.partial(
+        _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv6", "172:31::11/128"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r1, prefix 172:31::11/128 from r11 did not disappear. r11 still connected to rr ?"
+
+    # Check BGP updated received on r2 are not from r11
+    logger.info("Checking BGP VPNv6 labels on r2")
+    for entry in PREFIXES_R11:
+        dump = tgen.gears["r2"].vtysh_cmd(
+            "show bgp ipv6 vpn {} json".format(entry), isjson=True
+        )
+        for rd in dump:
+            assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
+
+    mpls_table_check(tgen.gears["r1"], blacklist=["192:2::11"])
+
+
+def test_flapping_bgp_vrf_up():
+    """
+    Turn up a remote BGP session
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Peering BGP on r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\nneighbor 192:2::100 remote-as 65500\n",
+        isjson=False,
+    )
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp 65500\naddress-family ipv6 unicast\nneighbor 192:2::100 activate\n",
+        isjson=False,
+    )
+
+    # Check r2 gets prefix 172:31::11/128
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv6",
+        "172:31::11/128",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, prefix 172:31::11/128 from r11 not present. r11 still disconnected from rr ?"
+    bgp_vpnv6_table_check_all(tgen.gears["r2"])
+
+
+def test_recursive_route():
+    """
+    Test static recursive route redistributed over BGP
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nipv6 route 172:31::30/128 172:31::20\n",
+        isjson=False,
+    )
+    logger.info("Checking BGP VPNv6 labels on r2")
+    # that route should be sent along with label for 192:2::11
+
+    def _prefix30_not_found(router):
+        output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+        expected = {"444:1": {"prefix": "172:31::30/128"}}
+        ret = topotest.json_cmp(output, expected)
+        if ret is None:
+            return "not good"
+        return None
+
+    def _prefix30_found(router):
+        output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
+        expected = {"444:1": {"prefix": "172:31::30/128"}}
+        return topotest.json_cmp(output, expected)
+
+    # Check r2 received vpnv6 update with 172:31::30
+    test_func = functools.partial(_prefix30_found, tgen.gears["r2"])
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, VPNv6 update 172:31::30 not found"
+
+    # that route should be sent along with label for 192:2::11
+    bgp_vpnv6_table_check(
+        tgen.gears["r2"],
+        group=PREFIXES_R11 + ["172:31::30/128"],
+    )
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+    logger.info("Dumping nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)
+
+    logger.info("Disabling recursive static route")
+    tgen.gears["r1"].vtysh_cmd(
+        "configure terminal\nvrf vrf1\nno ipv6 route 172:31::30/128 172:31::20\n",
+        isjson=False,
+    )
+
+    # Check r2 removed 172:31::30 vpnv6 update
+    test_func = functools.partial(_prefix30_not_found, tgen.gears["r2"])
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, VPNv6 update 172:31::30 still present"
+
+
+def test_prefix_changes_interface():
+    """
+    Test BGP update for a given prefix learnt on different interface
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Enabling a 172:31::50/128 prefix for r11")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+        isjson=False,
+    )
+
+    # Check r2 received vpnv6 update with 172:31::50
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv6",
+        "172:31::50/128",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, VPNv6 update 172:31::50 not found"
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    # label_list is filled in by bgp_vpnv6_table_check with the label values
+    # observed on the checked prefixes; in per-nexthop allocation mode all
+    # prefixes from the same nexthop (r11) must share a single label.
+    label_list = set()
+    bgp_vpnv6_table_check(
+        tgen.gears["r2"],
+        group=PREFIXES_R11 + ["172:31::50/128"],
+        label_list=label_list,
+    )
+
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r11 found"
+
+    # Remember the label and its outgoing interface before moving the prefix.
+    oldlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
+    old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            oldlabel, old_outgoing_interface
+        )
+    )
+
+    logger.info("Moving the 172:31::50/128 prefix from r11 to r13")
+    tgen.gears["r11"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+        isjson=False,
+    )
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
+        isjson=False,
+    )
+
+    # Check r2 removed 172:31::50 vpnv6 update with old label
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_not_found,
+        tgen.gears["r2"],
+        "ipv6",
+        "172:31::50/128",
+        "444:1",
+        label=oldlabel,
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert (
+        success
+    ), "r2, vpnv6 update 172:31::50 with old label {0} still present".format(oldlabel)
+
+    # diagnostic
+    logger.info("Dumping label nexthop table")
+    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
+
+    # Check r2 received new 172:31::50 vpnv6 update
+    test_func = functools.partial(
+        check_show_bgp_vpn_prefix_found,
+        tgen.gears["r2"],
+        "ipv6",
+        "172:31::50/128",
+        "444:1",
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r2, vpnv6 update 172:31::50 not found"
+
+    # The prefix now comes from r13, so it must share r13's single label.
+    label_list = set()
+    bgp_vpnv6_table_check(
+        tgen.gears["r2"],
+        group=["172:31::13/128", "172:31::50/128"],
+        label_list=label_list,
+    )
+    assert (
+        len(label_list) == 1
+    ), "Multiple Label values found for updates from r13 found"
+
+    newlabel = label_list.pop()
+    logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
+    new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
+    logger.info(
+        "r1, outgoing interface used by label {} is {}".format(
+            newlabel, new_outgoing_interface
+        )
+    )
+    # r11 and r13 are assumed to be reached via different interfaces on r1,
+    # so moving the prefix must change the MPLS outgoing interface.
+    if old_outgoing_interface == new_outgoing_interface:
+        assert 0, "r1, outgoing interface did not change whereas BGP update moved"
+
+    logger.info("Restoring state by removing the 172:31::50/128 prefix from r13")
+    tgen.gears["r13"].vtysh_cmd(
+        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
+        isjson=False,
+    )
+
+
+def test_changing_default_label_value():
+    """
+    Change the MPLS default value
+    Check that r1 VPNv6 entries have the 222 label value
+    Check that MPLS entry with old label value is no more present
+    Check that MPLS entry for local traffic has inLabel set to 222
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears["r1"]
+
+    # counting the number of labels used in the VPNv6 table
+    label_list = set()
+    logger.info("r1, VPNv6 table, check the number of labels used before modification")
+    bgp_vpnv6_table_check_all(router, label_list)
+    old_len = len(label_list)
+    # NOTE(review): the condition only checks != 1 and would also accept 0,
+    # although the message claims "greater than 1"; message also has a typo
+    # ("oberved") — both left as-is since they are runtime strings/logic.
+    assert (
+        old_len != 1
+    ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)
+
+    logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
+    router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export 222\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_found, router, 222, "vrf1"
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 222 not found"
+
+    # check label repartition is ok
+    logger.info("r1, VPNv6 table, check the number of labels used after modification")
+    label_list = set()
+    bgp_vpnv6_table_check_all(router, label_list)
+    new_len = len(label_list)
+    # Only the default (vrf) label value changed; per-nexthop labels remain,
+    # so the overall number of distinct labels must be unchanged.
+    assert (
+        old_len == new_len
+    ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
+        new_len, old_len
+    )
+
+    logger.info(
+        "r1, VPNv6 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
+    )
+    bgp_vpnv6_table_check(router, group=PREFIXES_CONNECTED, label_value_expected=222)
+
+
+def test_unconfigure_allocation_mode_nexthop():
+    """
+    Test unconfiguring allocation mode per nexthop
+    Check on r2 that new MPLS label values have been propagated
+    Check that show mpls table has no entry with label 17 (previously used)
+    Check that all VPN updates on r1 should have label value moved to 222
+    Check that show mpls table will only have 222 label value
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Unconfiguring allocation mode per nexthop")
+    router = tgen.gears["r1"]
+    # Return value kept only for debugging; not asserted on.
+    dump = router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nno label vpn export allocation-mode per-nexthop\n",
+        isjson=False,
+    )
+
+    # Check r1 updated the MPLS entry with the 222 label value
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
+    )
+    # Label 17 is assumed to be one of the per-nexthop labels allocated by
+    # earlier tests in this module — TODO confirm against the test ordering.
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_not_found, router, 17
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 17 still present"
+
+    # Check vpnv6 routes from r1
+    logger.info("Checking VPNv6 routes on r1")
+    label_list = set()
+    bgp_vpnv6_table_check_all(router, label_list=label_list, same=True)
+    assert len(label_list) == 1, "r1, multiple Label values found for VPNv6 updates"
+
+    # Without per-nexthop allocation, every update must carry the configured
+    # default export label (222, set by the previous test).
+    new_label = label_list.pop()
+    assert (
+        new_label == 222
+    ), "r1, wrong label value in VPNv6 table, expected 222, observed {}".format(
+        new_label
+    )
+
+    # Check mpls table with 222 value
+    logger.info("Checking MPLS values on show mpls table of r1")
+    label_list = set()
+    label_list.add(222)
+    mpls_table_check(router, label_list=label_list)
+
+
+def test_reconfigure_allocation_mode_nexthop():
+    """
+    Test re-configuring allocation mode per nexthop
+    Check that show mpls table has no entry with label 17
+    Check that all VPN updates on r1 should have multiple label values and not only 222
+    Check that show mpls table will have multiple label values and not only 222
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Reconfiguring allocation mode per nexthop")
+    router = tgen.gears["r1"]
+    # Return value kept only for debugging; not asserted on.
+    dump = router.vtysh_cmd(
+        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export allocation-mode per-nexthop\n",
+        isjson=False,
+    )
+
+    # Check that show mpls table has no entry with label 17
+    # NOTE(review): the log string below says "is present" but the check
+    # performed is check_show_mpls_table_entry_label_not_found, i.e. the
+    # entry must NOT be present (matching the docstring); the runtime string
+    # is left unchanged here.
+    logger.info(
+        "r1, mpls table, check that MPLS entry with inLabel set to 17 is present"
+    )
+    test_func = functools.partial(
+        check_show_mpls_table_entry_label_not_found, router, 17
+    )
+    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert success, "r1, mpls entry with label 17 still present"
+
+    # Check vpnv6 routes from r1
+    logger.info("Checking VPNv6 routes on r1")
+    label_list = set()
+    bgp_vpnv6_table_check_all(router, label_list=label_list)
+    # Per-nexthop allocation restored: more than one label must be in use.
+    assert len(label_list) != 1, "r1, only 1 label values found for VPNv6 updates"
+
+    # Check mpls table with all values
+    logger.info("Checking MPLS values on show mpls table of r1")
+    mpls_table_check(router, label_list=label_list)
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    # Memleak checking is opt-in (enabled via topotest configuration);
+    # skip quietly when it is off.
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 74e308dbc62fee71ed86eee0a577e25505a73cfb..b78a2f1052d160fdd563580ba469911f1fd19d1a 100755 (executable)
@@ -86,6 +86,12 @@ def pytest_addoption(parser):
         ),
     )
 
+    parser.addoption(
+        "--memleaks",
+        action="store_true",
+        help="Report memstat results as errors",
+    )
+
     parser.addoption(
         "--pause",
         action="store_true",
@@ -189,7 +195,7 @@ def pytest_addoption(parser):
     )
 
 
-def check_for_memleaks():
+def check_for_valgrind_memleaks():
     assert topotest.g_pytest_config.option.valgrind_memleaks
 
     leaks = []
@@ -232,10 +238,46 @@ def check_for_memleaks():
         pytest.fail("valgrind memleaks found for daemons: " + " ".join(daemons))
 
 
+def check_for_memleaks():
+    leaks = []
+    tgen = get_topogen()  # pylint: disable=redefined-outer-name
+    latest = []
+    existing = []
+    if tgen is not None:
+        logdir = tgen.logdir
+        if hasattr(tgen, "memstat_existing_files"):
+            existing = tgen.memstat_existing_files
+        latest = glob.glob(os.path.join(logdir, "*/*.err"))
+
+    daemons = []
+    for vfile in latest:
+        if vfile in existing:
+            continue
+        with open(vfile, encoding="ascii") as vf:
+            vfcontent = vf.read()
+            num = vfcontent.count("memstats:")
+            if num:
+                existing.append(vfile)  # have summary don't check again
+                emsg = "{} types in {}".format(num, vfile)
+                leaks.append(emsg)
+                daemon = re.match(r".*test[a-z_A-Z0-9\+]*/(.*)\.err", vfile).group(1)
+                daemons.append("{}({})".format(daemon, num))
+
+    if tgen is not None:
+        tgen.memstat_existing_files = existing
+
+    if leaks:
+        logger.error("memleaks found:\n\t%s", "\n\t".join(leaks))
+        pytest.fail("memleaks found for daemons: " + " ".join(daemons))
+
+
 @pytest.fixture(autouse=True, scope="module")
 def module_check_memtest(request):
     yield
     if request.config.option.valgrind_memleaks:
+        if get_topogen() is not None:
+            check_for_valgrind_memleaks()
+    if request.config.option.memleaks:
         if get_topogen() is not None:
             check_for_memleaks()
 
@@ -264,6 +306,8 @@ def pytest_runtest_call(item: pytest.Item) -> None:
 
     # Check for leaks if requested
     if item.config.option.valgrind_memleaks:
+        check_for_valgrind_memleaks()
+    if item.config.option.memleaks:
         check_for_memleaks()
 
 
@@ -370,10 +414,22 @@ def pytest_configure(config):
     if config.option.topology_only and is_xdist:
         pytest.exit("Cannot use --topology-only with distributed test mode")
 
+        pytest.exit("Cannot use --topology-only with distributed test mode")
+
     # Check environment now that we have config
     if not diagnose_env(rundir):
         pytest.exit("environment has errors, please read the logs in %s" % rundir)
 
+    # slave TOPOTESTS_CHECK_MEMLEAK to memleaks flag
+    if config.option.memleaks:
+        if "TOPOTESTS_CHECK_MEMLEAK" not in os.environ:
+            os.environ["TOPOTESTS_CHECK_MEMLEAK"] = "/dev/null"
+    else:
+        if "TOPOTESTS_CHECK_MEMLEAK" in os.environ:
+            del os.environ["TOPOTESTS_CHECK_MEMLEAK"]
+        if "TOPOTESTS_CHECK_STDERR" in os.environ:
+            del os.environ["TOPOTESTS_CHECK_STDERR"]
+
 
 @pytest.fixture(autouse=True, scope="session")
 def setup_session_auto():
index 5ff4b096fd471fcbc6ac596f00de3a5aadaa077f..7f37cedb2bd7655b03c804f2c7fd2095c5b9cd07 100644 (file)
@@ -8,7 +8,6 @@
     "192.168.1.2":{
       "remoteAs":65002,
       "version":4,
-      "tableVersion":0,
       "outq":0,
       "inq":0,
       "pfxRcd":3,
@@ -21,7 +20,6 @@
     "192.168.2.3":{
       "remoteAs":65003,
       "version":4,
-      "tableVersion":0,
       "outq":0,
       "inq":0,
       "pfxRcd":3,
index a0b12b2d6425b138ccaee1e404e23c9a0548f777..c58da996d7bf88b21932c7e988c11fec19437216 100644 (file)
@@ -164,7 +164,6 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config
                         router,
                     )
                 else:
-
                     ipv4_data = bgp_addr_data.setdefault("ipv4", {})
                     ipv6_data = bgp_addr_data.setdefault("ipv6", {})
                     l2vpn_data = bgp_addr_data.setdefault("l2vpn", {})
@@ -645,7 +644,6 @@ def __create_l2vpn_evpn_address_family(
 
         if advertise_data:
             for address_type, unicast_type in advertise_data.items():
-
                 if type(unicast_type) is dict:
                     for key, value in unicast_type.items():
                         cmd = "advertise {} {}".format(address_type, key)
@@ -1651,7 +1649,6 @@ def modify_as_number(tgen, topo, input_dict):
 
     logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
     try:
-
         new_topo = deepcopy(topo["routers"])
         router_dict = {}
         for router in input_dict.keys():
@@ -1953,7 +1950,6 @@ def clear_bgp_and_verify(tgen, topo, router, rid=None):
 
         total_peer = 0
         for addr_type in bgp_addr_type.keys():
-
             if not check_address_types(addr_type):
                 continue
 
@@ -2022,7 +2018,6 @@ def clear_bgp_and_verify(tgen, topo, router, rid=None):
     peer_uptime_after_clear_bgp = {}
     # Verifying BGP convergence after bgp clear command
     for retry in range(50):
-
         # Waiting for BGP to converge
         logger.info(
             "Waiting for %s sec for BGP to converge on router" " %s...",
@@ -3278,7 +3273,6 @@ def verify_graceful_restart(
 
             if lmode is None:
                 if "graceful-restart" in input_dict[dut]["bgp"]:
-
                     if (
                         "graceful-restart" in input_dict[dut]["bgp"]["graceful-restart"]
                         and input_dict[dut]["bgp"]["graceful-restart"][
@@ -3326,7 +3320,6 @@ def verify_graceful_restart(
 
             if rmode is None:
                 if "graceful-restart" in input_dict[peer]["bgp"]:
-
                     if (
                         "graceful-restart"
                         in input_dict[peer]["bgp"]["graceful-restart"]
@@ -3628,7 +3621,6 @@ def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
 
             eor_json = show_bgp_graceful_json_out[afi]["endOfRibStatus"]
             if "endOfRibSend" in eor_json:
-
                 if eor_json["endOfRibSend"]:
                     logger.info(
                         "[DUT: %s]: EOR Send true for %s " "%s", dut, neighbor_ip, afi
@@ -3918,7 +3910,6 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer)
                         "timer"
                     ].items():
                         if rs_timer == "restart-time":
-
                             receivedTimer = value
                             if (
                                 show_bgp_graceful_json_out["timers"][
@@ -4777,7 +4768,6 @@ def get_prefix_count_route(
     tgen = get_topogen()
     for router, rnode in tgen.routers().items():
         if router == dut:
-
             if vrf:
                 ipv4_cmd = "sh ip bgp vrf {} summary json".format(vrf)
                 show_bgp_json_ipv4 = run_frr_cmd(rnode, ipv4_cmd, isjson=True)
@@ -4871,7 +4861,6 @@ def verify_rib_default_route(
     connected_routes = {}
     for router, rnode in tgen.routers().items():
         if router == dut:
-
             ipv4_routes = run_frr_cmd(rnode, "sh ip bgp json", isjson=True)
             ipv6_routes = run_frr_cmd(rnode, "sh ip bgp ipv6 unicast json", isjson=True)
     is_ipv4_default_attrib_found = False
index cd449cb50c727beb2c8deaa1de9d7879df8e32b2..699c7a4da6125156d91c25a6040b1a3ce7a084bb 100644 (file)
@@ -25,6 +25,7 @@ from lib.lutil import luCommand, luResult, LUtil
 import json
 import re
 
+
 # gpz: get rib in json form and compare against desired routes
 class BgpRib:
     def log(self, str):
index d19d8db75cbddf81b2356352389c9d9d4d65e15c..67afe8739f12d4d1b0a9187ab919a82c6c768851 100644 (file)
@@ -243,7 +243,6 @@ def run_frr_cmd(rnode, cmd, isjson=False):
 
 
 def apply_raw_config(tgen, input_dict):
-
     """
     API to configure raw configuration on device. This can be used for any cli
     which has not been implemented in JSON.
@@ -447,7 +446,6 @@ def check_router_status(tgen):
     try:
         router_list = tgen.routers()
         for router, rnode in router_list.items():
-
             result = rnode.check_router_running()
             if result != "":
                 daemons = []
@@ -1914,7 +1912,6 @@ def interface_status(tgen, topo, input_dict):
         rlist = []
 
         for router in input_dict.keys():
-
             interface_list = input_dict[router]["interface_list"]
             status = input_dict[router].setdefault("status", "up")
             for intf in interface_list:
@@ -2529,7 +2526,6 @@ def create_route_maps(tgen, input_dict, build=False):
                 continue
             rmap_data = []
             for rmap_name, rmap_value in input_dict[router]["route_maps"].items():
-
                 for rmap_dict in rmap_value:
                     del_action = rmap_dict.setdefault("delete", False)
 
@@ -3002,7 +2998,6 @@ def addKernelRoute(
         group_addr_range = [group_addr_range]
 
     for grp_addr in group_addr_range:
-
         addr_type = validate_ip_address(grp_addr)
         if addr_type == "ipv4":
             if next_hop is not None:
@@ -3193,7 +3188,6 @@ def configure_brctl(tgen, topo, input_dict):
 
         if "brctl" in input_dict[dut]:
             for brctl_dict in input_dict[dut]["brctl"]:
-
                 brctl_names = brctl_dict.setdefault("brctl_name", [])
                 addvxlans = brctl_dict.setdefault("addvxlan", [])
                 stp_values = brctl_dict.setdefault("stp", [])
@@ -3203,7 +3197,6 @@ def configure_brctl(tgen, topo, input_dict):
                 for brctl_name, vxlan, vrf, stp in zip(
                     brctl_names, addvxlans, vrfs, stp_values
                 ):
-
                     ip_cmd_list = []
                     cmd = "ip link add name {} type bridge stp_state {}".format(
                         brctl_name, stp
@@ -3614,7 +3607,6 @@ def verify_rib(
 
                 for static_route in static_routes:
                     if "vrf" in static_route and static_route["vrf"] is not None:
-
                         logger.info(
                             "[DUT: {}]: Verifying routes for VRF:"
                             " {}".format(router, static_route["vrf"])
@@ -4053,7 +4045,6 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None, protocol=
 
                 for static_route in static_routes:
                     if "vrf" in static_route and static_route["vrf"] is not None:
-
                         logger.info(
                             "[DUT: {}]: Verifying routes for VRF:"
                             " {}".format(router, static_route["vrf"])
index 9aef41cd1cd7b5061a027f24c13407ea11dbd5cc..1eb88f26d4c5108cbfb8749713a3c613e65e109b 100644 (file)
@@ -177,7 +177,17 @@ Total %-4d                                                           %-4d %d\n\
             self.log("unable to read: " + tstFile)
             sys.exit(1)
 
-    def command(self, target, command, regexp, op, result, returnJson, startt=None, force_result=False):
+    def command(
+        self,
+        target,
+        command,
+        regexp,
+        op,
+        result,
+        returnJson,
+        startt=None,
+        force_result=False,
+    ):
         global net
         if op == "jsoncmp_pass" or op == "jsoncmp_fail":
             returnJson = True
@@ -326,7 +336,9 @@ Total %-4d                                                           %-4d %d\n\
             if strict and (wait_count == 1):
                 force_result = True
 
-            found = self.command(target, command, regexp, op, result, returnJson, startt, force_result)
+            found = self.command(
+                target, command, regexp, op, result, returnJson, startt, force_result
+            )
             if found is not False:
                 break
 
@@ -342,6 +354,7 @@ Total %-4d                                                           %-4d %d\n\
 # initialized by luStart
 LUtil = None
 
+
 # entry calls
 def luStart(
     baseScriptDir=".",
@@ -455,6 +468,7 @@ def luShowFail():
     if printed > 0:
         logger.error("See %s for details of errors" % LUtil.fout_name)
 
+
 #
 # Sets default wait type for luCommand(op="wait) (may be overridden by
 # specifying luCommand(op="wait-strict") or luCommand(op="wait-nostrict")).
index c5c2adc545bdfa03c7058849099c24534f030571..d648a120ab7667d25fe60463d0dfe391b5000c1f 100644 (file)
@@ -273,7 +273,7 @@ ff02::2\tip6-allrouters
 
         shellopt = self.cfgopt.get_option_list("--shell")
         if "all" in shellopt or "." in shellopt:
-            self.run_in_window("bash")
+            self.run_in_window("bash", title="munet")
 
         # This is expected by newer munet CLI code
         self.config_dirname = ""
index dad87440bc65b6a192e31d93f0043547cd5791d9..ffe81fbd99cade52d8c6d6a36aca58a8cd5ac582 100644 (file)
@@ -302,7 +302,6 @@ def __create_ospf_global(tgen, input_dict, router, build, load_config, ospf):
     # ospf gr information
     gr_data = ospf_data.setdefault("graceful-restart", {})
     if gr_data:
-
         if "opaque" in gr_data and gr_data["opaque"]:
             cmd = "capability opaque"
             if gr_data.setdefault("delete", False):
@@ -710,6 +709,7 @@ def verify_ospf_neighbor(
                 else:
                     data_ip = topo["routers"][ospf_nbr]["links"]
                     data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"]
+                logger.info("ospf neighbor %s:   router-id: %s", router, data_rid)
                 if ospf_nbr in data_ip:
                     nbr_details = nbr_data[ospf_nbr]
                 elif lan:
@@ -728,8 +728,10 @@ def verify_ospf_neighbor(
                 try:
                     nh_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[0]
                 except KeyError:
-                    errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
-                        router, nbr_rid, ospf_nbr
+                    errormsg = (
+                        "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+                            router, ospf_nbr, nbr_rid
+                        )
                     )
                     return errormsg
 
@@ -843,7 +845,6 @@ def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False)
                     return errormsg
 
             for ospf_nbr, nbr_data in ospf_nbr_list.items():
-
                 try:
                     data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
                 except KeyError:
@@ -914,7 +915,6 @@ def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False)
                         return errormsg
                 continue
     else:
-
         for router, rnode in tgen.routers().items():
             if "ospf6" not in topo["routers"][router]:
                 continue
@@ -945,7 +945,7 @@ def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False)
                     data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
                         "router_id"
                     ]
-
+                logger.info("ospf neighbor %s:   router-id: %s", ospf_nbr, data_rid)
                 if ospf_nbr in data_ip:
                     nbr_details = nbr_data[ospf_nbr]
                 elif lan:
@@ -968,8 +968,10 @@ def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False)
                     nh_state = get_index_val.get(neighbor_ip)["state"]
                     intf_state = get_index_val.get(neighbor_ip)["ifState"]
                 except TypeError:
-                    errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
-                        router, nbr_rid, ospf_nbr
+                    errormsg = (
+                        "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+                            router, ospf_nbr, nbr_rid
+                        )
                     )
                     return errormsg
 
@@ -1761,7 +1763,6 @@ def verify_ospf6_rib(
                             continue
 
                         if st_rt in ospf_rib_json:
-
                             st_found = True
                             found_routes.append(st_rt)
 
index 925890b324912aa7bbcd5001aef93a5d9f20ef22..e26bdb3af3b860f5761c0f67d5b65d86dc8d3224 100644 (file)
@@ -593,7 +593,6 @@ def find_rp_details(tgen, topo):
     topo_data = topo["routers"]
 
     for router in router_list.keys():
-
         if "pim" not in topo_data[router]:
             continue
 
@@ -1495,7 +1494,6 @@ def verify_mroutes(
                             and data["outboundInterface"] in oil
                         ):
                             if return_uptime:
-
                                 uptime_dict[grp_addr][src_address] = data["upTime"]
 
                             logger.info(
@@ -1917,7 +1915,6 @@ def get_pim_interface_traffic(tgen, input_dict):
             for intf, data in input_dict[dut].items():
                 interface_json = show_pim_intf_traffic_json[intf]
                 for state in data:
-
                     # Verify Tx/Rx
                     if state in interface_json:
                         output_dict[dut][state] = interface_json[state]
@@ -1990,7 +1987,6 @@ def get_pim6_interface_traffic(tgen, input_dict):
             for intf, data in input_dict[dut].items():
                 interface_json = show_pim_intf_traffic_json[intf]
                 for state in data:
-
                     # Verify Tx/Rx
                     if state in interface_json:
                         output_dict[dut][state] = interface_json[state]
@@ -3007,7 +3003,6 @@ def verify_pim_upstream_rpf(
     logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
 
     if "pim" in topo["routers"][dut]:
-
         logger.info("[DUT: %s]: Verifying ip pim upstream rpf:", dut)
 
         rnode = tgen.routers()[dut]
@@ -3245,7 +3240,6 @@ def verify_pim_join(
 
         grp_addr = grp_addr.split("/")[0]
         for source, data in interface_json[grp_addr].items():
-
             # Verify pim join
             if pim_join:
                 if data["group"] == grp_addr and data["channelJoinName"] == "JOIN":
@@ -3338,7 +3332,6 @@ def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):
         rnode = tgen.routers()[dut]
 
         for interface, data in input_dict[dut]["igmp"]["interfaces"].items():
-
             statistics = False
             report = False
             if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]:
@@ -3623,7 +3616,6 @@ def verify_pim_config(tgen, input_dict, expected=True):
         rnode = tgen.routers()[dut]
 
         for interface, data in input_dict[dut]["pim"]["interfaces"].items():
-
             logger.info("[DUT: %s]: Verifying PIM interface %s detail:", dut, interface)
 
             show_ip_igmp_intf_json = run_frr_cmd(
@@ -3772,7 +3764,6 @@ def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=Tr
                     elif (
                         interface_json["pktsIn"] != 0 and interface_json["bytesIn"] != 0
                     ):
-
                         traffic_dict[traffic_type][interface][
                             "pktsIn"
                         ] = interface_json["pktsIn"]
@@ -3836,7 +3827,6 @@ def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=Tr
                         interface_json["pktsOut"] != 0
                         and interface_json["bytesOut"] != 0
                     ):
-
                         traffic_dict[traffic_type][interface][
                             "pktsOut"
                         ] = interface_json["pktsOut"]
@@ -4232,7 +4222,6 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
         group_addresses = [group_addresses]
 
     if interface not in show_ip_local_igmp_json:
-
         errormsg = (
             "[DUT %s]: Verifying local IGMP group received"
             " from interface %s [FAILED]!! " % (dut, interface)
@@ -4319,7 +4308,6 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type=
         for intf, data in input_dict[dut].items():
             interface_json = show_pim_intf_traffic_json[intf]
             for state in data:
-
                 # Verify Tx/Rx
                 if state in interface_json:
                     output_dict[dut][state] = interface_json[state]
@@ -4525,7 +4513,6 @@ def verify_mld_config(tgen, input_dict, stats_return=False, expected=True):
     for dut in input_dict.keys():
         rnode = tgen.routers()[dut]
         for interface, data in input_dict[dut]["mld"]["interfaces"].items():
-
             statistics = False
             report = False
             if "statistics" in input_dict[dut]["mld"]["interfaces"][interface]["mld"]:
@@ -5040,7 +5027,6 @@ def verify_pim6_config(tgen, input_dict, expected=True):
         rnode = tgen.routers()[dut]
 
         for interface, data in input_dict[dut]["pim6"]["interfaces"].items():
-
             logger.info(
                 "[DUT: %s]: Verifying PIM6 interface %s detail:", dut, interface
             )
@@ -5158,7 +5144,6 @@ def verify_local_mld_groups(tgen, dut, interface, group_addresses):
         group_addresses = [group_addresses]
 
     if interface not in show_ipv6_local_mld_json["default"]:
-
         errormsg = (
             "[DUT %s]: Verifying local MLD group received"
             " from interface %s [FAILED]!! " % (dut, interface)
index ae58d9b2d1fd44cb4ceaf2ba888b492b16a4f1a6..230c2bd7f7d893a608a2d6490f45dd97d4024bb2 100755 (executable)
@@ -616,7 +616,6 @@ def test_json_object_asterisk_matching():
 
 
 def test_json_list_nested_with_objects():
-
     dcomplete = [{"key": 1, "list": [123]}, {"key": 2, "list": [123]}]
 
     dsub1 = [{"key": 2, "list": [123]}, {"key": 1, "list": [123]}]
index 53e6945bee1ad0d4e3f1fa8101e8ad2dfdf44520..23a6c869934a811330bacb1eec5b7ab024f75089 100644 (file)
@@ -206,7 +206,6 @@ def build_topo_from_json(tgen, topo=None):
             for destRouterLink, data in sorted(
                 topo["switches"][curSwitch]["links"].items()
             ):
-
                 # Loopback interfaces
                 if "dst_node" in data:
                     destRouter = data["dst_node"]
@@ -220,7 +219,6 @@ def build_topo_from_json(tgen, topo=None):
                     destRouter = destRouterLink
 
                 if destRouter in listAllRouters:
-
                     topo["routers"][destRouter]["links"][curSwitch] = deepcopy(
                         topo["switches"][curSwitch]["links"][destRouterLink]
                     )
@@ -316,6 +314,7 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
     func_dict = OrderedDict(
         [
             ("vrfs", create_vrf_cfg),
+            ("ospf", create_router_ospf),
             ("links", create_interfaces_cfg),
             ("static_routes", create_static_routes),
             ("prefix_lists", create_prefix_lists),
@@ -325,7 +324,6 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
             ("igmp", create_igmp_config),
             ("mld", create_mld_config),
             ("bgp", create_router_bgp),
-            ("ospf", create_router_ospf),
         ]
     )
 
@@ -398,7 +396,7 @@ def setup_module_from_json(testfile, json_file=None):
     tgen = create_tgen_from_json(testfile, json_file)
 
     # Start routers (and their daemons)
-    start_topology(tgen, topo_daemons(tgen))
+    start_topology(tgen)
 
     # Configure routers
     build_config_from_json(tgen)
index 967f09ecbd900e6e1fb2e4975c9fe573b4d5d4b2..aeb83d42904547e999318e3c5c1f5c1153745e03 100644 (file)
@@ -1469,21 +1469,22 @@ class Router(Node):
                 if not running:
                     break
 
-        if not running:
-            return ""
-
-        logger.warning(
-            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
-        )
-        for name, pid in running:
-            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
-            logger.info("%s: killing %s", self.name, name)
-            self.cmd("kill -SIGBUS %d" % pid)
-            self.cmd("rm -- " + pidfile)
-
-        sleep(
-            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
-        )
+        if running:
+            logger.warning(
+                "%s: sending SIGBUS to: %s",
+                self.name,
+                ", ".join([x[0] for x in running]),
+            )
+            for name, pid in running:
+                pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
+                logger.info("%s: killing %s", self.name, name)
+                self.cmd("kill -SIGBUS %d" % pid)
+                self.cmd("rm -- " + pidfile)
+
+            sleep(
+                0.5,
+                "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name,
+            )
 
         errors = self.checkRouterCores(reportOnce=True)
         if self.checkRouterVersion("<", minErrorVersion):
@@ -1526,6 +1527,9 @@ class Router(Node):
         """
 
         # Unfortunately this API allowsfor source to not exist for any and all routers.
+        if source is None:
+            source = f"{daemon}.conf"
+
         if source:
             head, tail = os.path.split(source)
             if not head and not self.path_exists(tail):
@@ -1557,7 +1561,7 @@ class Router(Node):
                     self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
                 self.cmd_raises("cp {} {}".format(source, conf_file))
 
-            if not self.unified_config or daemon == "frr":
+            if not (self.unified_config or daemon == "frr"):
                 self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                 self.cmd_raises("chmod 664 {}".format(conf_file))
 
@@ -1683,7 +1687,11 @@ class Router(Node):
         return self.getLog("out", daemon)
 
     def getLog(self, log, daemon):
-        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
+        filename = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
+        log = ""
+        with open(filename) as file:
+            log = file.read()
+        return log
 
     def startRouterDaemons(self, daemons=None, tgen=None):
         "Starts FRR daemons for this router."
@@ -1841,9 +1849,13 @@ class Router(Node):
                 logger.info(
                     "%s: %s %s launched in gdb window", self, self.routertype, daemon
                 )
-            elif daemon in perfds and (self.name in perfds[daemon] or "all" in perfds[daemon]):
+            elif daemon in perfds and (
+                self.name in perfds[daemon] or "all" in perfds[daemon]
+            ):
                 cmdopt += rediropt
-                cmd = " ".join(["perf record {} --".format(perf_options), binary, cmdopt])
+                cmd = " ".join(
+                    ["perf record {} --".format(perf_options), binary, cmdopt]
+                )
                 p = self.popen(cmd)
                 self.perf_daemons[daemon] = p
                 if p.poll() and p.returncode:
@@ -1943,7 +1955,7 @@ class Router(Node):
                 tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))
 
         for tailf in tail_log_files:
-            self.run_in_window("tail -f " + tailf, title=tailf, background=True)
+            self.run_in_window("tail -n10000 -F " + tailf, title=tailf, background=True)
 
         return ""
 
index eb4b088442da9c3e756d337cb6ac0f7e1601043e..c6ae70e09b21fe5a46efc504e07208c3c491d6b4 100644 (file)
@@ -1241,9 +1241,13 @@ class Commander:  # pylint: disable=R0904
                 # XXX not appropriate for ssh
                 cmd = ["sudo", "-Eu", os.environ["SUDO_USER"]] + cmd
 
-            if not isinstance(nscmd, str):
-                nscmd = shlex.join(nscmd)
-            cmd.append(nscmd)
+            if title:
+                cmd.append("-t")
+                cmd.append(title)
+
+            if isinstance(nscmd, str):
+                nscmd = shlex.split(nscmd)
+            cmd.extend(nscmd)
         elif "DISPLAY" in os.environ:
             cmd = [get_exec_path_host("xterm")]
             if "SUDO_USER" in os.environ:
index f58ea99d67b739965926064ef16c2593fbf76b75..f631073bb18ad769ea1ea823e7d2ddfe425eb2c8 100644 (file)
@@ -93,7 +93,7 @@ def spawn(unet, host, cmd, iow, ns_only):
             elif master_fd in r:
                 o = os.read(master_fd, 10240)
                 if o:
-                    iow.write(o.decode("utf-8"))
+                    iow.write(o.decode("utf-8", "ignore"))
                     iow.flush()
     finally:
         # restore tty settings back
@@ -281,7 +281,6 @@ async def async_input(prompt, histfile):
 
 
 def make_help_str(unet):
-
     w = sorted([x if x else "" for x in unet.cli_in_window_cmds])
     ww = unet.cli_in_window_cmds
     u = sorted([x if x else "" for x in unet.cli_run_cmds])
@@ -516,7 +515,6 @@ class Completer:
 async def doline(
     unet, line, outf, background=False, notty=False
 ):  # pylint: disable=R0911
-
     line = line.strip()
     m = re.fullmatch(r"^(\S+)(?:\s+(.*))?$", line)
     if not m:
@@ -670,7 +668,7 @@ async def cli_client(sockpath, prompt="munet> "):
         rb = rb[: -len(ENDMARKER)]
 
         # Write the output
-        sys.stdout.write(rb.decode("utf-8"))
+        sys.stdout.write(rb.decode("utf-8", "ignore"))
 
 
 async def local_cli(unet, outf, prompt, histfile, background):
@@ -729,7 +727,7 @@ async def cli_client_connected(unet, background, reader, writer):
                 self.writer = writer
 
             def write(self, x):
-                self.writer.write(x.encode("utf-8"))
+                self.writer.write(x.encode("utf-8", "ignore"))
 
             def flush(self):
                 self.writer.flush()
index aa43b977b02f471d334db236209b2a6103565c5d..45e1bc8db387bbc3958919e1b00a9b085a1cd698 100755 (executable)
@@ -430,6 +430,144 @@ def test_gr_rt7():
     check_routers(restarting="rt7")
 
 
+#
+# Test rt1 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt1():
+    logger.info("Test: verify rt1 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt1", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt1", ["ospf6d"])
+
+    expect_grace_lsa(restarting="1.1.1.1", helper="rt2")
+    ensure_gr_is_in_zebra("rt1")
+    check_routers(restarting="rt1")
+
+
+#
+# Test rt2 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt2():
+    logger.info("Test: verify rt2 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt2", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt2", ["ospf6d"])
+
+    expect_grace_lsa(restarting="2.2.2.2", helper="rt1")
+    expect_grace_lsa(restarting="2.2.2.2", helper="rt3")
+    ensure_gr_is_in_zebra("rt2")
+    check_routers(restarting="rt2")
+
+
+#
+# Test rt3 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt3():
+    logger.info("Test: verify rt3 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt3", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt3", ["ospf6d"])
+
+    expect_grace_lsa(restarting="3.3.3.3", helper="rt2")
+    expect_grace_lsa(restarting="3.3.3.3", helper="rt4")
+    expect_grace_lsa(restarting="3.3.3.3", helper="rt6")
+    ensure_gr_is_in_zebra("rt3")
+    check_routers(restarting="rt3")
+
+
+#
+# Test rt4 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt4():
+    logger.info("Test: verify rt4 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt4", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt4", ["ospf6d"])
+
+    expect_grace_lsa(restarting="4.4.4.4", helper="rt3")
+    expect_grace_lsa(restarting="4.4.4.4", helper="rt5")
+    ensure_gr_is_in_zebra("rt4")
+    check_routers(restarting="rt4")
+
+
+#
+# Test rt5 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt5():
+    logger.info("Test: verify rt5 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt5", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt5", ["ospf6d"])
+
+    expect_grace_lsa(restarting="5.5.5.5", helper="rt4")
+    ensure_gr_is_in_zebra("rt5")
+    check_routers(restarting="rt5")
+
+
+#
+# Test rt6 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt6():
+    logger.info("Test: verify rt6 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt6", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt6", ["ospf6d"])
+
+    expect_grace_lsa(restarting="6.6.6.6", helper="rt3")
+    expect_grace_lsa(restarting="6.6.6.6", helper="rt7")
+    ensure_gr_is_in_zebra("rt6")
+    check_routers(restarting="rt6")
+
+
+#
+# Test rt7 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt7():
+    logger.info("Test: verify rt7 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt7", ["ospf6d"], save_config=False)
+    start_router_daemons(tgen, "rt7", ["ospf6d"])
+
+    expect_grace_lsa(restarting="6.6.6.6", helper="rt6")
+    ensure_gr_is_in_zebra("rt7")
+    check_routers(restarting="rt7")
+
+
 # Memory leak test template
 def test_memory_leak():
     "Run the memory leak test and report results."
index 9d7a15833c16b3eb487f19dad13f8d0c86c61970..0531e81d4431d4fbe2093cbf3a42cd7d8bea3cce 100644 (file)
@@ -84,8 +84,8 @@ SUMMARY = {"ipv4": ["11.0.0.0/8", "12.0.0.0/8", "11.0.0.0/24"]}
 """
 TOPOOLOGY =
       Please view in a fixed-width font such as Courier.
-      +---+  A0       +---+
-      +R1 +------------+R2 |
+      +---+  A0        +---+
+      |R1 +------------+R2 |
       +-+-+-           +--++
         |  --        --  |
         |    -- A0 --    |
@@ -94,8 +94,8 @@ TOPOOLOGY =
         |    --    --    |
         |  --        --  |
       +-+-+-            +-+-+
-      +R0 +-------------+R3 |
-      +---+     A0     +---+
+      |R0 +-------------+R3 |
+      +---+     A0      +---+
 
 TESTCASES =
 1. OSPF summarisation functionality.
@@ -977,7 +977,7 @@ def test_ospf_type5_summary_tc42_p0(request):
 
     ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"]
 
-    ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+    ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
     ospf_summ_r1 = {
         "r0": {
             "ospf": {"summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]}
@@ -1519,7 +1519,7 @@ def test_ospf_type5_summary_tc45_p0(request):
     step("Repeat steps 1 to 10 of summarisation in non Back bone area.")
     reset_config_on_routers(tgen)
 
-    step("Change the area id on the interface on R0")
+    step("Change the area id on the interface on R0 to R1 from 0.0.0.0 to 0.0.0.1")
     input_dict = {
         "r0": {
             "links": {
@@ -1549,7 +1549,7 @@ def test_ospf_type5_summary_tc45_p0(request):
     result = create_interfaces_cfg(tgen, input_dict)
     assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
 
-    step("Change the area id on the interface  ")
+    step("Change the area id on the interface on R1 to R0 from 0.0.0.0 to 0.0.0.1")
     input_dict = {
         "r1": {
             "links": {
index ca2a3b287c3cba864389a377eed009894499a401..73185d501def086c575e586df767989ffb23c2bd 100755 (executable)
@@ -434,6 +434,144 @@ def test_gr_rt7():
     check_routers(restarting="rt7")
 
 
+#
+# Test rt1 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt1():
+    logger.info("Test: verify rt1 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt1", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt1", ["ospfd"])
+
+    expect_grace_lsa(restarting="1.1.1.1", area="0.0.0.1", helper="rt2")
+    ensure_gr_is_in_zebra("rt1")
+    check_routers(restarting="rt1")
+
+
+#
+# Test rt2 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt2():
+    logger.info("Test: verify rt2 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt2", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt2", ["ospfd"])
+
+    expect_grace_lsa(restarting="2.2.2.2", area="0.0.0.1", helper="rt1")
+    expect_grace_lsa(restarting="2.2.2.2", area="0.0.0.0", helper="rt3")
+    ensure_gr_is_in_zebra("rt2")
+    check_routers(restarting="rt2")
+
+
+#
+# Test rt3 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt3():
+    logger.info("Test: verify rt3 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt3", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt3", ["ospfd"])
+
+    expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt2")
+    expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt4")
+    expect_grace_lsa(restarting="3.3.3.3", area="0.0.0.0", helper="rt6")
+    ensure_gr_is_in_zebra("rt3")
+    check_routers(restarting="rt3")
+
+
+#
+# Test rt4 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt4():
+    logger.info("Test: verify rt4 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt4", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt4", ["ospfd"])
+
+    expect_grace_lsa(restarting="4.4.4.4", area="0.0.0.0", helper="rt3")
+    expect_grace_lsa(restarting="4.4.4.4", area="0.0.0.2", helper="rt5")
+    ensure_gr_is_in_zebra("rt4")
+    check_routers(restarting="rt4")
+
+
+#
+# Test rt5 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt5():
+    logger.info("Test: verify rt5 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt5", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt5", ["ospfd"])
+
+    expect_grace_lsa(restarting="5.5.5.5", area="0.0.0.2", helper="rt4")
+    ensure_gr_is_in_zebra("rt5")
+    check_routers(restarting="rt5")
+
+
+#
+# Test rt6 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt6():
+    logger.info("Test: verify rt6 performing an unplanned graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt6", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt6", ["ospfd"])
+
+    expect_grace_lsa(restarting="6.6.6.6", area="0.0.0.0", helper="rt3")
+    expect_grace_lsa(restarting="6.6.6.6", area="0.0.0.3", helper="rt7")
+    ensure_gr_is_in_zebra("rt6")
+    check_routers(restarting="rt6")
+
+
+#
+# Test rt7 performing an unplanned graceful restart
+#
+def test_unplanned_gr_rt7():
+    logger.info("Test: verify rt7 performing a graceful restart")
+    tgen = get_topogen()
+
+    # Skip if previous fatal error condition is raised
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    kill_router_daemons(tgen, "rt7", ["ospfd"], save_config=False)
+    start_router_daemons(tgen, "rt7", ["ospfd"])
+
+    expect_grace_lsa(restarting="7.7.7.7", area="0.0.0.3", helper="rt6")
+    ensure_gr_is_in_zebra("rt7")
+    check_routers(restarting="rt7")
+
+
 # Memory leak test template
 def test_memory_leak():
     "Run the memory leak test and report results."
index b01c96226ef29e1c20ad5d97e57afe19b0368d4f..2e8bea2651b5432f000a6122e4674dd8db69e52d 100644 (file)
@@ -20,7 +20,15 @@ from datetime import datetime, timedelta
 
 import pytest
 
-from lib.common_config import retry, run_frr_cmd, step
+from lib.common_config import (
+    retry,
+    run_frr_cmd,
+    step,
+    kill_router_daemons,
+    start_router_daemons,
+    shutdown_bringup_interface,
+)
+
 from lib.micronet import Timeout, comm_error
 from lib.topogen import Topogen, TopoRouter
 from lib.topotest import interface_set_status, json_cmp
@@ -936,6 +944,191 @@ def test_ospf_opaque_delete_data3(tgen):
     _test_opaque_add_del(tgen, apibin)
 
 
+def _test_opaque_add_restart_add(tgen, apibin):
+    "Test adding an opaque LSA and then restarting ospfd"
+
+    r1 = tgen.gears["r1"]
+    r2 = tgen.gears["r2"]
+
+    p = None
+    pread = None
+    # Log to our stdin, stderr
+    pout = open(os.path.join(r1.net.logdir, "r1/add-del.log"), "a+")
+    try:
+        step("reachable: check for add notification")
+        pread = r2.popen(
+            ["/usr/bin/timeout", "120", apibin, "-v", "--logtag=READER", "wait,120"],
+            encoding=None,  # don't buffer
+            stdin=subprocess.DEVNULL,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+        p = r1.popen(
+            [
+                apibin,
+                "-v",
+                "add,10,1.2.3.4,231,1",
+                "add,10,1.2.3.4,231,1,feedaceebeef",
+                "wait, 5",
+                "add,10,1.2.3.4,231,1,feedaceedeadbeef",
+                "wait, 5",
+                "add,10,1.2.3.4,231,1,feedaceebaddbeef",
+                "wait, 5",
+            ]
+        )
+        add_input_dict = {
+            "areas": {
+                "1.2.3.4": {
+                    "areaLocalOpaqueLsa": [
+                        {
+                            "lsId": "231.0.0.1",
+                            "advertisedRouter": "1.0.0.0",
+                            "sequenceNumber": "80000004",
+                            "checksum": "3128",
+                        },
+                    ],
+                    "areaLocalOpaqueLsaCount": 1,
+                },
+            },
+        }
+        step("Check for add LSAs")
+        json_cmd = "show ip ospf da json"
+        assert verify_ospf_database(tgen, r1, add_input_dict, json_cmd) is None
+        assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
+
+        step("Shutdown the interface on r1 to isolate it for r2")
+        shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+        time.sleep(2)
+        step("Reset the client")
+        p.send_signal(signal.SIGINT)
+        time.sleep(2)
+        p.wait()
+        p = None
+
+        step("Kill ospfd on R1")
+        kill_router_daemons(tgen, "r1", ["ospfd"])
+        time.sleep(2)
+
+        step("Bring ospfd on R1 back up")
+        start_router_daemons(tgen, "r1", ["ospfd"])
+
+        p = r1.popen(
+            [
+                apibin,
+                "-v",
+                "add,10,1.2.3.4,231,1",
+                "add,10,1.2.3.4,231,1,feedaceecafebeef",
+                "wait, 5",
+            ]
+        )
+
+        step("Bring the interface on r1 back up for connection to r2")
+        shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+        step("Verify area opaque LSA refresh")
+        json_cmd = "show ip ospf da opaque-area json"
+        add_detail_input_dict = {
+            "areaLocalOpaqueLsa": {
+                "areas": {
+                    "1.2.3.4": [
+                        {
+                            "linkStateId": "231.0.0.1",
+                            "advertisingRouter": "1.0.0.0",
+                            "lsaSeqNumber": "80000005",
+                            "checksum": "a87e",
+                            "length": 28,
+                            "opaqueDataLength": 8,
+                        },
+                    ],
+                },
+            },
+        }
+        assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+        assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+
+        step("Shutdown the interface on r1 to isolate it for r2")
+        shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
+
+        time.sleep(2)
+        step("Reset the client")
+        p.send_signal(signal.SIGINT)
+        time.sleep(2)
+        p.wait()
+        p = None
+
+        step("Kill ospfd on R1")
+        kill_router_daemons(tgen, "r1", ["ospfd"])
+        time.sleep(2)
+
+        step("Bring ospfd on R1 back up")
+        start_router_daemons(tgen, "r1", ["ospfd"])
+
+        step("Bring the interface on r1 back up for connection to r2")
+        shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+        step("Verify area opaque LSA Purging")
+        json_cmd = "show ip ospf da opaque-area json"
+        add_detail_input_dict = {
+            "areaLocalOpaqueLsa": {
+                "areas": {
+                    "1.2.3.4": [
+                        {
+                            "lsaAge": 3600,
+                            "linkStateId": "231.0.0.1",
+                            "advertisingRouter": "1.0.0.0",
+                            "lsaSeqNumber": "80000005",
+                            "checksum": "a87e",
+                            "length": 28,
+                            "opaqueDataLength": 8,
+                        },
+                    ],
+                },
+            },
+        }
+        assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
+        assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+        step("Verify Area Opaque LSA removal after timeout (60 seconds)")
+        time.sleep(60)
+        json_cmd = "show ip ospf da opaque-area json"
+        timeout_detail_input_dict = {
+            "areaLocalOpaqueLsa": {
+                "areas": {
+                    "1.2.3.4": [],
+                },
+            },
+        }
+        assert (
+            verify_ospf_database(tgen, r1, timeout_detail_input_dict, json_cmd) is None
+        )
+        assert (
+            verify_ospf_database(tgen, r2, timeout_detail_input_dict, json_cmd) is None
+        )
+
+    except Exception:
+        if p:
+            p.terminate()
+            if p.wait():
+                comm_error(p)
+            p = None
+        raise
+    finally:
+        if pread:
+            pread.terminate()
+            pread.wait()
+        if p:
+            p.terminate()
+            p.wait()
+
+
+@pytest.mark.parametrize("tgen", [2], indirect=True)
+def test_ospf_opaque_restart(tgen):
+    apibin = os.path.join(CLIENTDIR, "ospfclient.py")
+    rc, o, e = tgen.gears["r2"].net.cmd_status([apibin, "--help"])
+    logging.debug("%s --help: rc: %s stdout: '%s' stderr: '%s'", apibin, rc, o, e)
+    _test_opaque_add_restart_add(tgen, apibin)
+
+
 if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(args))
index b0e56e619a096fa9a88845856741ba444561e304..49c25ab8f6da6da0ed922c126d9b049b336244de 100644 (file)
@@ -565,7 +565,7 @@ def test_ospfv3_type5_summary_tc42_p0(request):
 
     ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
 
-    ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+    ip_net = str(ipaddress.ip_interface("{}".format(ip)).network)
     ospf_summ_r1 = {
         "r0": {
             "ospf6": {
index f779bf0a74f74dab434393c6c648e285c4e4aed2..0062cf3de29704d5654a5bd70dee171d0a787dc1 100644 (file)
@@ -1,7 +1,7 @@
 # Skip pytests example directory
 [pytest]
 
-asyncio_mode = auto
+asyncio_mode = auto
 
 # We always turn this on inside conftest.py, default shown
 # addopts = --junitxml=<rundir>/topotests.xml
diff --git a/tests/topotests/rip_allow_ecmp/r4/frr.conf b/tests/topotests/rip_allow_ecmp/r4/frr.conf
new file mode 100644 (file)
index 0000000..995c2be
--- /dev/null
@@ -0,0 +1,13 @@
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r4-eth0
+ ip address 192.168.1.4/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
diff --git a/tests/topotests/rip_allow_ecmp/r5/frr.conf b/tests/topotests/rip_allow_ecmp/r5/frr.conf
new file mode 100644 (file)
index 0000000..57a06ec
--- /dev/null
@@ -0,0 +1,13 @@
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r5-eth0
+ ip address 192.168.1.5/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
index acc0aea9e894bebc92baf27c0ff4082b91c60268..7d958fd496d6b9f4949f45a452176819a99c8399 100644 (file)
@@ -21,12 +21,13 @@ sys.path.append(os.path.join(CWD, "../"))
 # pylint: disable=C0413
 from lib import topotest
 from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
 
 pytestmark = [pytest.mark.ripd]
 
 
 def setup_module(mod):
-    topodef = {"s1": ("r1", "r2", "r3")}
+    topodef = {"s1": ("r1", "r2", "r3", "r4", "r5")}
     tgen = Topogen(topodef, mod.__name__)
     tgen.start_topology()
 
@@ -102,11 +103,13 @@ def test_rip_allow_ecmp():
     _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
     assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip rip`"
 
-    def _show_routes():
+    def _show_routes(nh_num):
         output = json.loads(r1.vtysh_cmd("show ip route json"))
         expected = {
             "10.10.10.1/32": [
                 {
+                    "internalNextHopNum": nh_num,
+                    "internalNextHopActiveNum": nh_num,
                     "nexthops": [
                         {
                             "ip": "192.168.1.2",
@@ -116,15 +119,36 @@ def test_rip_allow_ecmp():
                             "ip": "192.168.1.3",
                             "active": True,
                         },
-                    ]
+                    ],
                 }
             ]
         }
         return topotest.json_cmp(output, expected)
 
-    test_func = functools.partial(_show_routes)
+    test_func = functools.partial(_show_routes, 4)
     _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
-    assert result is None, "Can't see 10.10.10.1/32 as multipath in `show ip route`"
+    assert result is None, "Can't see 10.10.10.1/32 as multipath (4) in `show ip route`"
+
+    step(
+        "Configure allow-ecmp 2, ECMP group routes SHOULD have next-hops with the lowest IPs"
+    )
+    r1.vtysh_cmd(
+        """
+    configure terminal
+        router rip
+            allow-ecmp 2
+    """
+    )
+
+    test_func = functools.partial(_show_rip_routes)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+    assert (
+        result is None
+    ), "Can't see 10.10.10.1/32 as ECMP with the lowest next-hop IPs"
+
+    test_func = functools.partial(_show_routes, 2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+    assert result is None, "Can't see 10.10.10.1/32 as multipath (2) in `show ip route`"
 
 
 if __name__ == "__main__":
index 00040622e5d7e7c1e6bf9702c9dc3ecf354758e0..799f46a6dc7ed9bcae452c6e88ac0ef9d47cd549 100644 (file)
@@ -23,7 +23,7 @@
         "instance": [
             {
                 "vrf": "default",
-                "allow-ecmp": "true",
+                "allow-ecmp": 1,
                 "distance": {
                     "source": [
                         {
index 2feddde2d86b8fc263d8fd751a4fe3bc7f3d1cad..dad83619cea6f504e3dcfdffd8af0fdd7282fe45 100644 (file)
@@ -19,7 +19,7 @@
 <ripd xmlns="http://frrouting.org/yang/ripd">
        <instance>
                <vrf>default</vrf>
-               <allow-ecmp>true</allow-ecmp>
+               <allow-ecmp>1</allow-ecmp>
                <static-route>10.0.1.0/24</static-route>
                <distance>
                        <source>
index 23e5b0227ca9d4692fd95bac08473d250d9c9fc7..b557cabb22ae4770a7ae9bf00fae037fc358c3e0 100644 (file)
@@ -23,6 +23,10 @@ module frr-bgp-route-map {
     prefix rt-types;
   }
 
+  import frr-route-types {
+    prefix frr-route-types;
+  }
+
   organization
     "Free Range Routing";
   contact
@@ -168,6 +172,12 @@ module frr-bgp-route-map {
       "Match IPv6 next hop address";
   }
 
+  identity source-protocol {
+    base frr-route-map:rmap-match-type;
+    description
+      "Match protocol via which the route was learnt";
+  }
+
   identity distance {
     base frr-route-map:rmap-set-type;
     description
@@ -759,6 +769,13 @@ module frr-bgp-route-map {
           "IPv6 address";
       }
     }
+
+    case source-protocol {
+      when "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:source-protocol')";
+      leaf source-protocol {
+        type frr-route-types:frr-route-types;
+      }
+    }
   }
 
   augment "/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:rmap-set-action/frr-route-map:set-action" {
index a4bf50d958be3807c341eb3428f573d75e1e5109..5f85a4cabc1b092f1f7cf38439176ec934468301 100644 (file)
@@ -119,8 +119,8 @@ module frr-ripd {
           "VRF name.";
       }
       leaf allow-ecmp {
-        type boolean;
-        default "false";
+        type uint8;
+        default 0;
         description
           "Allow equal-cost multi-path.";
       }
index a3b61c90498fe747d423d3beec89ea0e06e90972..3653f715276b3ae40d81cdcec3a2da10f3a3fb09 100644 (file)
@@ -5736,6 +5736,21 @@ void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
                                  memory_order_relaxed);
 }
 
+static struct zebra_dplane_ctx *
+dplane_provider_dequeue_out_ctx(struct zebra_dplane_provider *prov)
+{
+       struct zebra_dplane_ctx *ctx;
+
+       ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+       if (!ctx)
+               return NULL;
+
+       atomic_fetch_sub_explicit(&(prov->dp_out_queued), 1,
+                                 memory_order_relaxed);
+
+       return ctx;
+}
+
 /*
  * Accessor for provider object
  */
@@ -6763,7 +6778,7 @@ static void dplane_thread_loop(struct event *event)
                dplane_provider_lock(prov);
 
                while (counter < limit) {
-                       ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
+                       ctx = dplane_provider_dequeue_out_ctx(prov);
                        if (ctx) {
                                dplane_ctx_list_add_tail(&work_list, ctx);
                                counter++;
index f1a99d89ce9cbcbb3945f5f298a4f859a146673a..47d5b64a3fa1e9d2272a159e53de4817a7961e9e 100644 (file)
@@ -97,8 +97,8 @@ static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
 static struct zebra_nhlfe *
 nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
          enum nexthop_types_t gtype, const union g_addr *gate,
-         ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels,
-         bool is_backup);
+         ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+         const mpls_label_t *labels, bool is_backup);
 static int nhlfe_del(struct zebra_nhlfe *nhlfe);
 static void nhlfe_free(struct zebra_nhlfe *nhlfe);
 static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
@@ -212,11 +212,11 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label,
                        changed++;
                } else {
                        /* Add LSP entry to this nexthop */
-                       nhlfe = nhlfe_add(lsp, lsp_type, nexthop->type,
-                                         &nexthop->gate, nexthop->ifindex,
-                                         nexthop->nh_label->num_labels,
-                                         nexthop->nh_label->label,
-                                         false /*backup*/);
+                       nhlfe = nhlfe_add(
+                               lsp, lsp_type, nexthop->type, &nexthop->gate,
+                               nexthop->ifindex, nexthop->vrf_id,
+                               nexthop->nh_label->num_labels,
+                               nexthop->nh_label->label, false /*backup*/);
                        if (!nhlfe)
                                return -1;
 
@@ -1236,6 +1236,7 @@ static int nhlfe_nhop_match(struct zebra_nhlfe *nhlfe,
 
 /*
  * Locate NHLFE that matches with passed info.
+ * TODO: handle vrf_id if vrf backend is netns based
  */
 static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
                                      enum lsp_types_t lsp_type,
@@ -1261,7 +1262,8 @@ static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
 static struct zebra_nhlfe *
 nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
            enum nexthop_types_t gtype, const union g_addr *gate,
-           ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels)
+           ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+           const mpls_label_t *labels)
 {
        struct zebra_nhlfe *nhlfe;
        struct nexthop *nexthop;
@@ -1278,7 +1280,7 @@ nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
 
        nexthop_add_labels(nexthop, lsp_type, num_labels, labels);
 
-       nexthop->vrf_id = VRF_DEFAULT;
+       nexthop->vrf_id = vrf_id;
        nexthop->type = gtype;
        switch (nexthop->type) {
        case NEXTHOP_TYPE_IPV4:
@@ -1313,29 +1315,20 @@ nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
  * Add primary or backup NHLFE. Base entry must have been created and
  * duplicate check done.
  */
-static struct zebra_nhlfe *nhlfe_add(struct zebra_lsp *lsp,
-                                    enum lsp_types_t lsp_type,
-                                    enum nexthop_types_t gtype,
-                                    const union g_addr *gate,
-                                    ifindex_t ifindex, uint8_t num_labels,
-                                    const mpls_label_t *labels, bool is_backup)
+static struct zebra_nhlfe *
+nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+         enum nexthop_types_t gtype, const union g_addr *gate,
+         ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
+         const mpls_label_t *labels, bool is_backup)
 {
        struct zebra_nhlfe *nhlfe;
 
        if (!lsp)
                return NULL;
 
-       /* Must have labels */
-       if (num_labels == 0 || labels == NULL) {
-               if (IS_ZEBRA_DEBUG_MPLS)
-                       zlog_debug("%s: invalid nexthop: no labels", __func__);
-
-               return NULL;
-       }
-
        /* Allocate new object */
-       nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
-                           labels);
+       nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, vrf_id,
+                           num_labels, labels);
 
        if (!nhlfe)
                return NULL;
@@ -1510,16 +1503,18 @@ static json_object *nhlfe_json(struct zebra_nhlfe *nhlfe)
 
        json_nhlfe = json_object_new_object();
        json_object_string_add(json_nhlfe, "type", nhlfe_type2str(nhlfe->type));
-       json_object_int_add(json_nhlfe, "outLabel",
-                           nexthop->nh_label->label[0]);
-
-       json_label_stack = json_object_new_array();
-       json_object_object_add(json_nhlfe, "outLabelStack", json_label_stack);
-       for (i = 0; i < nexthop->nh_label->num_labels; i++)
-               json_object_array_add(
-                       json_label_stack,
-                       json_object_new_int(nexthop->nh_label->label[i]));
-
+       if (nexthop->nh_label) {
+               json_object_int_add(json_nhlfe, "outLabel",
+                                   nexthop->nh_label->label[0]);
+               json_label_stack = json_object_new_array();
+               json_object_object_add(json_nhlfe, "outLabelStack",
+                                      json_label_stack);
+               for (i = 0; i < nexthop->nh_label->num_labels; i++)
+                       json_object_array_add(
+                               json_label_stack,
+                               json_object_new_int(
+                                       nexthop->nh_label->label[i]));
+       }
        json_object_int_add(json_nhlfe, "distance", nhlfe->distance);
 
        if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED))
@@ -1530,6 +1525,10 @@ static json_object *nhlfe_json(struct zebra_nhlfe *nhlfe)
        case NEXTHOP_TYPE_IPV4_IFINDEX:
                json_object_string_addf(json_nhlfe, "nexthop", "%pI4",
                                        &nexthop->gate.ipv4);
+               if (nexthop->ifindex)
+                       json_object_string_add(json_nhlfe, "interface",
+                                              ifindex2ifname(nexthop->ifindex,
+                                                             nexthop->vrf_id));
                break;
        case NEXTHOP_TYPE_IPV6:
        case NEXTHOP_TYPE_IPV6_IFINDEX:
@@ -2242,8 +2241,8 @@ zebra_mpls_lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
                         const mpls_label_t *out_labels)
 {
        /* Just a public pass-through to the internal implementation */
-       return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
-                        out_labels, false /*backup*/);
+       return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+                        num_labels, out_labels, false /*backup*/);
 }
 
 /*
@@ -2257,8 +2256,8 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nhlfe(
        uint8_t num_labels, const mpls_label_t *out_labels)
 {
        /* Just a public pass-through to the internal implementation */
-       return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
-                        out_labels, true);
+       return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
+                        num_labels, out_labels, true);
 }
 
 /*
@@ -2270,12 +2269,10 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_nh(struct zebra_lsp *lsp,
 {
        struct zebra_nhlfe *nhlfe;
 
-       if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
-               return NULL;
-
-       nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
-                         nh->nh_label->num_labels, nh->nh_label->label,
-                         false /*backup*/);
+       nhlfe = nhlfe_add(
+               lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, nh->vrf_id,
+               nh->nh_label ? nh->nh_label->num_labels : 0,
+               nh->nh_label ? nh->nh_label->label : NULL, false /*backup*/);
 
        return nhlfe;
 }
@@ -2290,12 +2287,10 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nh(struct zebra_lsp *lsp,
 {
        struct zebra_nhlfe *nhlfe;
 
-       if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
-               return NULL;
-
-       nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate,
-                                nh->ifindex, nh->nh_label->num_labels,
-                                nh->nh_label->label, true);
+       nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
+                         nh->vrf_id,
+                         nh->nh_label ? nh->nh_label->num_labels : 0,
+                         nh->nh_label ? nh->nh_label->label : NULL, true);
 
        return nhlfe;
 }
@@ -3113,7 +3108,7 @@ static struct zebra_nhlfe *
 lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
              uint8_t num_out_labels, const mpls_label_t *out_labels,
              enum nexthop_types_t gtype, const union g_addr *gate,
-             ifindex_t ifindex, bool is_backup)
+             ifindex_t ifindex, vrf_id_t vrf_id, bool is_backup)
 {
        struct zebra_nhlfe *nhlfe;
        char buf[MPLS_LABEL_STRLEN];
@@ -3133,13 +3128,18 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
                struct nexthop *nh = nhlfe->nexthop;
 
                assert(nh);
-               assert(nh->nh_label);
 
                /* Clear deleted flag (in case it was set) */
                UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
-               if (nh->nh_label->num_labels == num_out_labels
-                   && !memcmp(nh->nh_label->label, out_labels,
-                              sizeof(mpls_label_t) * num_out_labels))
+
+               if (!nh->nh_label || num_out_labels == 0)
+                       /* No change */
+                       return nhlfe;
+
+               if (nh->nh_label &&
+                   nh->nh_label->num_labels == num_out_labels &&
+                   !memcmp(nh->nh_label->label, out_labels,
+                           sizeof(mpls_label_t) * num_out_labels))
                        /* No change */
                        return nhlfe;
 
@@ -3160,7 +3160,7 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
                }
 
                /* Update out label(s), trigger processing. */
-               if (nh->nh_label->num_labels == num_out_labels)
+               if (nh->nh_label && nh->nh_label->num_labels == num_out_labels)
                        memcpy(nh->nh_label->label, out_labels,
                               sizeof(mpls_label_t) * num_out_labels);
                else {
@@ -3170,7 +3170,7 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
                }
        } else {
                /* Add LSP entry to this nexthop */
-               nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
+               nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, vrf_id,
                                  num_out_labels, out_labels, is_backup);
                if (!nhlfe)
                        return NULL;
@@ -3179,8 +3179,11 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
                        char buf2[MPLS_LABEL_STRLEN];
 
                        nhlfe2str(nhlfe, buf, sizeof(buf));
-                       mpls_label2str(num_out_labels, out_labels, buf2,
-                                      sizeof(buf2), 0, 0);
+                       if (num_out_labels)
+                               mpls_label2str(num_out_labels, out_labels, buf2,
+                                              sizeof(buf2), 0, 0);
+                       else
+                               snprintf(buf2, sizeof(buf2), "-");
 
                        zlog_debug("Add LSP in-label %u type %d %snexthop %s out-label(s) %s",
                                   lsp->ile.in_label, type, backup_str, buf,
@@ -3199,6 +3202,8 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
 /*
  * Install an LSP and forwarding entry; used primarily
  * from vrf zapi message processing.
+ * TODO: handle vrf_id parameter when mpls API extends to interface or SRTE
+ * changes
  */
 int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
                     mpls_label_t in_label, uint8_t num_out_labels,
@@ -3220,7 +3225,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
        lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
 
        nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
-                             gate, ifindex, false /*backup*/);
+                             gate, ifindex, VRF_DEFAULT, false /*backup*/);
        if (nhlfe == NULL)
                return -1;
 
@@ -3239,8 +3244,8 @@ static int lsp_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
 {
        struct zebra_nhlfe *nhlfe;
 
-       nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
-                             znh->type, &znh->gate, znh->ifindex,
+       nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+                             &znh->gate, znh->ifindex, znh->vrf_id,
                              false /*backup*/);
        if (nhlfe == NULL)
                return -1;
@@ -3277,9 +3282,9 @@ static int lsp_backup_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
 {
        struct zebra_nhlfe *nhlfe;
 
-       nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num,
-                             znh->labels, znh->type, &znh->gate,
-                             znh->ifindex, true /*backup*/);
+       nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
+                             &znh->gate, znh->ifindex, znh->vrf_id,
+                             true /*backup*/);
        if (nhlfe == NULL) {
                if (IS_ZEBRA_DEBUG_MPLS)
                        zlog_debug("%s: unable to add backup nhlfe, label: %u",
@@ -3610,8 +3615,8 @@ int zebra_mpls_static_lsp_add(struct zebra_vrf *zvrf, mpls_label_t in_label,
 
        } else {
                /* Add static LSP entry to this nexthop */
-               nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate,
-                                 ifindex, 1, &out_label, false /*backup*/);
+               nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate, ifindex,
+                                 VRF_DEFAULT, 1, &out_label, false /*backup*/);
                if (!nhlfe)
                        return -1;
 
@@ -3820,7 +3825,8 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
                                        break;
                                }
 
-                               if (nexthop->type != NEXTHOP_TYPE_IFINDEX)
+                               if (nexthop->type != NEXTHOP_TYPE_IFINDEX &&
+                                   nexthop->nh_label)
                                        out_label_str = mpls_label2str(
                                                nexthop->nh_label->num_labels,
                                                &nexthop->nh_label->label[0],
index a1fee840df8e95b2a954457c2e5db6048fe4ef75..fc3f0c96a72f07e7829b892e431cdc628af7e865 100644 (file)
@@ -16,6 +16,7 @@
 #include "ptm_lib.h"
 #include "rib.h"
 #include "stream.h"
+#include "lib/version.h"
 #include "vrf.h"
 #include "vty.h"
 #include "lib_errors.h"