git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #12034 from opensourcerouting/fix/gr_hard_notification
author Russ White <russ@riw.us>
Thu, 6 Oct 2022 14:05:11 +0000 (10:05 -0400)
committer GitHub <noreply@github.com>
Thu, 6 Oct 2022 14:05:11 +0000 (10:05 -0400)
bgpd: Do not send Deconfig/Shutdown message when restarting

36 files changed:
bgpd/bgp_conditional_adv.c
bgpd/bgp_conditional_adv.h
bgpd/bgp_debug.c
bgpd/bgp_debug.h
bgpd/bgp_nexthop.c
bgpd/bgp_open.c
bgpd/bgp_route.c
bgpd/bgp_vty.c
bgpd/bgp_zebra.c
bgpd/bgpd.c
bgpd/bgpd.h
configure.ac
debian/changelog
doc/user/bgp.rst
doc/user/pimv6.rst
isisd/fabricd.c
pimd/pim6_cmd.c
pimd/pim_addr.h
pimd/pim_hello.c
pimd/pim_tlv.c
pimd/pim_tlv.h
pimd/pim_vty.c
redhat/frr.spec.in
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py
tests/topotests/bgp_local_as/__init__.py [new file with mode: 0644]
tests/topotests/bgp_local_as/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/r3/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/r3/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_local_as/test_bgp_local_as.py [new file with mode: 0644]
tools/frr.service.in
tools/frr@.service.in
zebra/if_netlink.c
zebra/rtadv.c

index fc44e86cbc479c359cf4300a648bdceabbaf9c4b..2598361ad2c584fa924d5349febc0aac80fac9a6 100644 (file)
@@ -53,18 +53,16 @@ bgp_check_rmap_prefixes_in_bgp_table(struct bgp_table *table,
 
                        if (ret == RMAP_PERMITMATCH) {
                                bgp_dest_unlock_node(dest);
-                               if (BGP_DEBUG(update, UPDATE_OUT))
-                                       zlog_debug(
-                                               "%s: Condition map routes present in BGP table",
-                                               __func__);
+                               bgp_cond_adv_debug(
+                                       "%s: Condition map routes present in BGP table",
+                                       __func__);
 
                                return ret;
                        }
                }
        }
 
-       if (BGP_DEBUG(update, UPDATE_OUT))
-               zlog_debug("%s: Condition map routes not present in BGP table",
+       bgp_cond_adv_debug("%s: Condition map routes not present in BGP table",
                           __func__);
 
        return ret;
@@ -98,8 +96,7 @@ static void bgp_conditional_adv_routes(struct peer *peer, afi_t afi,
        subgrp->pscount = 0;
        SET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
 
-       if (BGP_DEBUG(update, UPDATE_OUT))
-               zlog_debug("%s: %s routes to/from %s for %s", __func__,
+       bgp_cond_adv_debug("%s: %s routes to/from %s for %s", __func__,
                           update_type == UPDATE_TYPE_ADVERTISE ? "Advertise"
                                                                : "Withdraw",
                           peer->host, get_afi_safi_str(afi, safi, false));
@@ -224,7 +221,7 @@ static void bgp_conditional_adv_timer(struct thread *t)
                            && !peer->advmap_table_change)
                                continue;
 
-                       if (BGP_DEBUG(update, UPDATE_OUT)) {
+                       if (BGP_DEBUG(cond_adv, COND_ADV)) {
                                if (peer->advmap_table_change)
                                        zlog_debug(
                                                "%s: %s - routes changed in BGP table.",
@@ -269,10 +266,9 @@ static void bgp_conditional_adv_timer(struct thread *t)
                                            .advmap.update_type !=
                                    filter->advmap.update_type)) {
                                /* Handle change to peer advmap */
-                               if (BGP_DEBUG(update, UPDATE_OUT))
-                                       zlog_debug(
-                                               "%s: advmap.update_type changed for peer %s, adjusting update_group.",
-                                               __func__, peer->host);
+                               bgp_cond_adv_debug(
+                                       "%s: advmap.update_type changed for peer %s, adjusting update_group.",
+                                       __func__, peer->host);
 
                                update_group_adjust_peer(paf);
                        }
@@ -283,12 +279,10 @@ static void bgp_conditional_adv_timer(struct thread *t)
                         */
                        if (peer->advmap_config_change[afi][safi]) {
 
-                               if (BGP_DEBUG(update, UPDATE_OUT))
-                                       zlog_debug(
-                                               "%s: Configuration is changed on peer %s for %s, send the normal update first.",
-                                               __func__, peer->host,
-                                               get_afi_safi_str(afi, safi,
-                                                                false));
+                               bgp_cond_adv_debug(
+                                       "%s: Configuration is changed on peer %s for %s, send the normal update first.",
+                                       __func__, peer->host,
+                                       get_afi_safi_str(afi, safi, false));
                                if (paf) {
                                        update_subgroup_split_peer(paf, NULL);
                                        subgrp = paf->subgroup;
@@ -325,8 +319,7 @@ void bgp_conditional_adv_enable(struct peer *peer, afi_t afi, safi_t safi)
         * neighbors (AFI/SAFI). So just increment the counter.
         */
        if (++bgp->condition_filter_count > 1) {
-               if (BGP_DEBUG(update, UPDATE_OUT))
-                       zlog_debug("%s: condition_filter_count %d", __func__,
+               bgp_cond_adv_debug("%s: condition_filter_count %d", __func__,
                                   bgp->condition_filter_count);
 
                return;
@@ -349,8 +342,7 @@ void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
         * So there's nothing to do except decrementing the counter.
         */
        if (--bgp->condition_filter_count != 0) {
-               if (BGP_DEBUG(update, UPDATE_OUT))
-                       zlog_debug("%s: condition_filter_count %d", __func__,
+               bgp_cond_adv_debug("%s: condition_filter_count %d", __func__,
                                   bgp->condition_filter_count);
 
                return;
@@ -359,3 +351,164 @@ void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
        /* Last filter removed. So cancel conditional routes polling thread. */
        THREAD_OFF(bgp->t_condition_check);
 }
+
+static void peer_advertise_map_filter_update(struct peer *peer, afi_t afi,
+                                            safi_t safi, const char *amap_name,
+                                            struct route_map *amap,
+                                            const char *cmap_name,
+                                            struct route_map *cmap,
+                                            bool condition, bool set)
+{
+       struct bgp_filter *filter;
+       bool filter_exists = false;
+
+       filter = &peer->filter[afi][safi];
+
+       /* advertise-map is already configured. */
+       if (filter->advmap.aname) {
+               filter_exists = true;
+               XFREE(MTYPE_BGP_FILTER_NAME, filter->advmap.aname);
+               XFREE(MTYPE_BGP_FILTER_NAME, filter->advmap.cname);
+       }
+
+       route_map_counter_decrement(filter->advmap.amap);
+
+       /* Removed advertise-map configuration */
+       if (!set) {
+               memset(&filter->advmap, 0, sizeof(filter->advmap));
+
+               /* decrement condition_filter_count delete timer if
+                * this is the last advertise-map to be removed.
+                */
+               if (filter_exists)
+                       bgp_conditional_adv_disable(peer, afi, safi);
+
+               /* Process peer route updates. */
+               peer_on_policy_change(peer, afi, safi, 1);
+
+               return;
+       }
+
+       /* Update filter data with newly configured values. */
+       filter->advmap.aname = XSTRDUP(MTYPE_BGP_FILTER_NAME, amap_name);
+       filter->advmap.cname = XSTRDUP(MTYPE_BGP_FILTER_NAME, cmap_name);
+       filter->advmap.amap = amap;
+       filter->advmap.cmap = cmap;
+       filter->advmap.condition = condition;
+       route_map_counter_increment(filter->advmap.amap);
+       peer->advmap_config_change[afi][safi] = true;
+
+       /* Increment condition_filter_count and/or create timer. */
+       if (!filter_exists) {
+               filter->advmap.update_type = UPDATE_TYPE_ADVERTISE;
+               bgp_conditional_adv_enable(peer, afi, safi);
+       }
+
+       /* Process peer route updates. */
+       peer_on_policy_change(peer, afi, safi, 1);
+}
+
+/* Set advertise-map to the peer. */
+int peer_advertise_map_set(struct peer *peer, afi_t afi, safi_t safi,
+                          const char *advertise_name,
+                          struct route_map *advertise_map,
+                          const char *condition_name,
+                          struct route_map *condition_map, bool condition)
+{
+       struct peer *member;
+       struct listnode *node, *nnode;
+
+       /* Set configuration on peer. */
+       peer_advertise_map_filter_update(peer, afi, safi, advertise_name,
+                                        advertise_map, condition_name,
+                                        condition_map, condition, true);
+
+       /* Check if handling a regular peer & Skip peer-group mechanics. */
+       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+               /* Set override-flag and process peer route updates. */
+               SET_FLAG(peer->filter_override[afi][safi][RMAP_OUT],
+                        PEER_FT_ADVERTISE_MAP);
+               return 0;
+       }
+
+       /*
+        * Set configuration on all peer-group members, unless they are
+        * explicitly overriding peer-group configuration.
+        */
+       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+               /* Skip peers with overridden configuration. */
+               if (CHECK_FLAG(member->filter_override[afi][safi][RMAP_OUT],
+                              PEER_FT_ADVERTISE_MAP))
+                       continue;
+
+               /* Set configuration on peer-group member. */
+               peer_advertise_map_filter_update(
+                       member, afi, safi, advertise_name, advertise_map,
+                       condition_name, condition_map, condition, true);
+       }
+
+       return 0;
+}
+
+/* Unset advertise-map from the peer. */
+int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,
+                            const char *advertise_name,
+                            struct route_map *advertise_map,
+                            const char *condition_name,
+                            struct route_map *condition_map, bool condition)
+{
+       struct peer *member;
+       struct listnode *node, *nnode;
+
+       /* advertise-map is not configured */
+       if (!peer->filter[afi][safi].advmap.aname)
+               return 0;
+
+       /* Unset override-flag unconditionally. */
+       UNSET_FLAG(peer->filter_override[afi][safi][RMAP_OUT],
+                  PEER_FT_ADVERTISE_MAP);
+
+       /* Inherit configuration from peer-group if peer is member. */
+       if (peer_group_active(peer)) {
+               PEER_STR_ATTR_INHERIT(peer, peer->group,
+                                     filter[afi][safi].advmap.aname,
+                                     MTYPE_BGP_FILTER_NAME);
+               PEER_ATTR_INHERIT(peer, peer->group,
+                                 filter[afi][safi].advmap.amap);
+       } else
+               peer_advertise_map_filter_update(
+                       peer, afi, safi, advertise_name, advertise_map,
+                       condition_name, condition_map, condition, false);
+
+       /* Check if handling a regular peer and skip peer-group mechanics. */
+       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+               /* Process peer route updates. */
+               bgp_cond_adv_debug("%s: Send normal update to %s for %s",
+                                  __func__, peer->host,
+                                  get_afi_safi_str(afi, safi, false));
+
+               return 0;
+       }
+
+       /*
+        * Remove configuration on all peer-group members, unless they are
+        * explicitly overriding peer-group configuration.
+        */
+       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+               /* Skip peers with overridden configuration. */
+               if (CHECK_FLAG(member->filter_override[afi][safi][RMAP_OUT],
+                              PEER_FT_ADVERTISE_MAP))
+                       continue;
+               /* Remove configuration on peer-group member. */
+               peer_advertise_map_filter_update(
+                       member, afi, safi, advertise_name, advertise_map,
+                       condition_name, condition_map, condition, false);
+
+               /* Process peer route updates. */
+               bgp_cond_adv_debug("%s: Send normal update to %s for %s ",
+                                  __func__, member->host,
+                                  get_afi_safi_str(afi, safi, false));
+       }
+
+       return 0;
+}
index 371ae856c2cd02962c45bbaa66acee2aa0bfcd54..a4f1403a441ac3428c2d5fe15b2d5200206ec3bd 100644 (file)
 extern "C" {
 #endif
 
+/* Macro to log debug message */
+#define bgp_cond_adv_debug(...)                                                \
+       do {                                                                   \
+               if (BGP_DEBUG(cond_adv, COND_ADV))                             \
+                       zlog_debug("" __VA_ARGS__);                            \
+       } while (0)
+
 /* Polling time for monitoring condition-map routes in route table */
 #define DEFAULT_CONDITIONAL_ROUTES_POLL_TIME 60
 
@@ -40,6 +47,18 @@ extern void bgp_conditional_adv_enable(struct peer *peer, afi_t afi,
                                       safi_t safi);
 extern void bgp_conditional_adv_disable(struct peer *peer, afi_t afi,
                                        safi_t safi);
+extern int peer_advertise_map_set(struct peer *peer, afi_t afi, safi_t safi,
+                                 const char *advertise_name,
+                                 struct route_map *advertise_map,
+                                 const char *condition_name,
+                                 struct route_map *condition_map,
+                                 bool condition);
+extern int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,
+                                   const char *advertise_name,
+                                   struct route_map *advertise_map,
+                                   const char *condition_name,
+                                   struct route_map *condition_map,
+                                   bool condition);
 #ifdef __cplusplus
 }
 #endif
index 864cdb51a083ae9b649534c0d2451b92f57797c2..df7262be6412e590747bf6daae4ad8bd208f167e 100644 (file)
@@ -70,6 +70,7 @@ unsigned long conf_bgp_debug_pbr;
 unsigned long conf_bgp_debug_graceful_restart;
 unsigned long conf_bgp_debug_evpn_mh;
 unsigned long conf_bgp_debug_bfd;
+unsigned long conf_bgp_debug_cond_adv;
 
 unsigned long term_bgp_debug_as4;
 unsigned long term_bgp_debug_neighbor_events;
@@ -90,6 +91,7 @@ unsigned long term_bgp_debug_pbr;
 unsigned long term_bgp_debug_graceful_restart;
 unsigned long term_bgp_debug_evpn_mh;
 unsigned long term_bgp_debug_bfd;
+unsigned long term_bgp_debug_cond_adv;
 
 struct list *bgp_debug_neighbor_events_peers = NULL;
 struct list *bgp_debug_keepalive_peers = NULL;
@@ -2108,6 +2110,33 @@ DEFPY(debug_bgp_bfd, debug_bgp_bfd_cmd,
        return CMD_SUCCESS;
 }
 
+DEFPY (debug_bgp_cond_adv,
+       debug_bgp_cond_adv_cmd,
+       "[no$no] debug bgp conditional-advertisement",
+       NO_STR
+       DEBUG_STR
+       BGP_STR
+       "BGP conditional advertisement\n")
+{
+       if (vty->node == CONFIG_NODE) {
+               if (no)
+                       DEBUG_OFF(cond_adv, COND_ADV);
+               else
+                       DEBUG_ON(cond_adv, COND_ADV);
+       } else {
+               if (no) {
+                       TERM_DEBUG_OFF(cond_adv, COND_ADV);
+                       vty_out(vty,
+                               "BGP conditional advertisement debugging is off\n");
+               } else {
+                       TERM_DEBUG_ON(cond_adv, COND_ADV);
+                       vty_out(vty,
+                               "BGP conditional advertisement debugging is on\n");
+               }
+       }
+       return CMD_SUCCESS;
+}
+
 DEFUN (no_debug_bgp,
        no_debug_bgp_cmd,
        "no debug bgp",
@@ -2152,6 +2181,7 @@ DEFUN (no_debug_bgp,
        TERM_DEBUG_OFF(evpn_mh, EVPN_MH_ES);
        TERM_DEBUG_OFF(evpn_mh, EVPN_MH_RT);
        TERM_DEBUG_OFF(bfd, BFD_LIB);
+       TERM_DEBUG_OFF(cond_adv, COND_ADV);
 
        vty_out(vty, "All possible debugging has been turned off\n");
 
@@ -2244,6 +2274,10 @@ DEFUN_NOSH (show_debugging_bgp,
        if (BGP_DEBUG(bfd, BFD_LIB))
                vty_out(vty, "  BGP BFD library debugging is on\n");
 
+       if (BGP_DEBUG(cond_adv, COND_ADV))
+               vty_out(vty,
+                       "  BGP conditional advertisement debugging is on\n");
+
        return CMD_SUCCESS;
 }
 
@@ -2373,6 +2407,11 @@ static int bgp_config_write_debug(struct vty *vty)
                write++;
        }
 
+       if (CONF_BGP_DEBUG(cond_adv, COND_ADV)) {
+               vty_out(vty, "debug bgp conditional-advertisement\n");
+               write++;
+       }
+
        return write;
 }
 
@@ -2501,6 +2540,10 @@ void bgp_debug_init(void)
        /* debug bgp bfd */
        install_element(ENABLE_NODE, &debug_bgp_bfd_cmd);
        install_element(CONFIG_NODE, &debug_bgp_bfd_cmd);
+
+       /* debug bgp conditional advertisement */
+       install_element(ENABLE_NODE, &debug_bgp_cond_adv_cmd);
+       install_element(CONFIG_NODE, &debug_bgp_cond_adv_cmd);
 }
 
 /* Return true if this prefix is on the per_prefix_list of prefixes to debug
index 62f5340dfd53b2f78831744c222c9c733a66538c..be5ed0afdc8d0f8178c13ce33205cebabdcdfa04 100644 (file)
@@ -80,6 +80,7 @@ extern unsigned long conf_bgp_debug_pbr;
 extern unsigned long conf_bgp_debug_graceful_restart;
 extern unsigned long conf_bgp_debug_evpn_mh;
 extern unsigned long conf_bgp_debug_bfd;
+extern unsigned long conf_bgp_debug_cond_adv;
 
 extern unsigned long term_bgp_debug_as4;
 extern unsigned long term_bgp_debug_neighbor_events;
@@ -98,6 +99,7 @@ extern unsigned long term_bgp_debug_pbr;
 extern unsigned long term_bgp_debug_graceful_restart;
 extern unsigned long term_bgp_debug_evpn_mh;
 extern unsigned long term_bgp_debug_bfd;
+extern unsigned long term_bgp_debug_cond_adv;
 
 extern struct list *bgp_debug_neighbor_events_peers;
 extern struct list *bgp_debug_keepalive_peers;
@@ -143,6 +145,7 @@ struct bgp_debug_filter {
 #define BGP_DEBUG_GRACEFUL_RESTART     0x01
 
 #define BGP_DEBUG_BFD_LIB             0x01
+#define BGP_DEBUG_COND_ADV 0x01
 
 #define CONF_DEBUG_ON(a, b)    (conf_bgp_debug_ ## a |= (BGP_DEBUG_ ## b))
 #define CONF_DEBUG_OFF(a, b)   (conf_bgp_debug_ ## a &= ~(BGP_DEBUG_ ## b))
index 7867860cbfbd01760bb63ea78e0a2de9a765265b..075350cd2b8e8571faad871a42c7d0f9c5f4d9d1 100644 (file)
@@ -865,7 +865,6 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
        }
        tbuf = time(NULL) - (monotime(NULL) - bnc->last_update);
        vty_out(vty, "  Last update: %s", ctime(&tbuf));
-       vty_out(vty, "\n");
 
        /* show paths dependent on nexthop, if needed. */
        if (specific)
@@ -912,6 +911,7 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
                struct prefix nhop;
                struct bgp_nexthop_cache_head (*tree)[AFI_MAX];
                struct bgp_nexthop_cache *bnc;
+               bool found = false;
 
                if (!str2prefix(nhopip_str, &nhop)) {
                        vty_out(vty, "nexthop address is malformed\n");
@@ -919,12 +919,16 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
                }
                tree = import_table ? &bgp->import_check_table
                                    : &bgp->nexthop_cache_table;
-               bnc = bnc_find(tree[family2afi(nhop.family)], &nhop, 0, 0);
-               if (!bnc) {
-                       vty_out(vty, "specified nexthop does not have entry\n");
-                       return CMD_SUCCESS;
+               frr_each (bgp_nexthop_cache, &(*tree)[family2afi(nhop.family)],
+                         bnc) {
+                       if (prefix_cmp(&bnc->prefix, &nhop))
+                               continue;
+                       bgp_show_nexthop(vty, bgp, bnc, true);
+                       found = true;
                }
-               bgp_show_nexthop(vty, bgp, bnc, true);
+               if (!found)
+                       vty_out(vty, "nexthop %s does not have entry\n",
+                               nhopip_str);
        } else
                bgp_show_nexthops(vty, bgp, import_table);
 
index 7248f034a5a02c27a84e21f604f873fa9df2ef19..d1667fac2612f1a3b8e634d2789284cdc1013773 100644 (file)
@@ -1185,15 +1185,30 @@ as_t peek_for_as4_capability(struct peer *peer, uint16_t length)
                uint8_t opt_type;
                uint16_t opt_length;
 
-               /* Check the length. */
-               if (stream_get_getp(s) + 2 > end)
+               /* Ensure we can read the option type */
+               if (stream_get_getp(s) + 1 > end)
                        goto end;
 
-               /* Fetch option type and length. */
+               /* Fetch the option type */
                opt_type = stream_getc(s);
-               opt_length = BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)
-                                    ? stream_getw(s)
-                                    : stream_getc(s);
+
+               /*
+                * Check the length and fetch the opt_length
+                * If the peer is BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)
+                * then we do a getw which is 2 bytes.  So we need to
+                * ensure that we can read that as well
+                */
+               if (BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)) {
+                       if (stream_get_getp(s) + 2 > end)
+                               goto end;
+
+                       opt_length = stream_getw(s);
+               } else {
+                       if (stream_get_getp(s) + 1 > end)
+                               goto end;
+
+                       opt_length = stream_getc(s);
+               }
 
                /* Option length check. */
                if (stream_get_getp(s) + opt_length > end)
@@ -1263,19 +1278,40 @@ int bgp_open_option_parse(struct peer *peer, uint16_t length,
                uint8_t opt_type;
                uint16_t opt_length;
 
-               /* Must have at least an OPEN option header */
-               if (STREAM_READABLE(s) < 2) {
+               /*
+                * Check that we can read the opt_type and fetch it
+                */
+               if (STREAM_READABLE(s) < 1) {
                        zlog_info("%s Option length error", peer->host);
                        bgp_notify_send(peer, BGP_NOTIFY_OPEN_ERR,
                                        BGP_NOTIFY_OPEN_MALFORMED_ATTR);
                        return -1;
                }
-
-               /* Fetch option type and length. */
                opt_type = stream_getc(s);
-               opt_length = BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)
-                                    ? stream_getw(s)
-                                    : stream_getc(s);
+
+               /*
+                * Check the length of the stream to ensure that
+                * FRR can properly read the opt_length. Then read it
+                */
+               if (BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)) {
+                       if (STREAM_READABLE(s) < 2) {
+                               zlog_info("%s Option length error", peer->host);
+                               bgp_notify_send(peer, BGP_NOTIFY_OPEN_ERR,
+                                               BGP_NOTIFY_OPEN_MALFORMED_ATTR);
+                               return -1;
+                       }
+
+                       opt_length = stream_getw(s);
+               } else {
+                       if (STREAM_READABLE(s) < 1) {
+                               zlog_info("%s Option length error", peer->host);
+                               bgp_notify_send(peer, BGP_NOTIFY_OPEN_ERR,
+                                               BGP_NOTIFY_OPEN_MALFORMED_ATTR);
+                               return -1;
+                       }
+
+                       opt_length = stream_getc(s);
+               }
 
                /* Option length check. */
                if (STREAM_READABLE(s) < opt_length) {
index a8e016a89081602c4737e11e9bff083137ae3de2..7d4110029ec22e52800a743e96c0a1910512b1a6 100644 (file)
@@ -10287,11 +10287,17 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
 
        /* Display the IGP cost or 'inaccessible' */
        if (!CHECK_FLAG(path->flags, BGP_PATH_VALID)) {
-               if (json_paths)
+               bool import = CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK);
+
+               if (json_paths) {
                        json_object_boolean_false_add(json_nexthop_global,
                                                      "accessible");
-               else
-                       vty_out(vty, " (inaccessible)");
+                       json_object_boolean_add(json_nexthop_global,
+                                               "importCheckEnabled", import);
+               } else {
+                       vty_out(vty, " (inaccessible%s)",
+                               import ? ", import-check enabled" : "");
+               }
        } else {
                if (path->extra && path->extra->igpmetric) {
                        if (json_paths)
index 82c9d9667cbe25aebea1767e25009a1a9a12c4fd..5b4a56233df87503488697afec441086727799d2 100644 (file)
@@ -873,9 +873,6 @@ int bgp_vty_return(struct vty *vty, enum bgp_create_error_code ret)
        case BGP_ERR_REMOVE_PRIVATE_AS:
                str = "remove-private-AS cannot be configured for IBGP peers";
                break;
-       case BGP_ERR_LOCAL_AS_ALLOWED_ONLY_FOR_EBGP:
-               str = "Local-AS allowed only for EBGP peers";
-               break;
        case BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS:
                str = "Cannot have local-as same as BGP AS number";
                break;
@@ -936,9 +933,6 @@ int bgp_vty_return(struct vty *vty, enum bgp_create_error_code ret)
        case BGP_ERR_AF_UNCONFIGURED:
                str = "AFI/SAFI specified is not currently configured.";
                break;
-       case BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS_REMOTE_AS:
-               str = "AS specified for local as is the same as the remote as and this is not allowed.";
-               break;
        case BGP_ERR_INVALID_AS:
                str = "Confederation AS specified is the same AS as our AS.";
                break;
@@ -14735,7 +14729,7 @@ static int bgp_show_neighbor_vty(struct vty *vty, const char *name,
 
 
 /* "show [ip] bgp neighbors graceful-restart" commands.  */
-DEFUN (show_ip_bgp_neighbors_gracrful_restart,
+DEFUN (show_ip_bgp_neighbors_graceful_restart,
        show_ip_bgp_neighbors_graceful_restart_cmd,
        "show bgp [<ipv4|ipv6>] neighbors [<A.B.C.D|X:X::X:X|WORD>] graceful-restart [json]",
        SHOW_STR
index 7dfb5046dd2f1dba3d9c2b297e25c9fbb3348dc3..57a859c61da9a803026448e894a772655d6adc1c 100644 (file)
@@ -307,6 +307,11 @@ static int bgp_interface_address_add(ZAPI_CALLBACK_ARGS)
 {
        struct connected *ifc;
        struct bgp *bgp;
+       struct peer *peer;
+       struct prefix *addr;
+       struct listnode *node, *nnode;
+       afi_t afi;
+       safi_t safi;
 
        bgp = bgp_lookup_by_vrf_id(vrf_id);
 
@@ -332,6 +337,48 @@ static int bgp_interface_address_add(ZAPI_CALLBACK_ARGS)
                if (IN6_IS_ADDR_LINKLOCAL(&ifc->address->u.prefix6)
                    && !list_isempty(ifc->ifp->nbr_connected))
                        bgp_start_interface_nbrs(bgp, ifc->ifp);
+               else {
+                       addr = ifc->address;
+
+                       for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
+                               if (addr->family == AF_INET)
+                                       continue;
+
+                               /*
+                                * If the Peer's interface name matches the
+                                * interface name for which BGP received the
+                                * update and if the received interface address
+                                * is a globalV6 and if the peer is currently
+                                * using a v4-mapped-v6 addr or a link local
+                                * address, then copy the Rxed global v6 addr
+                                * into peer's v6_global and send updates out
+                                * with new nexthop addr.
+                                */
+                               if ((peer->conf_if &&
+                                    (strcmp(peer->conf_if, ifc->ifp->name) ==
+                                     0)) &&
+                                   !IN6_IS_ADDR_LINKLOCAL(&addr->u.prefix6) &&
+                                   ((IS_MAPPED_IPV6(
+                                            &peer->nexthop.v6_global)) ||
+                                    IN6_IS_ADDR_LINKLOCAL(
+                                            &peer->nexthop.v6_global))) {
+
+                                       if (bgp_debug_zebra(ifc->address)) {
+                                               zlog_debug(
+                                                       "Update peer %pBP's current intf addr %pI6 and send updates",
+                                                       peer,
+                                                       &peer->nexthop
+                                                                .v6_global);
+                                       }
+                                       memcpy(&peer->nexthop.v6_global,
+                                              &addr->u.prefix6,
+                                              IPV6_MAX_BYTELEN);
+                                       FOREACH_AFI_SAFI (afi, safi)
+                                               bgp_announce_route(peer, afi,
+                                                                  safi, true);
+                               }
+                       }
+               }
        }
 
        return 0;
index 7840f5e0b88912bcb3a2440a128a1a48f54e9690..5a9ec1bbdc7551c2b9464ff526d86aff234c62b9 100644 (file)
@@ -997,9 +997,15 @@ void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
 static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)
 {
        struct bgp *bgp;
+       as_t local_as;
 
        bgp = peer->bgp;
 
+       if (peer->change_local_as)
+               local_as = peer->change_local_as;
+       else
+               local_as = peer->local_as;
+
        /* Peer-group */
        if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
                if (peer->as_type == AS_INTERNAL)
@@ -1010,8 +1016,8 @@ static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)
 
                else if (peer->as_type == AS_SPECIFIED && peer->as) {
                        assert(bgp);
-                       return (bgp->as == peer->as ? BGP_PEER_IBGP
-                                                   : BGP_PEER_EBGP);
+                       return (local_as == peer->as ? BGP_PEER_IBGP
+                                                    : BGP_PEER_EBGP);
                }
 
                else {
@@ -1028,17 +1034,17 @@ static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)
 
        /* Normal peer */
        if (bgp && CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION)) {
-               if (peer->local_as == 0)
+               if (local_as == 0)
                        return BGP_PEER_INTERNAL;
 
-               if (peer->local_as == peer->as) {
+               if (local_as == peer->as) {
                        if (bgp->as == bgp->confed_id) {
-                               if (peer->local_as == bgp->as)
+                               if (local_as == bgp->as)
                                        return BGP_PEER_IBGP;
                                else
                                        return BGP_PEER_EBGP;
                        } else {
-                               if (peer->local_as == bgp->confed_id)
+                               if (local_as == bgp->confed_id)
                                        return BGP_PEER_EBGP;
                                else
                                        return BGP_PEER_IBGP;
@@ -1056,8 +1062,7 @@ static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)
                            && (peer->group->conf->as_type != AS_UNSPECIFIED)) {
                                if (peer->group->conf->as_type
                                    == AS_SPECIFIED) {
-                                       if (peer->local_as
-                                           == peer->group->conf->as)
+                                       if (local_as == peer->group->conf->as)
                                                return BGP_PEER_IBGP;
                                        else
                                                return BGP_PEER_EBGP;
@@ -1073,9 +1078,8 @@ static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)
                        return (peer->as_type == AS_INTERNAL ? BGP_PEER_IBGP
                                                             : BGP_PEER_EBGP);
 
-               return (peer->local_as == 0
-                               ? BGP_PEER_INTERNAL
-                               : peer->local_as == peer->as ? BGP_PEER_IBGP
+               return (local_as == 0 ? BGP_PEER_INTERNAL
+                                     : local_as == peer->as ? BGP_PEER_IBGP
                                                             : BGP_PEER_EBGP);
        }
 }
@@ -1885,14 +1889,6 @@ void peer_as_change(struct peer *peer, as_t as, int as_specified)
                UNSET_FLAG(peer->af_flags[AFI_L2VPN][SAFI_EVPN],
                           PEER_FLAG_REFLECTOR_CLIENT);
        }
-
-       /* local-as reset */
-       if (newtype != BGP_PEER_EBGP) {
-               peer->change_local_as = 0;
-               peer_flag_unset(peer, PEER_FLAG_LOCAL_AS);
-               peer_flag_unset(peer, PEER_FLAG_LOCAL_AS_NO_PREPEND);
-               peer_flag_unset(peer, PEER_FLAG_LOCAL_AS_REPLACE_AS);
-       }
 }
 
 /* If peer does not exist, create new one.  If peer already exists,
@@ -3033,17 +3029,6 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,
                        /* ebgp-multihop reset */
                        if (gtype == BGP_PEER_IBGP)
                                group->conf->ttl = MAXTTL;
-
-                       /* local-as reset */
-                       if (gtype != BGP_PEER_EBGP) {
-                               group->conf->change_local_as = 0;
-                               peer_flag_unset(group->conf,
-                                               PEER_FLAG_LOCAL_AS);
-                               peer_flag_unset(group->conf,
-                                               PEER_FLAG_LOCAL_AS_NO_PREPEND);
-                               peer_flag_unset(group->conf,
-                                               PEER_FLAG_LOCAL_AS_REPLACE_AS);
-                       }
                }
 
                SET_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE);
@@ -5488,8 +5473,8 @@ void peer_tcp_mss_unset(struct peer *peer)
  * being used by a peer has changed (AF specific). Automatically
  * initiates inbound or outbound processing as needed.
  */
-static void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
-                                 int outbound)
+void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
+                          int outbound)
 {
        if (outbound) {
                update_group_adjust_peer(peer_af_find(peer, afi, safi));
@@ -6121,17 +6106,10 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend,
        struct bgp *bgp = peer->bgp;
        struct peer *member;
        struct listnode *node, *nnode;
-       enum bgp_peer_sort ptype = peer_sort(peer);
-
-       if (ptype != BGP_PEER_EBGP && ptype != BGP_PEER_INTERNAL)
-               return BGP_ERR_LOCAL_AS_ALLOWED_ONLY_FOR_EBGP;
 
        if (bgp->as == as)
                return BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS;
 
-       if (peer->as == as)
-               return BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS_REMOTE_AS;
-
        /* Save previous flag states. */
        old_no_prepend =
                !!CHECK_FLAG(peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);
@@ -6147,6 +6125,7 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend,
            && old_replace_as == replace_as)
                return 0;
        peer->change_local_as = as;
+       (void)peer_sort(peer);
 
        /* Check if handling a regular peer. */
        if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
@@ -7231,169 +7210,6 @@ int peer_unsuppress_map_unset(struct peer *peer, afi_t afi, safi_t safi)
        return 0;
 }
 
-static void peer_advertise_map_filter_update(struct peer *peer, afi_t afi,
-                                            safi_t safi, const char *amap_name,
-                                            struct route_map *amap,
-                                            const char *cmap_name,
-                                            struct route_map *cmap,
-                                            bool condition, bool set)
-{
-       struct bgp_filter *filter;
-       bool filter_exists = false;
-
-       filter = &peer->filter[afi][safi];
-
-       /* advertise-map is already configured. */
-       if (filter->advmap.aname) {
-               filter_exists = true;
-               XFREE(MTYPE_BGP_FILTER_NAME, filter->advmap.aname);
-               XFREE(MTYPE_BGP_FILTER_NAME, filter->advmap.cname);
-       }
-
-       route_map_counter_decrement(filter->advmap.amap);
-
-       /* Removed advertise-map configuration */
-       if (!set) {
-               memset(&filter->advmap, 0, sizeof(filter->advmap));
-
-               /* decrement condition_filter_count delete timer if
-                * this is the last advertise-map to be removed.
-                */
-               if (filter_exists)
-                       bgp_conditional_adv_disable(peer, afi, safi);
-
-               /* Process peer route updates. */
-               peer_on_policy_change(peer, afi, safi, 1);
-
-               return;
-       }
-
-       /* Update filter data with newly configured values. */
-       filter->advmap.aname = XSTRDUP(MTYPE_BGP_FILTER_NAME, amap_name);
-       filter->advmap.cname = XSTRDUP(MTYPE_BGP_FILTER_NAME, cmap_name);
-       filter->advmap.amap = amap;
-       filter->advmap.cmap = cmap;
-       filter->advmap.condition = condition;
-       route_map_counter_increment(filter->advmap.amap);
-       peer->advmap_config_change[afi][safi] = true;
-
-       /* Increment condition_filter_count and/or create timer. */
-       if (!filter_exists) {
-               filter->advmap.update_type = UPDATE_TYPE_ADVERTISE;
-               bgp_conditional_adv_enable(peer, afi, safi);
-       }
-
-       /* Process peer route updates. */
-       peer_on_policy_change(peer, afi, safi, 1);
-}
-
-/* Set advertise-map to the peer. */
-int peer_advertise_map_set(struct peer *peer, afi_t afi, safi_t safi,
-                          const char *advertise_name,
-                          struct route_map *advertise_map,
-                          const char *condition_name,
-                          struct route_map *condition_map, bool condition)
-{
-       struct peer *member;
-       struct listnode *node, *nnode;
-
-       /* Set configuration on peer. */
-       peer_advertise_map_filter_update(peer, afi, safi, advertise_name,
-                                        advertise_map, condition_name,
-                                        condition_map, condition, true);
-
-       /* Check if handling a regular peer & Skip peer-group mechanics. */
-       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
-               /* Set override-flag and process peer route updates. */
-               SET_FLAG(peer->filter_override[afi][safi][RMAP_OUT],
-                        PEER_FT_ADVERTISE_MAP);
-               return 0;
-       }
-
-       /*
-        * Set configuration on all peer-group members, unless they are
-        * explicitly overriding peer-group configuration.
-        */
-       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
-               /* Skip peers with overridden configuration. */
-               if (CHECK_FLAG(member->filter_override[afi][safi][RMAP_OUT],
-                              PEER_FT_ADVERTISE_MAP))
-                       continue;
-
-               /* Set configuration on peer-group member. */
-               peer_advertise_map_filter_update(
-                       member, afi, safi, advertise_name, advertise_map,
-                       condition_name, condition_map, condition, true);
-       }
-
-       return 0;
-}
-
-/* Unset advertise-map from the peer. */
-int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,
-                            const char *advertise_name,
-                            struct route_map *advertise_map,
-                            const char *condition_name,
-                            struct route_map *condition_map, bool condition)
-{
-       struct peer *member;
-       struct listnode *node, *nnode;
-
-       /* advertise-map is not configured */
-       if (!peer->filter[afi][safi].advmap.aname)
-               return 0;
-
-       /* Unset override-flag unconditionally. */
-       UNSET_FLAG(peer->filter_override[afi][safi][RMAP_OUT],
-                  PEER_FT_ADVERTISE_MAP);
-
-       /* Inherit configuration from peer-group if peer is member. */
-       if (peer_group_active(peer)) {
-               PEER_STR_ATTR_INHERIT(peer, peer->group,
-                                     filter[afi][safi].advmap.aname,
-                                     MTYPE_BGP_FILTER_NAME);
-               PEER_ATTR_INHERIT(peer, peer->group,
-                                 filter[afi][safi].advmap.amap);
-       } else
-               peer_advertise_map_filter_update(
-                       peer, afi, safi, advertise_name, advertise_map,
-                       condition_name, condition_map, condition, false);
-
-       /* Check if handling a regular peer and skip peer-group mechanics. */
-       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
-               /* Process peer route updates. */
-               if (BGP_DEBUG(update, UPDATE_OUT))
-                       zlog_debug("%s: Send normal update to %s for %s",
-                                  __func__, peer->host,
-                                  get_afi_safi_str(afi, safi, false));
-
-               return 0;
-       }
-
-       /*
-        * Remove configuration on all peer-group members, unless they are
-        * explicitly overriding peer-group configuration.
-        */
-       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
-               /* Skip peers with overridden configuration. */
-               if (CHECK_FLAG(member->filter_override[afi][safi][RMAP_OUT],
-                              PEER_FT_ADVERTISE_MAP))
-                       continue;
-               /* Remove configuration on peer-group member. */
-               peer_advertise_map_filter_update(
-                       member, afi, safi, advertise_name, advertise_map,
-                       condition_name, condition_map, condition, false);
-
-               /* Process peer route updates. */
-               if (BGP_DEBUG(update, UPDATE_OUT))
-                       zlog_debug("%s: Send normal update to %s for %s ",
-                                  __func__, member->host,
-                                  get_afi_safi_str(afi, safi, false));
-       }
-
-       return 0;
-}
-
 static bool peer_maximum_prefix_clear_overflow(struct peer *peer)
 {
        if (!CHECK_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW))
index f6162f33e460e7e58a24a3bc1db4e7ba6f477329..c8bcb5cd555b320995b7deef78c02500e43fbc6c 100644 (file)
@@ -2004,13 +2004,11 @@ enum bgp_create_error_code {
        BGP_ERR_AF_UNCONFIGURED = -15,
        BGP_ERR_SOFT_RECONFIG_UNCONFIGURED = -16,
        BGP_ERR_INSTANCE_MISMATCH = -17,
-       BGP_ERR_LOCAL_AS_ALLOWED_ONLY_FOR_EBGP = -18,
        BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS = -19,
        BGP_ERR_TCPSIG_FAILED = -20,
        BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK = -21,
        BGP_ERR_NO_IBGP_WITH_TTLHACK = -22,
        BGP_ERR_NO_INTERFACE_CONFIG = -23,
-       BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS_REMOTE_AS = -24,
        BGP_ERR_AS_OVERRIDE = -25,
        BGP_ERR_INVALID_DYNAMIC_NEIGHBORS_LIMIT = -26,
        BGP_ERR_DYNAMIC_NEIGHBORS_RANGE_EXISTS = -27,
@@ -2576,7 +2574,8 @@ void peer_tcp_mss_unset(struct peer *peer);
 
 extern void bgp_recalculate_afi_safi_bestpaths(struct bgp *bgp, afi_t afi,
                                               safi_t safi);
-
+extern void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
+                                 int outbound);
 #ifdef _FRR_ATTRIBUTE_PRINTFRR
 /* clang-format off */
 #pragma FRR printfrr_ext "%pBP" (struct peer *)
index 8c1fab0eabedf8f643edd6b99f75ec54027fbb45..4e1080045e7890e50ac3b300fcdf6ba8b40a2834 100644 (file)
@@ -7,7 +7,7 @@
 ##
 AC_PREREQ([2.69])
 
-AC_INIT([frr], [8.4-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [8.5-dev], [https://github.com/frrouting/frr/issues])
 PACKAGE_URL="https://frrouting.org/"
 AC_SUBST([PACKAGE_URL])
 PACKAGE_FULLNAME="FRRouting"
index 9c14270b664d72f2f5dd2b3e868fd2617147e81c..ad43f13e3356a8d000804c0269580e0d5db7968a 100644 (file)
@@ -1,3 +1,9 @@
+frr (8.5~dev-1) UNRELEASED; urgency=medium
+
+  * FRR Dev 8.5
+
+ -- Donatas Abraitis <donatas@opensourcerouting.org>  Tue, 04 Oct 2022 16:00:00 +0500
+
 frr (8.4~dev-1) UNRELEASED; urgency=medium
 
   * FRR Dev 8.4
index 5ce857c50c69923dc34aa910132b9684eb24f33d..f136ea4ae12a870c1678d9598914fafa47297223 100644 (file)
@@ -3497,6 +3497,10 @@ Debugging
    library messages and BGP BFD integration messages that are mostly state
    transitions and validation problems.
 
+.. clicmd:: debug bgp conditional-advertisement
+
+   Enable or disable debugging of BGP conditional advertisement.
+
 .. clicmd:: debug bgp neighbor-events
 
    Enable or disable debugging for neighbor events. This provides general
index 843734e217193e90d3c5bcb7c74124ea41c5e3cb..b242f4fe17395ac30554c7d1eb8caaf67b87af8e 100644 (file)
@@ -416,6 +416,10 @@ configure CLI mode. If you specify debug commands in the configuration cli
 mode, the debug commands can be persistent across restarts of the FRR pim6d if
 the config was written out.
 
+.. clicmd:: debug mld
+
+   This turns on debugging for MLD protocol activity.
+
 .. clicmd:: debug pimv6 events
 
    This turns on debugging for PIMv6 system events. Especially timers.
@@ -456,3 +460,15 @@ the config was written out.
 .. clicmd:: debug mroute6 detail
 
    This turns on detailed debugging for PIMv6 interaction with kernel MFC cache.
+
+.. clicmd:: debug mld events
+
+   This turns on debugging for MLD system events.
+
+.. clicmd:: debug mld packets
+
+   This turns on information about MLD protocol packets handling.
+
+.. clicmd:: debug mld trace [detail]
+
+   This traces mld code and how it is running. 
index a37cda1ce136c9e99b11ca3781fba87cfa24890a..be035b623d6330702bba6bfc2bb3757230f32bd1 100644 (file)
@@ -254,8 +254,10 @@ static void fabricd_initial_sync_timeout(struct thread *thread)
 {
        struct fabricd *f = THREAD_ARG(thread);
 
-       zlog_info("OpenFabric: Initial synchronization on %s timed out!",
-                 f->initial_sync_circuit->interface->name);
+       if (IS_DEBUG_ADJ_PACKETS)
+               zlog_debug(
+                       "OpenFabric: Initial synchronization on %s timed out!",
+                       f->initial_sync_circuit->interface->name);
        f->initial_sync_state = FABRICD_SYNC_PENDING;
        f->initial_sync_circuit = NULL;
 }
@@ -282,9 +284,11 @@ void fabricd_initial_sync_hello(struct isis_circuit *circuit)
                         timeout, &f->initial_sync_timeout);
        f->initial_sync_start = monotime(NULL);
 
-       zlog_info("OpenFabric: Started initial synchronization with %s on %s",
-                 sysid_print(circuit->u.p2p.neighbor->sysid),
-                 circuit->interface->name);
+       if (IS_DEBUG_ADJ_PACKETS)
+               zlog_debug(
+                       "OpenFabric: Started initial synchronization with %s on %s",
+                       sysid_print(circuit->u.p2p.neighbor->sysid),
+                       circuit->interface->name);
 }
 
 bool fabricd_initial_sync_is_in_progress(struct isis_area *area)
index dc84de6bfd6377da01857d8b7dfa751fa83d5d33..49248798bff8e65c6ceea2cbfea30a59447fd7ab 100644 (file)
@@ -1561,6 +1561,91 @@ DEFUN_NOSH (show_debugging_pimv6,
        return CMD_SUCCESS;
 }
 
+DEFPY (debug_mld,
+       debug_mld_cmd,
+       "[no] debug mld",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MLD_STR)
+{
+       if (!no) {
+               PIM_DO_DEBUG_GM_EVENTS;
+               PIM_DO_DEBUG_GM_PACKETS;
+               PIM_DO_DEBUG_GM_TRACE;
+       } else {
+               PIM_DONT_DEBUG_GM_EVENTS;
+               PIM_DONT_DEBUG_GM_PACKETS;
+               PIM_DONT_DEBUG_GM_TRACE;
+       }
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_events,
+       debug_mld_events_cmd,
+       "[no] debug mld events",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MLD_STR
+       DEBUG_MLD_EVENTS_STR)
+{
+       if (!no)
+               PIM_DO_DEBUG_GM_EVENTS;
+       else
+               PIM_DONT_DEBUG_GM_EVENTS;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_packets,
+       debug_mld_packets_cmd,
+       "[no] debug mld packets",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MLD_STR
+       DEBUG_MLD_PACKETS_STR)
+{
+       if (!no)
+               PIM_DO_DEBUG_GM_PACKETS;
+       else
+               PIM_DONT_DEBUG_GM_PACKETS;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_trace,
+       debug_mld_trace_cmd,
+       "[no] debug mld trace",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MLD_STR
+       DEBUG_MLD_TRACE_STR)
+{
+       if (!no)
+               PIM_DO_DEBUG_GM_TRACE;
+       else
+               PIM_DONT_DEBUG_GM_TRACE;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_trace_detail,
+       debug_mld_trace_detail_cmd,
+       "[no] debug mld trace detail",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MLD_STR
+       DEBUG_MLD_TRACE_STR
+       "detailed\n")
+{
+       if (!no)
+               PIM_DO_DEBUG_GM_TRACE_DETAIL;
+       else
+               PIM_DONT_DEBUG_GM_TRACE_DETAIL;
+
+       return CMD_SUCCESS;
+}
+
 void pim_cmd_init(void)
 {
        if_cmd_init(pim_interface_config_write);
@@ -1693,6 +1778,11 @@ void pim_cmd_init(void)
        install_element(ENABLE_NODE, &debug_pimv6_zebra_cmd);
        install_element(ENABLE_NODE, &debug_mroute6_cmd);
        install_element(ENABLE_NODE, &debug_mroute6_detail_cmd);
+       install_element(ENABLE_NODE, &debug_mld_cmd);
+       install_element(ENABLE_NODE, &debug_mld_events_cmd);
+       install_element(ENABLE_NODE, &debug_mld_packets_cmd);
+       install_element(ENABLE_NODE, &debug_mld_trace_cmd);
+       install_element(ENABLE_NODE, &debug_mld_trace_detail_cmd);
 
        install_element(CONFIG_NODE, &debug_pimv6_cmd);
        install_element(CONFIG_NODE, &debug_pimv6_nht_cmd);
@@ -1706,4 +1796,9 @@ void pim_cmd_init(void)
        install_element(CONFIG_NODE, &debug_pimv6_zebra_cmd);
        install_element(CONFIG_NODE, &debug_mroute6_cmd);
        install_element(CONFIG_NODE, &debug_mroute6_detail_cmd);
+       install_element(CONFIG_NODE, &debug_mld_cmd);
+       install_element(CONFIG_NODE, &debug_mld_events_cmd);
+       install_element(CONFIG_NODE, &debug_mld_packets_cmd);
+       install_element(CONFIG_NODE, &debug_mld_trace_cmd);
+       install_element(CONFIG_NODE, &debug_mld_trace_detail_cmd);
 }
index 7852d1788a7758a7c9edc76868361ce6d42c76cf..d15e978855560f147c6c862c58ff012fcb5b1a90 100644 (file)
@@ -37,6 +37,7 @@ typedef struct in_addr pim_addr;
 #define PIM_MAX_BITLEN IPV4_MAX_BITLEN
 #define PIM_AF_NAME     "ip"
 #define PIM_AF_DBG     "pim"
+#define GM_AF_DBG      "igmp"
 #define PIM_MROUTE_DBG  "mroute"
 #define PIMREG          "pimreg"
 #define GM              "IGMP"
@@ -65,6 +66,7 @@ typedef struct in6_addr pim_addr;
 #define PIM_MAX_BITLEN IPV6_MAX_BITLEN
 #define PIM_AF_NAME     "ipv6"
 #define PIM_AF_DBG     "pimv6"
+#define GM_AF_DBG      "mld"
 #define PIM_MROUTE_DBG  "mroute6"
 #define PIMREG          "pim6reg"
 #define GM              "MLD"
index 833103c27fc305e5bda46f3edd1ce7879f01c6f8..03ce8687e5f279e49182291e2a6ee6159bb6d03c 100644 (file)
@@ -389,8 +389,10 @@ int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf,
        uint8_t *curr = tlv_buf;
        uint8_t *pastend = tlv_buf + tlv_buf_size;
        uint8_t *tmp;
+#if PIM_IPV == 4
        struct pim_interface *pim_ifp = ifp->info;
        struct pim_instance *pim = pim_ifp->pim;
+#endif
 
        /*
         * Append options
@@ -452,19 +454,20 @@ int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf,
 
        /* Secondary Address List */
        if (ifp->connected->count) {
-               curr = pim_tlv_append_addrlist_ucast(curr, pastend,
-                                                    ifp->connected, AF_INET);
+               curr = pim_tlv_append_addrlist_ucast(curr, pastend, ifp,
+                                                    PIM_AF);
                if (!curr) {
                        if (PIM_DEBUG_PIM_HELLO) {
                                zlog_debug(
-                                       "%s: could not set PIM hello v4 Secondary Address List option for interface %s",
-                                       __func__, ifp->name);
+                                       "%s: could not set PIM hello %s Secondary Address List option for interface %s",
+                                       __func__, PIM_AF_NAME, ifp->name);
                        }
                        return -4;
                }
+#if PIM_IPV == 4
                if (pim->send_v6_secondary) {
-                       curr = pim_tlv_append_addrlist_ucast(
-                               curr, pastend, ifp->connected, AF_INET6);
+                       curr = pim_tlv_append_addrlist_ucast(curr, pastend, ifp,
+                                                            AF_INET6);
                        if (!curr) {
                                if (PIM_DEBUG_PIM_HELLO) {
                                        zlog_debug(
@@ -474,6 +477,7 @@ int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf,
                                return -4;
                        }
                }
+#endif
        }
 
        return curr - tlv_buf;
index 65975ce14640bca825fc723a93374315ac92c497..c9caf0016ee7aaf77ba82be6a9750b4f3c530039 100644 (file)
@@ -29,6 +29,8 @@
 #include "pim_tlv.h"
 #include "pim_str.h"
 #include "pim_msg.h"
+#include "pim_iface.h"
+#include "pim_addr.h"
 
 #if PIM_IPV == 4
 #define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV4
@@ -226,12 +228,15 @@ int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope,
 }
 
 uint8_t *pim_tlv_append_addrlist_ucast(uint8_t *buf, const uint8_t *buf_pastend,
-                                      struct list *ifconnected, int family)
+                                      struct interface *ifp, int family)
 {
        struct listnode *node;
        uint16_t option_len = 0;
        uint8_t *curr;
        size_t uel;
+       struct list *ifconnected = ifp->connected;
+       struct pim_interface *pim_ifp = ifp->info;
+       pim_addr addr;
 
        node = listhead(ifconnected);
 
@@ -252,7 +257,10 @@ uint8_t *pim_tlv_append_addrlist_ucast(uint8_t *buf, const uint8_t *buf_pastend,
                struct prefix *p = ifc->address;
                int l_encode;
 
-               if (!CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
+               addr = pim_addr_from_prefix(p);
+               if (!pim_addr_cmp(pim_ifp->primary_address, addr))
+                       /* don't add the primary address
+                        * into the secondary address list */
                        continue;
 
                if ((curr + uel) > buf_pastend)
index 64b3a0b6ba7206052c5324a506f7f0a7c4fc387d..63bd2c41f69f5e31b6083408141f89e902e859c2 100644 (file)
@@ -82,7 +82,7 @@ uint8_t *pim_tlv_append_2uint16(uint8_t *buf, const uint8_t *buf_pastend,
 uint8_t *pim_tlv_append_uint32(uint8_t *buf, const uint8_t *buf_pastend,
                               uint16_t option_type, uint32_t option_value);
 uint8_t *pim_tlv_append_addrlist_ucast(uint8_t *buf, const uint8_t *buf_pastend,
-                                      struct list *ifconnected, int family);
+                                      struct interface *ifp, int family);
 
 int pim_tlv_parse_holdtime(const char *ifname, pim_addr src_addr,
                           pim_hello_options *hello_options,
index 315fbae95c8007f50c0450844a34bf45b0f366fc..9021f1e12f560c7bc6a343eb3fd80f6f1e60ba5d 100644 (file)
@@ -59,20 +59,20 @@ int pim_debug_config_write(struct vty *vty)
                ++writes;
        }
        if (PIM_DEBUG_GM_EVENTS) {
-               vty_out(vty, "debug igmp events\n");
+               vty_out(vty, "debug " GM_AF_DBG " events\n");
                ++writes;
        }
        if (PIM_DEBUG_GM_PACKETS) {
-               vty_out(vty, "debug igmp packets\n");
+               vty_out(vty, "debug " GM_AF_DBG " packets\n");
                ++writes;
        }
        /* PIM_DEBUG_GM_TRACE catches _DETAIL too */
        if (router->debugs & PIM_MASK_GM_TRACE) {
-               vty_out(vty, "debug igmp trace\n");
+               vty_out(vty, "debug " GM_AF_DBG " trace\n");
                ++writes;
        }
        if (PIM_DEBUG_GM_TRACE_DETAIL) {
-               vty_out(vty, "debug igmp trace detail\n");
+               vty_out(vty, "debug " GM_AF_DBG " trace detail\n");
                ++writes;
        }
 
index 962541405f9f57e12b64cfa4e0a55a413f4b5fb2..8f469d2a07ac3929fa84e5bc8d721036debc22ad 100644 (file)
@@ -795,6 +795,8 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
 
 * Wed Jul 20 2022 Martin Winter <mwinter@opensourcerouting.org> - %{version}
 
+* Tue Oct 04 2022 Donatas Abraitis <donatas@opensourcerouting.org> - 8.4
+
 * Wed Jul 13 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.3
 - General:
 -    Add camelcase json keys in addition to pascalcase (Wrong JSON keys will be depracated)
index 4105c3fe63eb6de27e0be757bc04906322301ccc..c0709043538b7fb908ee804b4f3875b6e263bed6 100644 (file)
@@ -26,13 +26,17 @@ import os
 import sys
 import time
 import pytest
+import functools
+import json
 
 # Save the Current Working Directory to find configuration files.
 CWD = os.path.dirname(os.path.realpath(__file__))
 sys.path.append(os.path.join(CWD, "../"))
 sys.path.append(os.path.join(CWD, "../../"))
 
-from lib.topogen import Topogen, get_topogen
+
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
 
 from lib.common_config import (
     write_test_header,
@@ -288,7 +292,6 @@ def test_unnumbered_loopback_ebgp_nbr_p0(request):
         " received on R2 BGP and routing table , "
         "verify using show ip bgp, show ip route for IPv4 routes ."
     )
-
     llip = get_llip("r1", "r2-link0")
     assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip)
 
@@ -582,6 +585,195 @@ def test_restart_frr_p2(request):
     write_test_footer(tc_name)
 
 
+def test_configure_gua_on_unnumbered_intf(request):
+    """
+    Configure a global V6 address on an unnumbered interface on R1
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    reset_config_on_routers(tgen)
+
+    step("Configure IPv6 EBGP Unnumbered session between R1 and R2")
+    step("Enable capability extended-nexthop on both the IPv6 BGP peers")
+    step("Activate same IPv6 nbr from IPv4 unicast family")
+    step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+    step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+    bgp_convergence = verify_bgp_convergence(tgen, topo)
+    assert bgp_convergence is True, "Testcase {} :Failed \n Error: {}".format(
+        tc_name, bgp_convergence
+    )
+
+    step(" Configure 5 IPv4 static" " routes on R1, Nexthop as different links of R0")
+    for rte in range(0, NO_OF_RTES):
+        # Create Static routes
+        input_dict = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": NETWORK["ipv4"][rte],
+                        "no_of_ip": 1,
+                        "next_hop": NEXT_HOP["ipv4"][rte],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, input_dict)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Advertise static routes from IPv4 unicast family and IPv6 "
+        "unicast family respectively from R1 using red static cmd "
+        "Advertise loopback from IPv4 unicast family using network command "
+        "from R1"
+    )
+
+    configure_bgp_on_r1 = {
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [{"redist_type": "static"}],
+                            "advertise_networks": [
+                                {"network": NETWORK_CMD_IP, "no_of_network": 1}
+                            ],
+                        }
+                    },
+                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    r2 = tgen.gears["r2"]
+
+    def bgp_prefix_received_gua_nh(router):
+        output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast 11.0.20.1/32 json"))
+        expected = {
+            "prefix": "11.0.20.1/32",
+            "paths": [
+                {
+                    "nexthops": [
+                        {
+                            "ip": "5001:dead:beef::1",
+                            "hostname": "r1",
+                            "afi": "ipv6",
+                            "scope": "global",
+                        }
+                    ]
+                }
+            ],
+        }
+        return topotest.json_cmp(output, expected)
+
+    def bgp_prefix_received_v4_mapped_v6_nh(router):
+        output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast 11.0.20.1/32 json"))
+        expected = {
+            "prefix": "11.0.20.1/32",
+            "paths": [
+                {
+                    "nexthops": [
+                        {
+                            "ip": "::ffff:a00:501",
+                            "hostname": "r1",
+                            "afi": "ipv6",
+                            "scope": "global",
+                        }
+                    ]
+                }
+            ],
+        }
+        return topotest.json_cmp(output, expected)
+
+    step("Configure a global V6 address on an unnumbered interface on R1")
+    output = tgen.gears["r1"].vtysh_cmd(
+        """
+        configure terminal
+        interface r1-r2-eth5
+        ipv6 address 5001:dead:beef::1/126
+        !
+        """
+    )
+
+    # verify that r2 has received prefix with GUA as nexthop
+    test_func = functools.partial(bgp_prefix_received_gua_nh, r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert (
+        result is None
+    ), "Testcase {} : Failed \n Error: Nexthop for prefix 11.0.20.1 \
+    is not 5001:dead:beef::1".format(
+        tc_name
+    )
+
+    step("Configure a secondary global V6 address on an unnumbered interface on R1")
+    output = tgen.gears["r1"].vtysh_cmd(
+        """
+        configure terminal
+        interface r1-r2-eth5
+        ipv6 address 7771:dead:beef::1/126
+        !
+        """
+    )
+    # verify that r1 did not readvertise the prefix with secondary V6 address as the nexthop
+    test_func = functools.partial(bgp_prefix_received_gua_nh, r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert (
+        result is None
+    ), "Testcase {} : Failed \n Error: Nexthop for prefix 11.0.20.1 \
+    is not 5001:dead:beef::1".format(
+        tc_name
+    )
+
+    step("Unconfigure the secondary global V6 address from unnumbered interface on R1")
+    output = tgen.gears["r1"].vtysh_cmd(
+        """
+        configure terminal
+        interface r1-r2-eth5
+        no ipv6 address 7771:dead:beef::1/126
+        !
+        """
+    )
+    # verify that r1 still has the prefix with primary GUA as the nexthop
+    test_func = functools.partial(bgp_prefix_received_gua_nh, r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert (
+        result is None
+    ), "Testcase {} : Failed \n Error: Nexthop for prefix 11.0.20.1 \
+    is not 5001:dead:beef::1".format(
+        tc_name
+    )
+
+    step("Unconfigure the primary global V6 address from unnumbered interface on R1")
+    output = tgen.gears["r1"].vtysh_cmd(
+        """
+        configure terminal
+        interface r1-r2-eth5
+        no ipv6 address 5001:dead:beef::1/126
+        !
+        """
+    )
+    # verify that r1 has rcvd the prefix with v4-mapped-v6 address as the nexthop
+    test_func = functools.partial(bgp_prefix_received_v4_mapped_v6_nh, r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert (
+        result is None
+    ), "Testcase {} : Failed \n Error: Nexthop for prefix 11.0.20.1 \
+    is not ::ffff:a00:501".format(
+        tc_name
+    )
+
+    write_test_footer(tc_name)
+
+
 if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_local_as/__init__.py b/tests/topotests/bgp_local_as/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_local_as/r1/bgpd.conf b/tests/topotests/bgp_local_as/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..fa147d8
--- /dev/null
@@ -0,0 +1,14 @@
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as 65002
+ neighbor 192.168.1.2 local-as 65002
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor PG peer-group
+ neighbor PG remote-as 65003
+ neighbor PG local-as 65003
+ neighbor 192.168.2.2 peer-group PG
+ address-family ipv4
+  redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_local_as/r1/zebra.conf b/tests/topotests/bgp_local_as/r1/zebra.conf
new file mode 100644 (file)
index 0000000..5b32fae
--- /dev/null
@@ -0,0 +1,10 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
diff --git a/tests/topotests/bgp_local_as/r2/bgpd.conf b/tests/topotests/bgp_local_as/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..9c25bdf
--- /dev/null
@@ -0,0 +1,6 @@
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as internal
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_local_as/r2/zebra.conf b/tests/topotests/bgp_local_as/r2/zebra.conf
new file mode 100644 (file)
index 0000000..0c95656
--- /dev/null
@@ -0,0 +1,4 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_local_as/r3/bgpd.conf b/tests/topotests/bgp_local_as/r3/bgpd.conf
new file mode 100644 (file)
index 0000000..54ccd90
--- /dev/null
@@ -0,0 +1,6 @@
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as internal
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_local_as/r3/zebra.conf b/tests/topotests/bgp_local_as/r3/zebra.conf
new file mode 100644 (file)
index 0000000..d28dedd
--- /dev/null
@@ -0,0 +1,4 @@
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
diff --git a/tests/topotests/bgp_local_as/test_bgp_local_as.py b/tests/topotests/bgp_local_as/test_bgp_local_as.py
new file mode 100644 (file)
index 0000000..525d442
--- /dev/null
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    for routern in range(1, 4):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_local_as_same_remote_as():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_check_local_as_same_remote_as():
+        output = json.loads(
+            tgen.gears["r2"].vtysh_cmd("show bgp ipv4 unicast 172.16.255.1/32 json")
+        )
+        expected = {
+            "paths": [
+                {
+                    "valid": True,
+                    "aspath": {"string": "Local"},
+                    "nexthops": [{"ip": "192.168.1.1", "hostname": "r1"}],
+                }
+            ]
+        }
+        return topotest.json_cmp(output, expected)
+
+    step("Check if iBGP works when local-as == remote-as")
+    test_func = functools.partial(_bgp_check_local_as_same_remote_as)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefixes on R2"
+
+
+def test_bgp_peer_group_local_as_same_remote_as():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_check_local_as_same_remote_as():
+        output = json.loads(
+            tgen.gears["r3"].vtysh_cmd("show bgp ipv4 unicast 172.16.255.1/32 json")
+        )
+        expected = {
+            "paths": [
+                {
+                    "valid": True,
+                    "aspath": {"string": "Local"},
+                    "nexthops": [{"ip": "192.168.2.1", "hostname": "r1"}],
+                }
+            ]
+        }
+        return topotest.json_cmp(output, expected)
+
+    step("Initial BGP converge")
+    test_func = functools.partial(_bgp_check_local_as_same_remote_as)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefixes on R3"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index df1e4f3b081e0f47be669949cf2e7dedb4f2446b..1e958dd93e3d0c7bec4ad809c4822289bf10bab2 100644 (file)
@@ -15,7 +15,7 @@ StartLimitBurst=3
 TimeoutSec=@TIMEOUT_MIN@m
 WatchdogSec=60s
 RestartSec=5
-Restart=on-abnormal
+Restart=always
 LimitNOFILE=1024
 PIDFile=@CFG_STATE@/watchfrr.pid
 ExecStart=@CFG_SBIN@/frrinit.sh start
index 1cbef1b18c042573ce88c7261eecd52300745dda..85408a0cc7ea99b26b9024246fdb1686faa3d3a0 100644 (file)
@@ -15,7 +15,7 @@ StartLimitBurst=3
 TimeoutSec=@TIMEOUT_MIN@m
 WatchdogSec=60s
 RestartSec=5
-Restart=on-abnormal
+Restart=always
 LimitNOFILE=1024
 PIDFile=@CFG_STATE@/%I/watchfrr.pid
 ExecStart=@CFG_SBIN@/frrinit.sh start %I
index 0927bdc1d9ec8676c0c7851fd822d8a2d6e7e7ba..b28c468a969dcb268f7a206895ba1fe7866a5adb 100644 (file)
@@ -1182,13 +1182,19 @@ int interface_lookup_netlink(struct zebra_ns *zns)
        if (ret < 0)
                return ret;
 
+       /*
+        * So netlink_tunneldump_read will initiate a request
+        * per tunnel to get data.  If we are on a kernel that
+        * does not support this then we will get X error messages
+        * (one per tunnel request )back which netlink_parse_info will
+        * stop after the first one.  So we need to read equivalent
+        * error messages per tunnel then we can continue.
+        * if we do not gather all the read failures then
+        * later requests will not work right.
+        */
        ret = netlink_tunneldump_read(zns);
        if (ret < 0)
                return ret;
-       ret = netlink_parse_info(netlink_interface, netlink_cmd, &dp_info, 0,
-                                true);
-       if (ret < 0)
-               return ret;
 
        /* fixup linkages */
        zebra_if_update_all_links(zns);
@@ -2322,6 +2328,7 @@ int netlink_tunneldump_read(struct zebra_ns *zns)
        struct route_node *rn;
        struct interface *tmp_if = NULL;
        struct zebra_if *zif;
+       struct nlsock *netlink_cmd = &zns->netlink_cmd;
 
        zebra_dplane_info_from_zns(&dp_info, zns, true /*is_cmd*/);
 
@@ -2337,7 +2344,14 @@ int netlink_tunneldump_read(struct zebra_ns *zns)
                                                 tmp_if->ifindex);
                if (ret < 0)
                        return ret;
+
+               ret = netlink_parse_info(netlink_interface, netlink_cmd,
+                                        &dp_info, 0, true);
+
+               if (ret < 0)
+                       return ret;
        }
+
        return 0;
 }
 #endif /* GNU_LINUX */
index 93590a2f166786badea22a989b9a9d172b45a4d7..127888d6554796f2ee5674784a21c11a0f67ab22 100644 (file)
@@ -664,8 +664,9 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len,
             zif->rtadv.lastadvcurhoplimit.tv_sec == 0)) {
                flog_warn(
                        EC_ZEBRA_RA_PARAM_MISMATCH,
-                       "%s(%u): Rx RA - our AdvCurHopLimit doesn't agree with %s",
-                       ifp->name, ifp->ifindex, addr_str);
+                       "%s(%u): Rx RA - our AdvCurHopLimit (%u) doesn't agree with %s (%u)",
+                       ifp->name, ifp->ifindex, zif->rtadv.AdvCurHopLimit,
+                       addr_str, radvert->nd_ra_curhoplimit);
                monotime(&zif->rtadv.lastadvcurhoplimit);
        }
 
@@ -676,8 +677,11 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len,
             zif->rtadv.lastadvmanagedflag.tv_sec == 0)) {
                flog_warn(
                        EC_ZEBRA_RA_PARAM_MISMATCH,
-                       "%s(%u): Rx RA - our AdvManagedFlag doesn't agree with %s",
-                       ifp->name, ifp->ifindex, addr_str);
+                       "%s(%u): Rx RA - our AdvManagedFlag (%u) doesn't agree with %s (%u)",
+                       ifp->name, ifp->ifindex, zif->rtadv.AdvManagedFlag,
+                       addr_str,
+                       !!CHECK_FLAG(radvert->nd_ra_flags_reserved,
+                                    ND_RA_FLAG_MANAGED));
                monotime(&zif->rtadv.lastadvmanagedflag);
        }
 
@@ -688,8 +692,11 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len,
             zif->rtadv.lastadvotherconfigflag.tv_sec == 0)) {
                flog_warn(
                        EC_ZEBRA_RA_PARAM_MISMATCH,
-                       "%s(%u): Rx RA - our AdvOtherConfigFlag doesn't agree with %s",
-                       ifp->name, ifp->ifindex, addr_str);
+                       "%s(%u): Rx RA - our AdvOtherConfigFlag (%u) doesn't agree with %s (%u)",
+                       ifp->name, ifp->ifindex, zif->rtadv.AdvOtherConfigFlag,
+                       addr_str,
+                       !!CHECK_FLAG(radvert->nd_ra_flags_reserved,
+                                    ND_RA_FLAG_OTHER));
                monotime(&zif->rtadv.lastadvotherconfigflag);
        }
 
@@ -700,20 +707,23 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len,
             zif->rtadv.lastadvreachabletime.tv_sec == 0)) {
                flog_warn(
                        EC_ZEBRA_RA_PARAM_MISMATCH,
-                       "%s(%u): Rx RA - our AdvReachableTime doesn't agree with %s",
-                       ifp->name, ifp->ifindex, addr_str);
+                       "%s(%u): Rx RA - our AdvReachableTime (%u) doesn't agree with %s (%u)",
+                       ifp->name, ifp->ifindex, zif->rtadv.AdvReachableTime,
+                       addr_str, ntohl(radvert->nd_ra_reachable));
                monotime(&zif->rtadv.lastadvreachabletime);
        }
 
-       if ((ntohl(radvert->nd_ra_retransmit) !=
+       if ((radvert->nd_ra_retransmit && zif->rtadv.AdvRetransTimer) &&
+           (ntohl(radvert->nd_ra_retransmit) !=
             (unsigned int)zif->rtadv.AdvRetransTimer) &&
            (monotime_since(&zif->rtadv.lastadvretranstimer, NULL) >
                     SIXHOUR2USEC ||
             zif->rtadv.lastadvretranstimer.tv_sec == 0)) {
                flog_warn(
                        EC_ZEBRA_RA_PARAM_MISMATCH,
-                       "%s(%u): Rx RA - our AdvRetransTimer doesn't agree with %s",
-                       ifp->name, ifp->ifindex, addr_str);
+                       "%s(%u): Rx RA - our AdvRetransTimer (%u) doesn't agree with %s (%u)",
+                       ifp->name, ifp->ifindex, zif->rtadv.AdvRetransTimer,
+                       addr_str, ntohl(radvert->nd_ra_retransmit));
                monotime(&zif->rtadv.lastadvretranstimer);
        }