git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #4401 from manuhalo/fix_isisd_remove_conf
author Donald Sharp <sharpd@cumulusnetworks.com>
Wed, 29 May 2019 11:46:38 +0000 (07:46 -0400)
committer GitHub <noreply@github.com>
Wed, 29 May 2019 11:46:38 +0000 (07:46 -0400)
isisd: del routes when area is unconfigured

50 files changed:
bgpd/bgp_nht.c
bgpd/bgp_route.c
bgpd/bgp_vty.c
bgpd/bgpd.c
doc/user/pim.rst
isisd/isis_pfpacket.c
lib/linklist.c
lib/linklist.h
lib/nexthop.c
lib/nexthop.h
lib/plist.c
pimd/pim_assert.c
pimd/pim_bsm.c [new file with mode: 0644]
pimd/pim_bsm.h [new file with mode: 0644]
pimd/pim_cmd.c
pimd/pim_cmd.h
pimd/pim_hello.c
pimd/pim_iface.c
pimd/pim_iface.h
pimd/pim_instance.c
pimd/pim_instance.h
pimd/pim_join.c
pimd/pim_msg.c
pimd/pim_msg.h
pimd/pim_nht.c
pimd/pim_nht.h
pimd/pim_pim.c
pimd/pim_register.c
pimd/pim_rp.c
pimd/pim_rp.h
pimd/pim_rpf.c
pimd/pim_upstream.c
pimd/pim_vty.c
pimd/pim_vxlan.c
pimd/pimd.h
pimd/subdir.am
snapcraft/README.snap_build.md
snapcraft/README.usage.md
snapcraft/defaults/fabricd.conf.default [new file with mode: 0644]
snapcraft/scripts/Makefile
snapcraft/scripts/bgpd-service
snapcraft/scripts/fabricd-service [new file with mode: 0644]
snapcraft/snapcraft.yaml.in
tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py [new file with mode: 0644]
tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py [new file with mode: 0644]
zebra/zebra_rib.c

index 7e721db49dbaf9b6a64711e824ab92e63d5a5b67..fdfa15b445f76995d2f5989baf6e90663e114acc 100644 (file)
@@ -474,8 +474,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
                                continue;
 
                        for (oldnh = bnc->nexthop; oldnh; oldnh = oldnh->next)
-                               if (nexthop_same_no_recurse(oldnh, nexthop) &&
-                                   nexthop_labels_match(oldnh, nexthop))
+                               if (nexthop_same(oldnh, nexthop))
                                        break;
 
                        if (!oldnh)
index d526afd77bfa46e832e2616959e20c0f8680ecea..31243c899dc2219e6709b8998d54d99fd58aa6ac 100644 (file)
@@ -6930,6 +6930,13 @@ static void route_vty_short_status_out(struct vty *vty,
                vty_out(vty, " ");
 }
 
+static char *bgp_nexthop_fqdn(struct peer *peer)
+{
+       if (peer->hostname && bgp_flag_check(peer->bgp, BGP_FLAG_SHOW_HOSTNAME))
+               return peer->hostname;
+       return NULL;
+}
+
 /* called from terminal list command */
 void route_vty_out(struct vty *vty, struct prefix *p,
                   struct bgp_path_info *path, int display, safi_t safi,
@@ -6946,6 +6953,7 @@ void route_vty_out(struct vty *vty, struct prefix *p,
        bool nexthop_othervrf = false;
        vrf_id_t nexthop_vrfid = VRF_DEFAULT;
        const char *nexthop_vrfname = VRF_DEFAULT_NAME;
+       char *nexthop_fqdn = bgp_nexthop_fqdn(path->peer);
 
        if (json_paths)
                json_path = json_object_new_object();
@@ -7041,42 +7049,58 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                if (json_paths) {
                        json_nexthop_global = json_object_new_object();
 
-                       json_object_string_add(json_nexthop_global, "afi",
-                                              (af == AF_INET) ? "ip" : "ipv6");
-                       json_object_string_add(json_nexthop_global,
-                                              (af == AF_INET) ? "ip" : "ipv6",
-                                              nexthop);
+                       json_object_string_add(
+                               json_nexthop_global, "afi",
+                               nexthop_fqdn ? "fqdn"
+                                            : (af == AF_INET) ? "ip" : "ipv6");
+                       json_object_string_add(
+                               json_nexthop_global,
+                               nexthop_fqdn ? "fqdn"
+                                            : (af == AF_INET) ? "ip" : "ipv6",
+                               nexthop_fqdn ? nexthop_fqdn : nexthop);
                        json_object_boolean_true_add(json_nexthop_global,
                                                     "used");
                } else
-                       vty_out(vty, "%s%s", nexthop, vrf_id_str);
+                       vty_out(vty, "%s%s",
+                               nexthop_fqdn ? nexthop_fqdn : nexthop,
+                               vrf_id_str);
        } else if (safi == SAFI_EVPN) {
                if (json_paths) {
                        json_nexthop_global = json_object_new_object();
 
-                       json_object_string_add(json_nexthop_global, "ip",
-                                              inet_ntoa(attr->nexthop));
+                       json_object_string_add(
+                               json_nexthop_global,
+                               nexthop_fqdn ? "fqdn" : "ip",
+                               nexthop_fqdn ? nexthop_fqdn
+                                            : inet_ntoa(attr->nexthop));
                        json_object_string_add(json_nexthop_global, "afi",
                                               "ipv4");
                        json_object_boolean_true_add(json_nexthop_global,
                                                     "used");
                } else
-                       vty_out(vty, "%-16s%s", inet_ntoa(attr->nexthop),
+                       vty_out(vty, "%-16s%s",
+                               nexthop_fqdn ?: inet_ntoa(attr->nexthop),
                                vrf_id_str);
        } else if (safi == SAFI_FLOWSPEC) {
                if (attr->nexthop.s_addr != 0) {
                        if (json_paths) {
                                json_nexthop_global = json_object_new_object();
                                json_object_string_add(
-                                              json_nexthop_global, "ip",
-                                              inet_ntoa(attr->nexthop));
+                                       json_nexthop_global,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntoa(attr->nexthop));
                                json_object_string_add(json_nexthop_global,
                                                       "afi", "ipv4");
                                json_object_boolean_true_add(
                                                        json_nexthop_global,
                                                             "used");
                        } else {
-                               vty_out(vty, "%-16s", inet_ntoa(attr->nexthop));
+                               vty_out(vty, "%-16s",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntoa(attr->nexthop));
                        }
                }
        } else if (p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
@@ -7085,12 +7109,19 @@ void route_vty_out(struct vty *vty, struct prefix *p,
 
                        if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_EVPN))
                                json_object_string_add(
-                                       json_nexthop_global, "ip",
-                                       inet_ntoa(attr->mp_nexthop_global_in));
+                                       json_nexthop_global,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntoa(
+                                                         attr->mp_nexthop_global_in));
                        else
                                json_object_string_add(
-                                       json_nexthop_global, "ip",
-                                       inet_ntoa(attr->nexthop));
+                                       json_nexthop_global,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntoa(attr->nexthop));
 
                        json_object_string_add(json_nexthop_global, "afi",
                                               "ipv4");
@@ -7100,7 +7131,9 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                        char buf[BUFSIZ];
 
                        snprintf(buf, sizeof(buf), "%s%s",
-                               inet_ntoa(attr->nexthop), vrf_id_str);
+                                nexthop_fqdn ? nexthop_fqdn
+                                             : inet_ntoa(attr->nexthop),
+                                vrf_id_str);
                        vty_out(vty, "%-16s", buf);
                }
        }
@@ -7113,9 +7146,13 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                if (json_paths) {
                        json_nexthop_global = json_object_new_object();
                        json_object_string_add(
-                               json_nexthop_global, "ip",
-                               inet_ntop(AF_INET6, &attr->mp_nexthop_global,
-                                         buf, BUFSIZ));
+                               json_nexthop_global,
+                               nexthop_fqdn ? "fqdn" : "ip",
+                               nexthop_fqdn
+                                       ? nexthop_fqdn
+                                       : inet_ntop(AF_INET6,
+                                                   &attr->mp_nexthop_global,
+                                                   buf, BUFSIZ));
                        json_object_string_add(json_nexthop_global, "afi",
                                               "ipv6");
                        json_object_string_add(json_nexthop_global, "scope",
@@ -7127,10 +7164,14 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                            || (path->peer->conf_if)) {
                                json_nexthop_ll = json_object_new_object();
                                json_object_string_add(
-                                       json_nexthop_ll, "ip",
-                                       inet_ntop(AF_INET6,
-                                                 &attr->mp_nexthop_local, buf,
-                                                 BUFSIZ));
+                                       json_nexthop_ll,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntop(
+                                                         AF_INET6,
+                                                         &attr->mp_nexthop_local,
+                                                         buf, BUFSIZ));
                                json_object_string_add(json_nexthop_ll, "afi",
                                                       "ipv6");
                                json_object_string_add(json_nexthop_ll, "scope",
@@ -7169,10 +7210,12 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                                } else {
                                        len = vty_out(
                                                vty, "%s%s",
-                                               inet_ntop(
-                                                       AF_INET6,
-                                                       &attr->mp_nexthop_local,
-                                                       buf, BUFSIZ),
+                                               nexthop_fqdn
+                                                       ? nexthop_fqdn
+                                                       : inet_ntop(
+                                                                 AF_INET6,
+                                                                 &attr->mp_nexthop_local,
+                                                                 buf, BUFSIZ),
                                                vrf_id_str);
                                        len = 16 - len;
 
@@ -7184,10 +7227,13 @@ void route_vty_out(struct vty *vty, struct prefix *p,
                        } else {
                                len = vty_out(
                                        vty, "%s%s",
-                                       inet_ntop(AF_INET6,
-                                                 &attr->mp_nexthop_global, buf,
-                                                 BUFSIZ),
-                                                 vrf_id_str);
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntop(
+                                                         AF_INET6,
+                                                         &attr->mp_nexthop_global,
+                                                         buf, BUFSIZ),
+                                       vrf_id_str);
                                len = 16 - len;
 
                                if (len < 1)
@@ -7664,7 +7710,7 @@ void route_vty_out_overlay(struct vty *vty, struct prefix *p,
                        vty_out(vty, "%s", str);
                else
                        json_object_string_add(json_overlay, "esi", str);
-               
+
                XFREE(MTYPE_TMP, str);
 
                if (is_evpn_prefix_ipaddr_v4((struct prefix_evpn *)p)) {
@@ -8040,6 +8086,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
        bool nexthop_self =
                CHECK_FLAG(path->flags, BGP_PATH_ANNC_NH_SELF) ? true : false;
        int i;
+       char *nexthop_fqdn = bgp_nexthop_fqdn(path->peer);
 
        if (json_paths) {
                json_path = json_object_new_object();
@@ -8175,21 +8222,33 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
                            || safi == SAFI_EVPN) {
                                if (json_paths)
                                        json_object_string_add(
-                                               json_nexthop_global, "ip",
-                                               inet_ntoa(
-                                                       attr->mp_nexthop_global_in));
+                                               json_nexthop_global,
+                                               nexthop_fqdn ? "fqdn" : "ip",
+                                               nexthop_fqdn
+                                                       ? nexthop_fqdn
+                                                       : inet_ntoa(
+                                                                 attr->mp_nexthop_global_in));
                                else
                                        vty_out(vty, "    %s",
-                                               inet_ntoa(
-                                                       attr->mp_nexthop_global_in));
+                                               nexthop_fqdn
+                                                       ? nexthop_fqdn
+                                                       : inet_ntoa(
+                                                                 attr->mp_nexthop_global_in));
                        } else {
                                if (json_paths)
                                        json_object_string_add(
-                                               json_nexthop_global, "ip",
-                                               inet_ntoa(attr->nexthop));
+                                               json_nexthop_global,
+                                               nexthop_fqdn ? "fqdn" : "ip",
+                                               nexthop_fqdn
+                                                       ? nexthop_fqdn
+                                                       : inet_ntoa(
+                                                                 attr->nexthop));
                                else
                                        vty_out(vty, "    %s",
-                                               inet_ntoa(attr->nexthop));
+                                               nexthop_fqdn
+                                                       ? nexthop_fqdn
+                                                       : inet_ntoa(
+                                                                 attr->nexthop));
                        }
 
                        if (json_paths)
@@ -8198,19 +8257,28 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
                } else {
                        if (json_paths) {
                                json_object_string_add(
-                                       json_nexthop_global, "ip",
-                                       inet_ntop(AF_INET6,
-                                                 &attr->mp_nexthop_global, buf,
-                                                 INET6_ADDRSTRLEN));
+                                       json_nexthop_global,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntop(
+                                                         AF_INET6,
+                                                         &attr->mp_nexthop_global,
+                                                         buf,
+                                                         INET6_ADDRSTRLEN));
                                json_object_string_add(json_nexthop_global,
                                                       "afi", "ipv6");
                                json_object_string_add(json_nexthop_global,
                                                       "scope", "global");
                        } else {
                                vty_out(vty, "    %s",
-                                       inet_ntop(AF_INET6,
-                                                 &attr->mp_nexthop_global, buf,
-                                                 INET6_ADDRSTRLEN));
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntop(
+                                                         AF_INET6,
+                                                         &attr->mp_nexthop_global,
+                                                         buf,
+                                                         INET6_ADDRSTRLEN));
                        }
                }
 
@@ -8392,10 +8460,15 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
                        if (json_paths) {
                                json_nexthop_ll = json_object_new_object();
                                json_object_string_add(
-                                       json_nexthop_ll, "ip",
-                                       inet_ntop(AF_INET6,
-                                                 &attr->mp_nexthop_local, buf,
-                                                 INET6_ADDRSTRLEN));
+                                       json_nexthop_ll,
+                                       nexthop_fqdn ? "fqdn" : "ip",
+                                       nexthop_fqdn
+                                               ? nexthop_fqdn
+                                               : inet_ntop(
+                                                         AF_INET6,
+                                                         &attr->mp_nexthop_local,
+                                                         buf,
+                                                         INET6_ADDRSTRLEN));
                                json_object_string_add(json_nexthop_ll, "afi",
                                                       "ipv6");
                                json_object_string_add(json_nexthop_ll, "scope",
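Every hunk above applies the same substitution introduced by bgp_nexthop_fqdn(): when the peer advertised a hostname and BGP_FLAG_SHOW_HOSTNAME is set on the instance, the hostname is printed in place of the nexthop address, and the JSON key switches from "ip" to "fqdn". A minimal stand-alone sketch of that pattern follows; demo_peer and demo_nexthop_fqdn are hypothetical stand-ins, not FRR API.

    #include <stdio.h>

    /* Hypothetical stand-ins for struct peer / bgp_flag_check(). */
    struct demo_peer {
            const char *hostname;  /* hostname learned from the peer, may be NULL */
            int show_hostname;     /* stands in for BGP_FLAG_SHOW_HOSTNAME */
    };

    static const char *demo_nexthop_fqdn(const struct demo_peer *peer)
    {
            if (peer->hostname && peer->show_hostname)
                    return peer->hostname;
            return NULL;
    }

    int main(void)
    {
            struct demo_peer peer = { .hostname = "r2", .show_hostname = 1 };
            const char *nexthop = "10.0.0.2"; /* would come from inet_ntoa()/inet_ntop() */
            const char *fqdn = demo_nexthop_fqdn(&peer);

            /* mirrors: vty_out(vty, "%-16s%s", nexthop_fqdn ?: inet_ntoa(...), ...) */
            printf("%-16s\n", fqdn ? fqdn : nexthop);
            return 0;
    }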
index ae51f1d780d72ad168e47d4fba53b33de9ca72a3..6451c8d8ed3da9f2e1caa9e9321c4e09d51b61c5 100644 (file)
@@ -1129,10 +1129,25 @@ DEFUN (no_router_bgp,
                }
 
                if (bgp->l3vni) {
-                       vty_out(vty, "%% Please unconfigure l3vni %u",
+                       vty_out(vty, "%% Please unconfigure l3vni %u\n",
                                bgp->l3vni);
                        return CMD_WARNING_CONFIG_FAILED;
                }
+
+               /* Cannot delete default instance if vrf instances exist */
+               if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
+                       struct listnode *node;
+                       struct bgp *tmp_bgp;
+
+                       for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, tmp_bgp)) {
+                               if (tmp_bgp->inst_type
+                                   == BGP_INSTANCE_TYPE_VRF) {
+                                       vty_out(vty,
+                                               "%% Cannot delete default BGP instance. Dependent VRF instances exist\n");
+                                       return CMD_WARNING_CONFIG_FAILED;
+                               }
+                       }
+               }
        }
 
        bgp_delete(bgp);
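The new guard above means the default instance can only be removed after all VRF instances are gone. Purely for illustration (hypothetical AS number, VRF name and prompt), a session would now look like:

    r1(config)# no router bgp 65001
    % Cannot delete default BGP instance. Dependent VRF instances exist
    r1(config)# no router bgp 65001 vrf blue
    r1(config)# no router bgp 65001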
index 2a7663f195cda2e445e66e457b1be0bc2d99853f..2e648af1bb986b639f1457f7dd66da5fe89f0458 100644 (file)
@@ -2232,6 +2232,8 @@ int peer_delete(struct peer *peer)
 
        SET_FLAG(peer->flags, PEER_FLAG_DELETE);
 
+       bgp_bfd_deregister_peer(peer);
+
        /* If this peer belongs to peer group, clear up the
           relationship.  */
        if (peer->group) {
index 5148d3baff18a9dad2e78bea440234afeb58b0b0..414423ae7c5a222e10bdae770add8dd8577e7aa2 100644 (file)
@@ -166,6 +166,20 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
 
    Turns on BFD support for PIM for this interface.
 
+.. index:: ip pim bsm
+.. clicmd:: ip pim bsm
+
+   Tell pim that we would like to use this interface to process bootstrap
+   messages. This is enabled by default. The 'no' form of this command
+   restricts bsm messages on this interface.
+
+.. index:: ip pim unicast-bsm
+.. clicmd:: ip pim unicast-bsm
+
+   Tell pim that we would like to allow this interface to process unicast
+   bootstrap messages. This is enabled by default. The 'no' form of this
+   command restricts processing of unicast bsm messages on this interface.
+
 .. index:: ip pim drpriority (1-4294967295)
 .. clicmd:: ip pim drpriority (1-4294967295)
 
@@ -407,6 +421,21 @@ cause great confusion.
 
    Display upstream information for S,G's and the RPF data associated with them.
 
+.. index:: show ip pim bsr
+.. clicmd:: show ip pim bsr
+
+   Display current bsr, its uptime and last received bsm age.
+
+.. index:: show ip pim bsrp-info
+.. clicmd:: show ip pim bsrp-info
+
+   Display group-to-rp mappings received from E-BSR.
+
+.. index:: show ip pim bsm-database
+.. clicmd:: show ip pim bsm-database
+
+   Display all fragments of the stored bootstrap messages in a user readable
+   format.
+
 .. index:: show ip rpf
 .. clicmd:: show ip rpf
 
@@ -470,6 +499,11 @@ the config was written out.
 
    This traces pim code and how it is running.
 
+.. index:: debug pim bsm
+.. clicmd:: debug pim bsm
+
+   This turns on debugging for BSR message processing.
+
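A hypothetical vtysh session exercising the commands documented above (the interface name is illustrative only):

    r1# configure terminal
    r1(config)# interface eth0
    r1(config-if)# no ip pim bsm
    r1(config-if)# no ip pim unicast-bsm
    r1(config-if)# end
    r1# debug pim bsm
    r1# show ip pim bsr
    r1# show ip pim bsrp-info
    r1# show ip pim bsm-database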
 .. index:: debug pim zebra
 .. clicmd:: debug pim zebra
 
index 824acd0ff80f493ada4f48f60198515f5a550a21..ea66e6950e68d8c0516297f1bdc1aa8ea6d89164 100644 (file)
@@ -73,7 +73,6 @@ uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05};
 uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04};
 
 static uint8_t discard_buff[8192];
-static uint8_t sock_buff[8192];
 
 /*
  * if level is 0 we are joining p2p multicast
@@ -277,19 +276,22 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa)
                return ISIS_WARNING;
        }
 
-       /* on lan we have to read to the static buff first */
-       bytesread = recvfrom(circuit->fd, sock_buff, sizeof(sock_buff),
-                            MSG_DONTWAIT, (struct sockaddr *)&s_addr,
-                            (socklen_t *)&addr_len);
+       /* Ensure that we have enough space for a pdu padded to fill the mtu */
+       unsigned int max_size =
+               circuit->interface->mtu > circuit->interface->mtu6
+                       ? circuit->interface->mtu
+                       : circuit->interface->mtu6;
+       uint8_t temp_buff[max_size];
+       bytesread =
+               recvfrom(circuit->fd, temp_buff, max_size, MSG_DONTWAIT,
+                        (struct sockaddr *)&s_addr, (socklen_t *)&addr_len);
        if (bytesread < 0) {
-               zlog_warn("isis_recv_pdu_bcast(): recvfrom() failed");
+               zlog_warn("%s: recvfrom() failed", __func__);
                return ISIS_WARNING;
        }
-
        /* then we lose the LLC */
-       stream_write(circuit->rcv_stream, sock_buff + LLC_LEN,
+       stream_write(circuit->rcv_stream, temp_buff + LLC_LEN,
                     bytesread - LLC_LEN);
-
        memcpy(ssnpa, &s_addr.sll_addr, s_addr.sll_halen);
 
        return ISIS_OK;
@@ -337,6 +339,7 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level)
 {
        struct msghdr msg;
        struct iovec iov[2];
+       char temp_buff[LLC_LEN];
 
        /* we need to do the LLC in here because of P2P circuits, which will
         * not need it
@@ -361,16 +364,16 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level)
 
        /* on a broadcast circuit */
        /* first we put the LLC in */
-       sock_buff[0] = 0xFE;
-       sock_buff[1] = 0xFE;
-       sock_buff[2] = 0x03;
+       temp_buff[0] = 0xFE;
+       temp_buff[1] = 0xFE;
+       temp_buff[2] = 0x03;
 
        memset(&msg, 0, sizeof(msg));
        msg.msg_name = &sa;
        msg.msg_namelen = sizeof(struct sockaddr_ll);
        msg.msg_iov = iov;
        msg.msg_iovlen = 2;
-       iov[0].iov_base = sock_buff;
+       iov[0].iov_base = temp_buff;
        iov[0].iov_len = LLC_LEN;
        iov[1].iov_base = circuit->snd_stream->data;
        iov[1].iov_len = stream_get_endp(circuit->snd_stream);
index 43bc709325d6615e9b0b65a8ddd14fb894e28014..e8ba9edc128473a18ff9bca2dd6b7a46fd8655f8 100644 (file)
@@ -92,6 +92,46 @@ void listnode_add_head(struct list *list, void *val)
        list->count++;
 }
 
+bool listnode_add_sort_nodup(struct list *list, void *val)
+{
+       struct listnode *n;
+       struct listnode *new;
+       int ret;
+
+       assert(val != NULL);
+
+       if (list->cmp) {
+               for (n = list->head; n; n = n->next) {
+                       ret = (*list->cmp)(val, n->data);
+                       if (ret < 0) {
+                               new = listnode_new();
+                               new->data = val;
+
+                               new->next = n;
+                               new->prev = n->prev;
+
+                               if (n->prev)
+                                       n->prev->next = new;
+                               else
+                                       list->head = new;
+                               n->prev = new;
+                               list->count++;
+                               return true;
+                       }
+                       /* found duplicate return false */
+                       if (ret == 0)
+                               return false;
+               }
+       }
+
+       new = listnode_new();
+       new->data = val;
+
+       LISTNODE_ATTACH(list, new);
+
+       return true;
+}
+
 void listnode_add_sort(struct list *list, void *val)
 {
        struct listnode *n;
@@ -242,6 +282,23 @@ void list_delete_all_node(struct list *list)
        list->count = 0;
 }
 
+void list_filter_out_nodes(struct list *list, bool (*cond)(void *data))
+{
+       struct listnode *node;
+       struct listnode *next;
+       void *data;
+
+       assert(list);
+
+       for (ALL_LIST_ELEMENTS(list, node, next, data)) {
+               if ((cond && cond(data)) || (!cond)) {
+                       if (*list->del)
+                               (*list->del)(data);
+                       list_delete_node(list, node);
+               }
+       }
+}
+
 void list_delete(struct list **list)
 {
        assert(*list);
index c2b289596d86ed586ae7c8fbef26b756e97189c0..da42aa268827d275a0f215368df081f9a997d761 100644 (file)
@@ -308,6 +308,39 @@ extern void list_delete_node(struct list *list, struct listnode *node);
  */
 extern void list_add_list(struct list *list, struct list *add);
 
+/*
+ * Delete all nodes which satisfy a condition from a list.
+ * A node is deleted if the cond function returns true for it.
+ * If the function pointer passed is NULL, all nodes are deleted.
+ *
+ * list
+ *    list to operate on
+ * cond
+ *    function pointer which takes node data as input and returns true or false
+ */
+
+extern void list_filter_out_nodes(struct list *list, bool (*cond)(void *data));
+
+/*
+ * Insert a new element into a list with insertion sort if there is no
+ * duplicate element present in the list. This assumes the input list is
+ * sorted. If it is unsorted, duplicates are only detected up to the point
+ * where the insertion position is found.
+ *
+ * If list->cmp is set, it is used to determine the position at which to
+ * insert the new element. If it is not set, this function is equivalent to
+ * listnode_add. A duplicate is detected when the cmp function returns 0.
+ *
+ * Runtime is O(N).
+ *
+ * list
+ *    list to operate on
+ *
+ * val
+ *    element to add
+ */
+
+extern bool listnode_add_sort_nodup(struct list *list, void *val);
 /* List iteration macro.
  * Usage: for (ALL_LIST_ELEMENTS (...) { ... }
  * It is safe to delete the listnode using this macro.
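A minimal usage sketch for the two new helpers, based only on the declarations above; the include path and the demo values are assumptions, not part of the change.

    #include "linklist.h"   /* assumed include path within the FRR tree */

    static int int_cmp(void *a, void *b)
    {
            return *(int *)a - *(int *)b;
    }

    static bool is_negative(void *data)
    {
            return *(int *)data < 0;
    }

    void linklist_demo(void)
    {
            struct list *l = list_new();
            static int v1 = 5, v2 = -3, v3 = 5;

            l->cmp = int_cmp;

            listnode_add_sort_nodup(l, &v1); /* inserted, list stays sorted */
            listnode_add_sort_nodup(l, &v2); /* inserted ahead of v1 */
            listnode_add_sort_nodup(l, &v3); /* duplicate of v1, returns false */

            /* drop every node whose data satisfies the predicate */
            list_filter_out_nodes(l, is_negative);

            list_delete(&l);
    }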
index 8e16e705901b8a951b81bf108cba5b5e5dc5f7cb..57a2f1daaaec4fe0a222253c2f77472db353a43f 100644 (file)
 DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop")
 DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label")
 
-/* check if nexthops are same, non-recursive */
-int nexthop_same_no_recurse(const struct nexthop *next1,
-                           const struct nexthop *next2)
+static int _nexthop_labels_cmp(const struct nexthop *nh1,
+                              const struct nexthop *nh2)
 {
-       if (next1->type != next2->type)
+       const struct mpls_label_stack *nhl1 = NULL;
+       const struct mpls_label_stack *nhl2 = NULL;
+
+       nhl1 = nh1->nh_label;
+       nhl2 = nh2->nh_label;
+
+       /* No labels is a match */
+       if (!nhl1 && !nhl2)
                return 0;
 
-       switch (next1->type) {
+       if (nhl1 && !nhl2)
+               return 1;
+
+       if (nhl2 && !nhl1)
+               return -1;
+
+       if (nhl1->num_labels > nhl2->num_labels)
+               return 1;
+
+       if (nhl1->num_labels < nhl2->num_labels)
+               return -1;
+
+       return memcmp(nhl1->label, nhl2->label, nhl1->num_labels);
+}
+
+static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
+                              const union g_addr *addr1,
+                              const union g_addr *addr2)
+{
+       int ret = 0;
+
+       switch (type) {
        case NEXTHOP_TYPE_IPV4:
        case NEXTHOP_TYPE_IPV4_IFINDEX:
-               if (!IPV4_ADDR_SAME(&next1->gate.ipv4, &next2->gate.ipv4))
-                       return 0;
-               if (next1->ifindex && (next1->ifindex != next2->ifindex))
-                       return 0;
+               ret = IPV4_ADDR_CMP(&addr1->ipv4, &addr2->ipv4);
+               break;
+       case NEXTHOP_TYPE_IPV6:
+       case NEXTHOP_TYPE_IPV6_IFINDEX:
+               ret = IPV6_ADDR_CMP(&addr1->ipv6, &addr2->ipv6);
                break;
        case NEXTHOP_TYPE_IFINDEX:
-               if (next1->ifindex != next2->ifindex)
-                       return 0;
+       case NEXTHOP_TYPE_BLACKHOLE:
+               /* No addr here */
                break;
+       }
+
+       return ret;
+}
+
+static int _nexthop_gateway_cmp(const struct nexthop *nh1,
+                               const struct nexthop *nh2)
+{
+       return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+}
+
+static int _nexthop_source_cmp(const struct nexthop *nh1,
+                              const struct nexthop *nh2)
+{
+       return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+}
+
+static int _nexthop_cmp_no_labels(const struct nexthop *next1,
+                                 const struct nexthop *next2)
+{
+       int ret = 0;
+
+       if (next1->vrf_id < next2->vrf_id)
+               return -1;
+
+       if (next1->vrf_id > next2->vrf_id)
+               return 1;
+
+       if (next1->type < next2->type)
+               return -1;
+
+       if (next1->type > next2->type)
+               return 1;
+
+       switch (next1->type) {
+       case NEXTHOP_TYPE_IPV4:
        case NEXTHOP_TYPE_IPV6:
-               if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
-                       return 0;
+               ret = _nexthop_gateway_cmp(next1, next2);
+               if (ret != 0)
+                       return ret;
                break;
+       case NEXTHOP_TYPE_IPV4_IFINDEX:
        case NEXTHOP_TYPE_IPV6_IFINDEX:
-               if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
-                       return 0;
-               if (next1->ifindex != next2->ifindex)
-                       return 0;
+               ret = _nexthop_gateway_cmp(next1, next2);
+               if (ret != 0)
+                       return ret;
+               /* Intentional Fall-Through */
+       case NEXTHOP_TYPE_IFINDEX:
+               if (next1->ifindex < next2->ifindex)
+                       return -1;
+
+               if (next1->ifindex > next2->ifindex)
+                       return 1;
                break;
-       default:
-               /* do nothing */
+       case NEXTHOP_TYPE_BLACKHOLE:
+               if (next1->bh_type < next2->bh_type)
+                       return -1;
+
+               if (next1->bh_type > next2->bh_type)
+                       return 1;
                break;
        }
-       return 1;
+
+       ret = _nexthop_source_cmp(next1, next2);
+
+       return ret;
+}
+
+int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
+{
+       int ret = 0;
+
+       ret = _nexthop_cmp_no_labels(next1, next2);
+       if (ret != 0)
+               return ret;
+
+       ret = _nexthop_labels_cmp(next1, next2);
+
+       return ret;
 }
 
 int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2)
@@ -121,27 +213,12 @@ const char *nexthop_type_to_str(enum nexthop_types_t nh_type)
 /*
  * Check if the labels match for the 2 nexthops specified.
  */
-int nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
+bool nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
 {
-       const struct mpls_label_stack *nhl1, *nhl2;
-
-       nhl1 = nh1->nh_label;
-       nhl2 = nh2->nh_label;
-
-       /* No labels is a match */
-       if (!nhl1 && !nhl2)
-               return 1;
-
-       if (!nhl1 || !nhl2)
-               return 0;
-
-       if (nhl1->num_labels != nhl2->num_labels)
-               return 0;
-
-       if (memcmp(nhl1->label, nhl2->label, nhl1->num_labels))
-               return 0;
+       if (_nexthop_labels_cmp(nh1, nh2) != 0)
+               return false;
 
-       return 1;
+       return true;
 }
 
 struct nexthop *nexthop_new(void)
@@ -180,45 +257,28 @@ bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2)
        if (nh1 == nh2)
                return true;
 
-       if (nh1->vrf_id != nh2->vrf_id)
+       if (nexthop_cmp(nh1, nh2) != 0)
                return false;
 
-       if (nh1->type != nh2->type)
+       return true;
+}
+
+bool nexthop_same_no_labels(const struct nexthop *nh1,
+                           const struct nexthop *nh2)
+{
+       if (nh1 && !nh2)
                return false;
 
-       switch (nh1->type) {
-       case NEXTHOP_TYPE_IFINDEX:
-               if (nh1->ifindex != nh2->ifindex)
-                       return false;
-               break;
-       case NEXTHOP_TYPE_IPV4:
-               if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
-                       return false;
-               break;
-       case NEXTHOP_TYPE_IPV4_IFINDEX:
-               if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
-                       return false;
-               if (nh1->ifindex != nh2->ifindex)
-                       return false;
-               break;
-       case NEXTHOP_TYPE_IPV6:
-               if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
-                       return false;
-               break;
-       case NEXTHOP_TYPE_IPV6_IFINDEX:
-               if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
-                       return false;
-               if (nh1->ifindex != nh2->ifindex)
-                       return false;
-               break;
-       case NEXTHOP_TYPE_BLACKHOLE:
-               if (nh1->bh_type != nh2->bh_type)
-                       return false;
-               break;
-       }
+       if (!nh1 && nh2)
+               return false;
+
+       if (nh1 == nh2)
+               return true;
+
+       if (_nexthop_cmp_no_labels(nh1, nh2) != 0)
+               return false;
 
-       /* Compare labels too (if present) */
-       return (!!nexthop_labels_match(nh1, nh2));
+       return true;
 }
 
 /* Update nexthop with label information. */
index 663acaeb69513eb5e64ff5c3c87e817e815ae115..5b6c12d4ef504005341f45c9c886c8c5f2a7f87f 100644 (file)
@@ -139,12 +139,13 @@ void nexthop_del_labels(struct nexthop *);
 uint32_t nexthop_hash(const struct nexthop *nexthop);
 
 extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
+extern bool nexthop_same_no_labels(const struct nexthop *nh1,
+                                  const struct nexthop *nh2);
+extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
 
 extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
-extern int nexthop_same_no_recurse(const struct nexthop *next1,
-                                  const struct nexthop *next2);
-extern int nexthop_labels_match(const struct nexthop *nh1,
-                               const struct nexthop *nh2);
+extern bool nexthop_labels_match(const struct nexthop *nh1,
+                                const struct nexthop *nh2);
 extern int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2);
 
 extern const char *nexthop2str(const struct nexthop *nexthop,
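A small sketch of the intended semantics of the new comparators, using only the struct nexthop fields that appear in the lib/nexthop.c hunk above; the include path and the demo addresses are assumptions.

    #include <assert.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include "nexthop.h"   /* assumed include path within the FRR tree */

    void nexthop_cmp_demo(void)
    {
            struct nexthop a, b;

            memset(&a, 0, sizeof(a));
            memset(&b, 0, sizeof(b));

            a.type = b.type = NEXTHOP_TYPE_IPV4_IFINDEX;
            a.ifindex = b.ifindex = 3;
            inet_pton(AF_INET, "192.0.2.1", &a.gate.ipv4);
            inet_pton(AF_INET, "192.0.2.1", &b.gate.ipv4);

            /* same vrf, type, gateway, ifindex and (absent) label stack */
            assert(nexthop_cmp(&a, &b) == 0);
            assert(nexthop_same(&a, &b));
            assert(nexthop_same_no_labels(&a, &b));
            assert(nexthop_labels_match(&a, &b));

            /* a differing ifindex gives a non-zero ordering, so not "same" */
            b.ifindex = 4;
            assert(nexthop_cmp(&a, &b) != 0);
            assert(!nexthop_same(&a, &b));
    }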
index 2a97e1e5b2bfc57c51d8b6fdffe47f02afa60745..9957ff1f51a8244314dc006648638206efb89c01 100644 (file)
@@ -998,22 +998,36 @@ static int vty_prefix_list_uninstall(struct vty *vty, afi_t afi,
                return CMD_SUCCESS;
        }
 
-       /* We must have, at a minimum, both the type and prefix here */
-       if ((typestr == NULL) || (prefix == NULL)) {
-               vty_out(vty, "%% Both prefix and type required\n");
-               return CMD_WARNING_CONFIG_FAILED;
-       }
-
        /* Check sequence number. */
        if (seq)
                seqnum = (int64_t)atol(seq);
 
+       /* Sequence number specified, but nothing else. */
+       if (seq && typestr == NULL && prefix == NULL && ge == NULL
+           && le == NULL) {
+               pentry = prefix_seq_check(plist, seqnum);
+
+               if (pentry == NULL) {
+                       vty_out(vty,
+                               "%% Can't find prefix-list %s with sequence number %lu\n",
+                               name, seqnum);
+                       return CMD_WARNING_CONFIG_FAILED;
+               }
+
+               prefix_list_entry_delete(plist, pentry, 1);
+               return CMD_SUCCESS;
+       }
+
        /* ge and le number */
        if (ge)
                genum = atoi(ge);
        if (le)
                lenum = atoi(le);
 
+       /* We must have, at a minimum, both the type and prefix here */
+       if ((typestr == NULL) || (prefix == NULL))
+               return CMD_WARNING_CONFIG_FAILED;
+
        /* Check of filter type. */
        if (strncmp("permit", typestr, 1) == 0)
                type = PREFIX_PERMIT;
@@ -1375,6 +1389,17 @@ DEFPY (no_ip_prefix_list,
                                         action, dest, ge_str, le_str);
 }
 
+DEFPY(no_ip_prefix_list_seq, no_ip_prefix_list_seq_cmd,
+      "no ip prefix-list WORD seq (1-4294967295)",
+      NO_STR IP_STR PREFIX_LIST_STR
+      "Name of a prefix list\n"
+      "sequence number of an entry\n"
+      "Sequence number\n")
+{
+       return vty_prefix_list_uninstall(vty, AFI_IP, prefix_list, seq_str,
+                                        NULL, NULL, NULL, NULL);
+}
+
 DEFPY (no_ip_prefix_list_all,
        no_ip_prefix_list_all_cmd,
        "no ip prefix-list WORD",
@@ -2059,6 +2084,7 @@ static void prefix_list_init_ipv4(void)
 
        install_element(CONFIG_NODE, &ip_prefix_list_cmd);
        install_element(CONFIG_NODE, &no_ip_prefix_list_cmd);
+       install_element(CONFIG_NODE, &no_ip_prefix_list_seq_cmd);
        install_element(CONFIG_NODE, &no_ip_prefix_list_all_cmd);
 
        install_element(CONFIG_NODE, &ip_prefix_list_description_cmd);
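A hypothetical session using the new sequence-only removal (list name, prefixes and sequence numbers are illustrative):

    r1(config)# ip prefix-list EXAMPLE seq 5 permit 10.0.0.0/8
    r1(config)# ip prefix-list EXAMPLE seq 10 permit 192.168.0.0/16
    r1(config)# no ip prefix-list EXAMPLE seq 10
    r1(config)# no ip prefix-list EXAMPLE seq 20
    % Can't find prefix-list EXAMPLE with sequence number 20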
index 0a450834e3efe55909f2791c7e786e4f369dd69a..438a0c9b64dba825bc817f2fb0eebf6c2d9af0e8 100644 (file)
@@ -418,7 +418,7 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
          Add PIM header
        */
        pim_msg_size = pim_msg_curr - pim_msg;
-       pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT);
+       pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT, false);
 
        return pim_msg_size;
 }
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
new file mode 100644 (file)
index 0000000..4ba8d08
--- /dev/null
@@ -0,0 +1,1404 @@
+/*
+ * pim_bsm.c: PIM BSM handling routines
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+#include "if.h"
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_instance.h"
+#include "pim_rpf.h"
+#include "pim_hello.h"
+#include "pim_pim.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_time.h"
+
+/* Functions forward declaration */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+                                         int hold_time);
+
+/* Memory Types */
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_NODE, "PIM BSR advertised RP info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_INFO, "PIM BSM Info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet")
+
+/* All forwarded bsm packets must fit within the ip mtu less the ip header (max) */
+#define MAX_IP_HDR_LEN 24
+
+/* pim_bsm_write_config - Write the interface pim bsm configuration.*/
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
+{
+       struct pim_interface *pim_ifp = ifp->info;
+
+       if (pim_ifp) {
+               if (!pim_ifp->bsm_enable)
+                       vty_out(vty, " no ip pim bsm\n");
+               if (!pim_ifp->ucast_bsm_accept)
+                       vty_out(vty, " no ip pim unicast-bsm\n");
+       }
+}
+
+static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
+{
+       if (bsgrp_node->bsrp_list)
+               list_delete(&bsgrp_node->bsrp_list);
+       if (bsgrp_node->partial_bsrp_list)
+               list_delete(&bsgrp_node->partial_bsrp_list);
+       XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
+}
+
+static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
+{
+       struct route_node *rn;
+
+       rn = route_node_lookup(rt, grp);
+       if (rn) {
+               rn->info = NULL;
+               route_unlock_node(rn);
+               route_unlock_node(rn);
+       }
+}
+
+static void pim_bsm_node_free(struct bsm_info *bsm)
+{
+       if (bsm->bsm)
+               XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, bsm->bsm);
+       XFREE(MTYPE_PIM_BSM_INFO, bsm);
+}
+
+static int pim_g2rp_list_compare(struct bsm_rpinfo *node1,
+                                struct bsm_rpinfo *node2)
+{
+       /* RP election algorithm:
+        * Step-1 : The lowest RP priority has the highest precedence.
+        * Step-2 : If priorities are equal, the higher hash value has
+        *          higher precedence.
+        * Step-3 : If hash values are also equal, the highest RP address
+        *          becomes the elected RP.
+        */
+       if (node1->rp_prio < node2->rp_prio)
+               return -1;
+       if (node1->rp_prio > node2->rp_prio)
+               return 1;
+       if (node1->hash < node2->hash)
+               return 1;
+       if (node1->hash > node2->hash)
+               return -1;
+       if (node1->rp_address.s_addr < node2->rp_address.s_addr)
+               return 1;
+       if (node1->rp_address.s_addr > node2->rp_address.s_addr)
+               return -1;
+       return 0;
+}
+
+static void pim_free_bsrp_node(struct bsm_rpinfo *bsrp_info)
+{
+       if (bsrp_info->g2rp_timer)
+               THREAD_OFF(bsrp_info->g2rp_timer);
+       XFREE(MTYPE_PIM_BSRP_NODE, bsrp_info);
+}
+
+static struct list *pim_alloc_bsrp_list(void)
+{
+       struct list *new_list = NULL;
+
+       new_list = list_new();
+
+       if (!new_list)
+               return NULL;
+
+       new_list->cmp = (int (*)(void *, void *))pim_g2rp_list_compare;
+       new_list->del = (void (*)(void *))pim_free_bsrp_node;
+
+       return new_list;
+}
+
+static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
+                                                struct prefix *grp)
+{
+       struct route_node *rn;
+       struct bsgrp_node *bsgrp;
+
+       rn = route_node_get(rt, grp);
+       if (!rn) {
+               zlog_warn("%s: route node creation failed",
+                         __PRETTY_FUNCTION__);
+               return NULL;
+       }
+       bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
+
+       if (!bsgrp) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: bsgrp alloc failed",
+                                  __PRETTY_FUNCTION__);
+               route_unlock_node(rn);
+               return NULL;
+       }
+
+       rn->info = bsgrp;
+       bsgrp->bsrp_list = pim_alloc_bsrp_list();
+       bsgrp->partial_bsrp_list = pim_alloc_bsrp_list();
+
+       if ((!bsgrp->bsrp_list) || (!bsgrp->partial_bsrp_list)) {
+               route_unlock_node(rn);
+               pim_free_bsgrp_data(bsgrp);
+               return NULL;
+       }
+
+       prefix_copy(&bsgrp->group, grp);
+       return bsgrp;
+}
+
+static int pim_on_bs_timer(struct thread *t)
+{
+       struct route_node *rn;
+       struct bsm_scope *scope;
+       struct bsgrp_node *bsgrp_node;
+       struct bsm_rpinfo *bsrp;
+       struct prefix nht_p;
+       char buf[PREFIX2STR_BUFFER];
+       bool is_bsr_tracking = true;
+
+       scope = THREAD_ARG(t);
+       THREAD_OFF(scope->bs_timer);
+
+       if (PIM_DEBUG_BSM)
+               zlog_debug("%s: Bootstrap Timer expired for scope: %d",
+                          __PRETTY_FUNCTION__, scope->sz_id);
+
+       /* Remove next hop tracking for the bsr */
+       nht_p.family = AF_INET;
+       nht_p.prefixlen = IPV4_MAX_BITLEN;
+       nht_p.u.prefix4 = scope->current_bsr;
+       if (PIM_DEBUG_BSM) {
+               prefix2str(&nht_p, buf, sizeof(buf));
+               zlog_debug("%s: Deregister BSR addr %s with Zebra NHT",
+                          __PRETTY_FUNCTION__, buf);
+       }
+       pim_delete_tracked_nexthop(scope->pim, &nht_p, NULL, NULL,
+                                  is_bsr_tracking);
+
+       /* Reset scope zone data */
+       scope->accept_nofwd_bsm = false;
+       scope->state = ACCEPT_ANY;
+       scope->current_bsr.s_addr = INADDR_ANY;
+       scope->current_bsr_prio = 0;
+       scope->current_bsr_first_ts = 0;
+       scope->current_bsr_last_ts = 0;
+       scope->bsm_frag_tag = 0;
+       list_delete_all_node(scope->bsm_list);
+
+       for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
+
+               bsgrp_node = (struct bsgrp_node *)rn->info;
+               if (!bsgrp_node) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug("%s: bsgrp_node is null",
+                                          __PRETTY_FUNCTION__);
+                       continue;
+               }
+               /* Give grace time for rp to continue for another hold time */
+               if ((bsgrp_node->bsrp_list) && (bsgrp_node->bsrp_list->count)) {
+                       bsrp = listnode_head(bsgrp_node->bsrp_list);
+                       pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
+               }
+               /* clear pending list */
+               if ((bsgrp_node->partial_bsrp_list)
+                   && (bsgrp_node->partial_bsrp_list->count)) {
+                       list_delete_all_node(bsgrp_node->partial_bsrp_list);
+                       bsgrp_node->pend_rp_cnt = 0;
+               }
+       }
+       return 0;
+}
+
+static void pim_bs_timer_stop(struct bsm_scope *scope)
+{
+       if (PIM_DEBUG_BSM)
+               zlog_debug("%s : BS timer being stopped of sz: %d",
+                          __PRETTY_FUNCTION__, scope->sz_id);
+       THREAD_OFF(scope->bs_timer);
+}
+
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
+{
+       if (!scope) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s : Invalid scope(NULL).",
+                                  __PRETTY_FUNCTION__);
+               return;
+       }
+       THREAD_OFF(scope->bs_timer);
+       if (PIM_DEBUG_BSM)
+               zlog_debug("%s : starting bs timer for scope %d with timeout %d secs",
+                          __PRETTY_FUNCTION__, scope->sz_id, bs_timeout);
+       thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+                        &scope->bs_timer);
+}
+
+static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
+{
+       pim_bs_timer_start(scope, bs_timeout);
+}
+
+void pim_bsm_proc_init(struct pim_instance *pim)
+{
+       memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
+
+       pim->global_scope.sz_id = PIM_GBL_SZ_ID;
+       pim->global_scope.bsrp_table = route_table_init();
+       pim->global_scope.accept_nofwd_bsm = true;
+       pim->global_scope.state = NO_INFO;
+       pim->global_scope.pim = pim;
+       pim->global_scope.bsm_list = list_new();
+       pim->global_scope.bsm_list->del = (void (*)(void *))pim_bsm_node_free;
+       pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
+}
+
+void pim_bsm_proc_free(struct pim_instance *pim)
+{
+       struct route_node *rn;
+       struct bsgrp_node *bsgrp;
+
+       pim_bs_timer_stop(&pim->global_scope);
+
+       if (pim->global_scope.bsm_list)
+               list_delete(&pim->global_scope.bsm_list);
+
+       for (rn = route_top(pim->global_scope.bsrp_table); rn;
+            rn = route_next(rn)) {
+               bsgrp = rn->info;
+               if (!bsgrp)
+                       continue;
+               pim_free_bsgrp_data(bsgrp);
+       }
+
+       if (pim->global_scope.bsrp_table)
+               route_table_finish(pim->global_scope.bsrp_table);
+}
+
+static bool is_hold_time_elapsed(void *data)
+{
+       struct bsm_rpinfo *bsrp;
+
+       bsrp = data;
+
+       if (bsrp->elapse_time < bsrp->rp_holdtime)
+               return false;
+       else
+               return true;
+}
+
+static int pim_on_g2rp_timer(struct thread *t)
+{
+       struct bsm_rpinfo *bsrp;
+       struct bsm_rpinfo *bsrp_node;
+       struct bsgrp_node *bsgrp_node;
+       struct listnode *bsrp_ln;
+       struct pim_instance *pim;
+       struct rp_info *rp_info;
+       struct route_node *rn;
+       uint16_t elapse;
+       struct in_addr bsrp_addr;
+
+       bsrp = THREAD_ARG(t);
+       THREAD_OFF(bsrp->g2rp_timer);
+       bsgrp_node = bsrp->bsgrp_node;
+
+       /* elapse time is the hold time of expired node */
+       elapse = bsrp->rp_holdtime;
+       bsrp_addr = bsrp->rp_address;
+
+       /* update elapse for all bsrp nodes */
+       for (ALL_LIST_ELEMENTS_RO(bsgrp_node->bsrp_list, bsrp_ln, bsrp_node))
+               bsrp_node->elapse_time += elapse;
+
+       /* remove the expired nodes from the list */
+       list_filter_out_nodes(bsgrp_node->bsrp_list, is_hold_time_elapsed);
+
+       /* Get the next elected rp node */
+       bsrp = listnode_head(bsgrp_node->bsrp_list);
+       pim = bsgrp_node->scope->pim;
+       rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+       if (!rn) {
+               zlog_warn("%s: Route node doesn't exist", __PRETTY_FUNCTION__);
+               return 0;
+       }
+
+       rp_info = (struct rp_info *)rn->info;
+
+       if (!rp_info) {
+               route_unlock_node(rn);
+               return 0;
+       }
+
+       if (rp_info->rp_src != RP_SRC_STATIC) {
+               /* If new rp available, change it else delete the existing */
+               if (bsrp) {
+                       bsrp_addr = bsrp->rp_address;
+                       pim_g2rp_timer_start(
+                               bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
+                       pim_rp_change(pim, bsrp_addr, bsgrp_node->group,
+                                     RP_SRC_BSR);
+               } else {
+                       pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
+                                  RP_SRC_BSR);
+               }
+       }
+
+       if ((!bsgrp_node->bsrp_list->count)
+           && (!bsgrp_node->partial_bsrp_list->count)) {
+               pim_free_bsgrp_node(pim->global_scope.bsrp_table,
+                                   &bsgrp_node->group);
+               pim_free_bsgrp_data(bsgrp_node);
+       }
+
+       return 0;
+}
+
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
+{
+       if (!bsrp) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s : Invalid bsrp(NULL).",
+                                  __PRETTY_FUNCTION__);
+               return;
+       }
+       THREAD_OFF(bsrp->g2rp_timer);
+       if (PIM_DEBUG_BSM) {
+               char buf[48];
+
+               zlog_debug(
+                       "%s : starting g2rp timer for grp: %s - rp: %s with timeout  %d secs(Actual Hold time : %d secs)",
+                       __PRETTY_FUNCTION__,
+                       prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+                       inet_ntoa(bsrp->rp_address), hold_time,
+                       bsrp->rp_holdtime);
+       }
+
+       thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+                        &bsrp->g2rp_timer);
+}
+
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+                                         int hold_time)
+{
+       pim_g2rp_timer_start(bsrp, hold_time);
+}
+
+static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
+{
+       if (!bsrp)
+               return;
+
+       if (PIM_DEBUG_BSM) {
+               char buf[48];
+
+               zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
+                          __PRETTY_FUNCTION__,
+                          prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+                          inet_ntoa(bsrp->rp_address));
+       }
+
+       THREAD_OFF(bsrp->g2rp_timer);
+}
+
+static bool is_hold_time_zero(void *data)
+{
+       struct bsm_rpinfo *bsrp;
+
+       bsrp = data;
+
+       if (bsrp->rp_holdtime)
+               return false;
+       else
+               return true;
+}
+
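+/* Promote a completed partial list: once every RP advertised for the group
+ * has been received, partial_bsrp_list replaces bsrp_list and the RP for
+ * the group is installed, changed or deleted accordingly (unless a static
+ * RP is configured for the group).
+ */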
+static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
+{
+       struct bsm_rpinfo *active;
+       struct bsm_rpinfo *pend;
+       struct list *temp;
+       struct rp_info *rp_info;
+       struct route_node *rn;
+       struct pim_instance *pim;
+       struct rp_info *rp_all;
+       struct prefix group_all;
+       bool had_rp_node = true;
+
+       pim = bsgrp_node->scope->pim;
+       active = listnode_head(bsgrp_node->bsrp_list);
+
+       /* Remove nodes with hold time 0 & check if list still has a head */
+       list_filter_out_nodes(bsgrp_node->partial_bsrp_list, is_hold_time_zero);
+       pend = listnode_head(bsgrp_node->partial_bsrp_list);
+
+       if (!str2prefix("224.0.0.0/4", &group_all))
+               return;
+
+       rp_all = pim_rp_find_match_group(pim, &group_all);
+       rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+       if (pend)
+               pim_g2rp_timer_start(pend, pend->rp_holdtime);
+
+       /* If the rp node doesn't exist, or exists but is not configured
+        * (rp_all), install the rp from the head (if any) of the partial
+        * list. The list is sorted such that the head is the elected RP
+        * for the group.
+        */
+       if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group)
+                   && pim_rpf_addr_is_inaddr_none(&rp_all->rp))) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Route node doesn't exist",
+                                  __PRETTY_FUNCTION__);
+               if (pend)
+                       pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
+                                  NULL, RP_SRC_BSR);
+               had_rp_node = false;
+       } else {
+               rp_info = (struct rp_info *)rn->info;
+               if (!rp_info) {
+                       route_unlock_node(rn);
+                       if (pend)
+                               pim_rp_new(pim, pend->rp_address,
+                                          bsgrp_node->group, NULL, RP_SRC_BSR);
+                       had_rp_node = false;
+               }
+       }
+
+       /* We didn't have an rp node and the pending list is empty (unlikely); cleanup */
+       if ((!had_rp_node) && (!pend)) {
+               pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+                                   &bsgrp_node->group);
+               pim_free_bsgrp_data(bsgrp_node);
+               return;
+       }
+
+       if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
+               /* This means we searched and got rp node, needs unlock */
+               route_unlock_node(rn);
+
+               if (active && pend) {
+                       if ((active->rp_address.s_addr
+                            != pend->rp_address.s_addr))
+                               pim_rp_change(pim, pend->rp_address,
+                                             bsgrp_node->group, RP_SRC_BSR);
+               }
+
+               /* Possible when the first BSM has group with 0 rp count */
+               if ((!active) && (!pend)) {
+                       if (PIM_DEBUG_BSM) {
+                               zlog_debug(
+                                       "%s: Both bsrp and partial list are empty",
+                                       __PRETTY_FUNCTION__);
+                       }
+                       pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+                                           &bsgrp_node->group);
+                       pim_free_bsgrp_data(bsgrp_node);
+                       return;
+               }
+
+               /* Possible when a group with 0 rp count received in BSM */
+               if ((active) && (!pend)) {
+                       pim_rp_del(pim, active->rp_address, bsgrp_node->group,
+                                  NULL, RP_SRC_BSR);
+                       pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+                                           &bsgrp_node->group);
+                       if (PIM_DEBUG_BSM) {
+                               zlog_debug("%s:Pend List is null,del grp node",
+                                          __PRETTY_FUNCTION__);
+                       }
+                       pim_free_bsgrp_data(bsgrp_node);
+                       return;
+               }
+       }
+
+       if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
+               /* We need to unlock rn this case */
+               route_unlock_node(rn);
+               /* There is a chance that a static rp exists and the bsrp was
+                * cleaned, so clean the bsgrp node if the pending list is
+                * empty
+                */
+               if (!pend) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s: Partial list is empty, static rp exists",
+                                       __PRETTY_FUNCTION__);
+                       pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+                                           &bsgrp_node->group);
+                       pim_free_bsgrp_data(bsgrp_node);
+                       return;
+               }
+       }
+
+       /* swap the list & delete all nodes in partial list (old bsrp_list)
+        * before swap
+        *    active is head of bsrp list
+        *    pend is head of partial list
+        * After swap
+        *    active is head of partial list
+        *    pend is head of bsrp list
+        * So check appriate head after swap and clean the new partial list
+        */
+       temp = bsgrp_node->bsrp_list;
+       bsgrp_node->bsrp_list = bsgrp_node->partial_bsrp_list;
+       bsgrp_node->partial_bsrp_list = temp;
+
+       if (active) {
+               pim_g2rp_timer_stop(active);
+               list_delete_all_node(bsgrp_node->partial_bsrp_list);
+       }
+}
+
+static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
+                             struct in_addr ip_src_addr)
+{
+       struct pim_nexthop nexthop;
+       int result;
+
+       memset(&nexthop, 0, sizeof(nexthop));
+
+       /* New BSR received */
+       if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+               result = pim_nexthop_match(pim, bsr, ip_src_addr);
+
+               /* Nexthop lookup passed for the new BSR address */
+               if (result)
+                       return true;
+
+               if (PIM_DEBUG_BSM) {
+                       char bsr_str[INET_ADDRSTRLEN];
+
+                       pim_inet4_dump("<bsr?>", bsr, bsr_str, sizeof(bsr_str));
+                       zlog_debug("%s : No route to BSR address %s",
+                                  __PRETTY_FUNCTION__, bsr_str);
+               }
+               return false;
+       }
+
+       return pim_nexthop_match_nht_cache(pim, bsr, ip_src_addr);
+}
+
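+/* BSR preference (RFC 5059 election): the currently elected BSR is always
+ * accepted; otherwise a higher priority wins, and on a priority tie the
+ * raw s_addr comparison of the BSR addresses decides (>= wins). E.g. if the
+ * current BSR has priority 100, a BSM advertising priority 110 is preferred
+ * regardless of its address.
+ */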
+static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
+                            uint32_t bsr_prio)
+{
+       if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
+               return true;
+
+       if (bsr_prio > pim->global_scope.current_bsr_prio)
+               return true;
+
+       else if (bsr_prio == pim->global_scope.current_bsr_prio) {
+               if (bsr.s_addr >= pim->global_scope.current_bsr.s_addr)
+                       return true;
+               else
+                       return false;
+       } else
+               return false;
+}
+
+static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
+                          uint32_t bsr_prio)
+{
+       struct pim_nexthop_cache pnc;
+
+       if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+               struct prefix nht_p;
+               char buf[PREFIX2STR_BUFFER];
+               bool is_bsr_tracking = true;
+
+               /* De-register old BSR and register new BSR with Zebra NHT */
+               nht_p.family = AF_INET;
+               nht_p.prefixlen = IPV4_MAX_BITLEN;
+
+               if (pim->global_scope.current_bsr.s_addr != INADDR_ANY) {
+                       nht_p.u.prefix4 = pim->global_scope.current_bsr;
+                       if (PIM_DEBUG_BSM) {
+                               prefix2str(&nht_p, buf, sizeof(buf));
+                               zlog_debug(
+                                       "%s: Deregister BSR addr %s with Zebra NHT",
+                                       __PRETTY_FUNCTION__, buf);
+                       }
+                       pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL,
+                                                  is_bsr_tracking);
+               }
+
+               nht_p.u.prefix4 = bsr;
+               if (PIM_DEBUG_BSM) {
+                       prefix2str(&nht_p, buf, sizeof(buf));
+                       zlog_debug(
+                               "%s: NHT Register BSR addr %s with Zebra NHT",
+                               __PRETTY_FUNCTION__, buf);
+               }
+
+               memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
+               pim_find_or_track_nexthop(pim, &nht_p, NULL, NULL,
+                                         is_bsr_tracking, &pnc);
+               pim->global_scope.current_bsr = bsr;
+               pim->global_scope.current_bsr_first_ts =
+                       pim_time_monotonic_sec();
+               pim->global_scope.state = ACCEPT_PREFERRED;
+       }
+       pim->global_scope.current_bsr_prio = bsr_prio;
+       pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
+}
+
+static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
+                             struct in_addr dst_addr)
+{
+       struct pim_interface *pim_ifp;
+
+       pim_ifp = ifp->info;
+
+       if (!pim_ifp) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Pim interface not available for %s",
+                                  __PRETTY_FUNCTION__, ifp->name);
+               return false;
+       }
+
+       if (pim_ifp->pim_sock_fd == -1) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Pim sock not available for %s",
+                                  __PRETTY_FUNCTION__, ifp->name);
+               return false;
+       }
+
+       pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address, dst_addr,
+                    buf, len, ifp->name);
+       pim_ifp->pim_ifstat_bsm_tx++;
+       pim_ifp->pim->bsm_sent++;
+       return true;
+}
+
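+/* Fragment and send a BSM that does not fit into the interface's PIM MTU.
+ * The BSM header is copied once and reused for every fragment, while the
+ * PIM header/checksum is rebuilt per fragment before sending. When a
+ * group's RPs are split across fragments, the group header is repeated in
+ * the next fragment with frag_rp_count set to the number of RPs actually
+ * carried there.
+ */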
+static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
+                             uint32_t pim_mtu, struct in_addr dst_addr,
+                             bool no_fwd)
+{
+       struct bsmmsg_grpinfo *grpinfo, *curgrp;
+       uint8_t *firstgrp_ptr;
+       uint8_t *pkt;
+       uint8_t *pak_start;
+       uint32_t parsed_len = 0;
+       uint32_t this_pkt_rem;
+       uint32_t copy_byte_count;
+       uint32_t this_pkt_len;
+       uint8_t total_rp_cnt;
+       uint8_t this_rp_cnt;
+       uint8_t frag_rp_cnt;
+       uint8_t rp_fit_cnt;
+       bool pak_pending = false;
+
+       /* MTU  passed here is PIM MTU (IP MTU less IP Hdr) */
+       if (pim_mtu < (PIM_MIN_BSM_LEN)) {
+               zlog_warn(
+                       "%s: mtu(pim mtu: %d) size less than minimum bootsrap len",
+                       __PRETTY_FUNCTION__, pim_mtu);
+               if (PIM_DEBUG_BSM)
+                       zlog_debug(
+                               "%s: mtu (pim mtu:%d) less than minimum bootsrap len",
+                               __PRETTY_FUNCTION__, pim_mtu);
+               return false;
+       }
+
+       pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
+
+       if (!pak_start) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: malloc failed", __PRETTY_FUNCTION__);
+               return false;
+       }
+
+       pkt = pak_start;
+
+       /* Fill PIM header later before sending packet to calc checksum */
+       pkt += PIM_MSG_HEADER_LEN;
+       buf += PIM_MSG_HEADER_LEN;
+
+       /* copy bsm header to new packet at offset of pim hdr */
+       memcpy(pkt, buf, PIM_BSM_HDR_LEN);
+       pkt += PIM_BSM_HDR_LEN;
+       buf += PIM_BSM_HDR_LEN;
+       parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
+
+       /* Store the position of the first grp ptr, which can be reused when
+        * the next packet starts filling groups. The old bsm header and pim
+        * hdr remain, so they need not be filled again for subsequent
+        * packets.
+        */
+       firstgrp_ptr = pkt;
+
+       /* we received mtu excluding IP hdr len as param
+        * now this_pkt_rem is mtu excluding
+        * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
+        */
+       this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
+
+       /* For each group till the packet length parsed */
+       while (parsed_len < len) {
+               /* pkt            ---> fragment's current pointer
+                * buf            ---> input buffer's current pointer
+                * mtu            ---> size of the pim packet - PIM header
+                * curgrp         ---> current group on the fragment
+                * grpinfo        ---> current group on the input buffer
+                * this_pkt_rem   ---> bytes remaining on the current fragment
+                * rp_fit_cnt     ---> num of RPs for the current grp that
+                *                     fit in this frag
+                * total_rp_cnt   ---> total RPs present for the group in the buf
+                * frag_rp_cnt    ---> num of RPs for the group to be put in
+                *                     this frag
+                * this_rp_cnt    ---> how many RPs we have parsed so far
+                */
+               grpinfo = (struct bsmmsg_grpinfo *)buf;
+               memcpy(pkt, buf, PIM_BSM_GRP_LEN);
+               curgrp = (struct bsmmsg_grpinfo *)pkt;
+               parsed_len += PIM_BSM_GRP_LEN;
+               pkt += PIM_BSM_GRP_LEN;
+               buf += PIM_BSM_GRP_LEN;
+               this_pkt_rem -= PIM_BSM_GRP_LEN;
+
+               /* initialize rp count and total_rp_cnt before the rp loop */
+               this_rp_cnt = 0;
+               total_rp_cnt = grpinfo->frag_rp_count;
+
+               /* Loop till all RPs for the group parsed */
+               while (this_rp_cnt < total_rp_cnt) {
+                       /* All RPs of a group are processed here.
+                        * The group is pointed to by grpinfo.
+                        * At this point buf must point to an RP
+                        * within the group.
+                        */
+                       rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
+
+                       /* calculate how many RPs we are going to copy
+                        * into this frag
+                        */
+                       if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
+                               frag_rp_cnt = total_rp_cnt - this_rp_cnt;
+                       else
+                               frag_rp_cnt = rp_fit_cnt;
+
+                       /* populate the frag rp count for the current grp */
+                       curgrp->frag_rp_count = frag_rp_cnt;
+                       copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
+
+                       /* copy all the RPs that fit into this frag
+                        * for the grp
+                        */
+                       memcpy(pkt, buf, copy_byte_count);
+                       this_rp_cnt += frag_rp_cnt;
+                       buf += copy_byte_count;
+                       pkt += copy_byte_count;
+                       parsed_len += copy_byte_count;
+                       this_pkt_rem -= copy_byte_count;
+
+                       /* Either we couldn't fit all RPs for the group or
+                        * the mtu was reached
+                        */
+                       if ((this_rp_cnt < total_rp_cnt)
+                           || (this_pkt_rem
+                               < (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
+                               /* No space to fit in more rp, send this pkt */
+                               this_pkt_len = pim_mtu - this_pkt_rem;
+                               pim_msg_build_header(pak_start, this_pkt_len,
+                                                    PIM_MSG_TYPE_BOOTSTRAP,
+                                                    no_fwd);
+                               pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
+                                                 dst_addr);
+
+                               /* Construct next fragment. Reuse old packet */
+                               pkt = firstgrp_ptr;
+                               this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
+                                                         + PIM_MSG_HEADER_LEN);
+
+                               /* If all RPs of this group have already been
+                                * copied, break out of this inner loop and
+                                * move on to the next group
+                                */
+                               if (total_rp_cnt == this_rp_cnt)
+                                       break;
+
+                               /* If more RPs for the same group are pending,
+                                * refill the grp hdr
+                                */
+                               memcpy(pkt, (uint8_t *)grpinfo,
+                                      PIM_BSM_GRP_LEN);
+                               curgrp = (struct bsmmsg_grpinfo *)pkt;
+                               pkt += PIM_BSM_GRP_LEN;
+                               this_pkt_rem -= PIM_BSM_GRP_LEN;
+                               pak_pending = false;
+                       } else {
+                               /* We filled something but not yet sent out */
+                               pak_pending = true;
+                       }
+               } /* while RP count */
+       }        /* while parsed len */
+
+       /* Send if we have any unsent packet */
+       if (pak_pending) {
+               this_pkt_len = pim_mtu - this_pkt_rem;
+               pim_msg_build_header(pak_start, this_pkt_len,
+                                    PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+               pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
+                                 dst_addr);
+       }
+       XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
+       return true;
+}
+
+static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
+                                uint32_t len, int sz)
+{
+       struct interface *ifp;
+       struct pim_interface *pim_ifp;
+       struct in_addr dst_addr;
+       uint32_t pim_mtu;
+       bool no_fwd = false;
+       bool ret = false;
+
+       /* For now only global scope zone is supported, so send on all
+        * pim interfaces in the vrf
+        */
+       dst_addr = qpim_all_pim_routers_addr;
+       FOR_ALL_INTERFACES (pim->vrf, ifp) {
+               pim_ifp = ifp->info;
+               if ((!pim_ifp) || (!pim_ifp->bsm_enable))
+                       continue;
+               pim_hello_require(ifp);
+               pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+               if (pim_mtu < len) {
+                       ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
+                                               dst_addr, no_fwd);
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug("%s: pim_bsm_frag_send returned %s",
+                                          __PRETTY_FUNCTION__,
+                                          ret ? "TRUE" : "FALSE");
+               } else {
+                       pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+                                            no_fwd);
+                       if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
+                               if (PIM_DEBUG_BSM)
+                                       zlog_debug(
+                                               "%s: pim_bsm_send_intf returned FALSE",
+                                               __PRETTY_FUNCTION__);
+                       }
+               }
+       }
+}
+
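+/* Forward the stored BSM fragments to a newly seen neighbor so it learns
+ * the current group-to-RP mappings without waiting for the next BSM from
+ * the elected BSR. Sent unicast when ucast_bsm_accept is set on the
+ * interface, otherwise multicast to ALL-PIM-ROUTERS, always with the
+ * no-forward bit set.
+ */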
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
+{
+       struct in_addr dst_addr;
+       struct pim_interface *pim_ifp;
+       struct bsm_scope *scope;
+       struct listnode *bsm_ln;
+       struct bsm_info *bsminfo;
+       char neigh_src_str[INET_ADDRSTRLEN];
+       uint32_t pim_mtu;
+       bool no_fwd = true;
+       bool ret = false;
+
+       if (PIM_DEBUG_BSM) {
+               pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
+                              sizeof(neigh_src_str));
+               zlog_debug("%s: New neighbor %s seen on %s",
+                          __PRETTY_FUNCTION__, neigh_src_str, ifp->name);
+       }
+
+       pim_ifp = ifp->info;
+
+       /* DR only forwards BSM packet */
+       if (pim_ifp->pim_dr_addr.s_addr == pim_ifp->primary_address.s_addr) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug(
+                               "%s: It is not DR, so don't forward BSM packet",
+                               __PRETTY_FUNCTION__);
+       }
+
+       if (!pim_ifp->bsm_enable) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: BSM proc not enabled on %s",
+                                  __PRETTY_FUNCTION__, ifp->name);
+               return ret;
+       }
+
+       scope = &pim_ifp->pim->global_scope;
+
+       if (!scope->bsm_list->count) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: BSM list for the scope is empty",
+                                  __PRETTY_FUNCTION__);
+               return ret;
+       }
+
+       if (!pim_ifp->ucast_bsm_accept) {
+               dst_addr = qpim_all_pim_routers_addr;
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Sending BSM mcast to %s",
+                                  __PRETTY_FUNCTION__, neigh_src_str);
+       } else {
+               dst_addr = neigh->source_addr;
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Sending BSM ucast to %s",
+                                  __PRETTY_FUNCTION__, neigh_src_str);
+       }
+       pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+       pim_hello_require(ifp);
+
+       for (ALL_LIST_ELEMENTS_RO(scope->bsm_list, bsm_ln, bsminfo)) {
+               if (pim_mtu < bsminfo->size) {
+                       ret = pim_bsm_frag_send(bsminfo->bsm, bsminfo->size,
+                                               ifp, pim_mtu, dst_addr, no_fwd);
+                       if (!ret) {
+                               if (PIM_DEBUG_BSM)
+                                       zlog_debug(
+                                               "%s: pim_bsm_frag_send failed",
+                                               __PRETTY_FUNCTION__);
+                       }
+               } else {
+                       /* Pim header needs to be constructed */
+                       pim_msg_build_header(bsminfo->bsm, bsminfo->size,
+                                            PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+                       ret = pim_bsm_send_intf(bsminfo->bsm, bsminfo->size,
+                                               ifp, dst_addr);
+                       if (!ret) {
+                               if (PIM_DEBUG_BSM)
+                                       zlog_debug(
+                                               "%s: pim_bsm_frag_send failed",
+                                               __PRETTY_FUNCTION__);
+                       }
+               }
+       }
+       return ret;
+}
+
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+                                         struct prefix *grp)
+{
+       struct route_node *rn;
+       struct bsgrp_node *bsgrp;
+
+       rn = route_node_lookup(scope->bsrp_table, grp);
+       if (!rn) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: Route node doesn't exist for the group",
+                                  __PRETTY_FUNCTION__);
+               return NULL;
+       }
+       bsgrp = rn->info;
+       route_unlock_node(rn);
+
+       return bsgrp;
+}
+
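+/* RFC 7761 4.7.2 hash: mask the group address down to its hashmasklen
+ * high-order bits, mix it with the RP address through the
+ * linear-congruential style formula below and keep the low 31 bits.
+ * Illustrative example: with hashmasklen 30, group 224.1.1.1 is masked to
+ * 224.1.1.0 before hashing, so 224.1.1.1 and 224.1.1.2 hash identically
+ * for the same RP.
+ */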
+static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
+                                   uint8_t hashmasklen)
+{
+       uint64_t temp;
+       uint32_t hash;
+       uint32_t grpaddr;
+       uint32_t rp_add;
+       uint32_t mask = 0xffffffff;
+
+       /* mask to be made zero if hashmasklen is 0 because mask << 32
+        * may not give 0. hashmasklen can be 0 to 32.
+        */
+       if (hashmasklen == 0)
+               mask = 0;
+
+       /* in_addr stores the ip in network byte order (big endian), so
+        * convert it to a host-order uint32 before computing the hash
+        */
+       grpaddr = ntohl(group.u.prefix4.s_addr);
+       /* Avoid shifting by 32 bit on a 32 bit register */
+       if (hashmasklen)
+               grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
+       else
+               grpaddr = grpaddr & mask;
+       rp_add = ntohl(rp.s_addr);
+       temp = 1103515245 * ((1103515245 * grpaddr + 12345) ^ rp_add) + 12345;
+       hash = temp & (0x7fffffff);
+       return hash;
+}
+
+static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
+                                  struct bsgrp_node *grpnode,
+                                  struct bsmmsg_rpinfo *rp)
+{
+       struct bsm_rpinfo *bsm_rpinfo;
+       uint8_t hashMask_len = pim->global_scope.hashMasklen;
+
+       /* memory allocation for bsm_rpinfo */
+       bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_NODE, sizeof(*bsm_rpinfo));
+
+       if (!bsm_rpinfo) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s, Memory allocation failed.\r\n",
+                                  __PRETTY_FUNCTION__);
+               return false;
+       }
+
+       bsm_rpinfo->rp_prio = rp->rp_pri;
+       bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
+       memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
+              sizeof(struct in_addr));
+       bsm_rpinfo->elapse_time = 0;
+
+       /* Back pointer to the group node. */
+       bsm_rpinfo->bsgrp_node = grpnode;
+
+       /* update hash for this rp node */
+       bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
+                                              hashMask_len);
+       if (listnode_add_sort_nodup(grpnode->partial_bsrp_list, bsm_rpinfo)) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug(
+                               "%s, bs_rpinfo node added to the partial bs_rplist.\r\n",
+                               __PRETTY_FUNCTION__);
+               return true;
+       }
+
+       if (PIM_DEBUG_BSM)
+               zlog_debug("%s: list node not added\n", __PRETTY_FUNCTION__);
+
+       XFREE(MTYPE_PIM_BSRP_NODE, bsm_rpinfo);
+       return false;
+}
+
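+/* Track how many RPs are still expected for the group. pend_rp_cnt is
+ * seeded from the advertised rp_count; if a BSM with a different fragment
+ * tag arrives while RPs are still pending, the half-built partial list is
+ * discarded and the count restarts for the new BSM.
+ */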
+static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
+                                     struct bsgrp_node *bsgrp,
+                                     uint16_t bsm_frag_tag,
+                                     uint32_t total_rp_count)
+{
+       if (bsgrp->pend_rp_cnt) {
+               /* The received bsm is a different packet,
+                * not the same fragment.
+                */
+               if (bsm_frag_tag != bsgrp->frag_tag) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s,Received a new BSM ,so clear the pending bs_rpinfo list.\r\n",
+                                       __PRETTY_FUNCTION__);
+                       list_delete_all_node(bsgrp->partial_bsrp_list);
+                       bsgrp->pend_rp_cnt = total_rp_count;
+               }
+       } else
+               bsgrp->pend_rp_cnt = total_rp_count;
+
+       bsgrp->frag_tag = bsm_frag_tag;
+}
+
+/* Parsing BSR packet and adding to partial list of corresponding bsgrp node */
+static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
+                                      int buflen, uint16_t bsm_frag_tag)
+{
+       struct bsmmsg_grpinfo grpinfo;
+       struct bsmmsg_rpinfo rpinfo;
+       struct prefix group;
+       struct bsgrp_node *bsgrp = NULL;
+       int frag_rp_cnt = 0;
+       int offset = 0;
+       int ins_count = 0;
+
+       while (buflen > offset) {
+               /* Extract Group tlv from BSM */
+               memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+
+               if (PIM_DEBUG_BSM) {
+                       char grp_str[INET_ADDRSTRLEN];
+
+                       pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
+                                      sizeof(grp_str));
+                       zlog_debug(
+                               "%s, Group %s  Rpcount:%d Fragment-Rp-count:%d\r\n",
+                               __PRETTY_FUNCTION__, grp_str, grpinfo.rp_count,
+                               grpinfo.frag_rp_count);
+               }
+
+               buf += sizeof(struct bsmmsg_grpinfo);
+               offset += sizeof(struct bsmmsg_grpinfo);
+
+               if (grpinfo.rp_count == 0) {
+                       if (PIM_DEBUG_BSM) {
+                               char grp_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<Group?>", grpinfo.group.addr,
+                                              grp_str, sizeof(grp_str));
+                               zlog_debug(
+                                       "%s, Rp count is zero for group: %s\r\n",
+                                       __PRETTY_FUNCTION__, grp_str);
+                       }
+                       return false;
+               }
+
+               group.family = AF_INET;
+               group.prefixlen = grpinfo.group.mask;
+               group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
+
+               /* Get the Group node for the BSM rp table */
+               bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
+
+               if (!bsgrp) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s, Create new  BSM Group node.\r\n",
+                                       __PRETTY_FUNCTION__);
+
+                       /* create a new node to be added to the tree. */
+                       bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
+                                                      &group);
+
+                       if (!bsgrp) {
+                               zlog_debug(
+                                       "%s, Failed to get the BSM group node.\r\n",
+                                       __PRETTY_FUNCTION__);
+                               continue;
+                       }
+
+                       bsgrp->scope = scope;
+               }
+
+               pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
+                                         grpinfo.rp_count);
+               frag_rp_cnt = grpinfo.frag_rp_count;
+               ins_count = 0;
+
+               while (frag_rp_cnt--) {
+                       /* Extract RP address tlv from BSM */
+                       memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
+                       rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
+                       buf += sizeof(struct bsmmsg_rpinfo);
+                       offset += sizeof(struct bsmmsg_rpinfo);
+
+                       if (PIM_DEBUG_BSM) {
+                               char rp_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
+                                              rp_str, sizeof(rp_str));
+                               zlog_debug(
+                                       "%s, Rp address - %s; pri:%d hold:%d\r\n",
+                                       __PRETTY_FUNCTION__, rp_str,
+                                       rpinfo.rp_pri, rpinfo.rp_holdtime);
+                       }
+
+                       /* Call Install api to update grp-rp mappings */
+                       if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
+                               ins_count++;
+               }
+
+               bsgrp->pend_rp_cnt -= ins_count;
+
+               if (!bsgrp->pend_rp_cnt) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
+                                       __PRETTY_FUNCTION__);
+                       /* replace the bsrp_list with pending list */
+                       pim_instate_pend_list(bsgrp);
+               }
+       }
+       return true;
+}
+
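+/* Entry point for a received bootstrap message: validate the receiving
+ * interface and destination address, drop BSMs from non-preferred BSRs,
+ * RPF-check multicast BSMs against the BSR address, parse the group-to-RP
+ * sets into the global scope, restart the bootstrap timer and, unless the
+ * no-forward bit is set, forward the BSM on all PIM interfaces and store a
+ * copy in bsm_list for new neighbors.
+ */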
+int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
+                   uint32_t buf_size, bool no_fwd)
+{
+       struct bsm_hdr *bshdr;
+       int sz = PIM_GBL_SZ_ID;
+       struct bsmmsg_grpinfo *msg_grp;
+       struct pim_interface *pim_ifp = NULL;
+       struct bsm_info *bsminfo;
+       struct pim_instance *pim;
+       char bsr_str[INET_ADDRSTRLEN];
+       uint16_t frag_tag;
+       bool empty_bsm = false;
+
+       /* BSM Packet acceptance validation */
+       pim_ifp = ifp->info;
+       if (!pim_ifp) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s: multicast not enabled on interface %s",
+                                  __PRETTY_FUNCTION__, ifp->name);
+               return -1;
+       }
+
+       pim_ifp->pim_ifstat_bsm_rx++;
+       pim = pim_ifp->pim;
+       pim->bsm_rcvd++;
+
+       /* Drop if bsm processing is disabled on interface */
+       if (!pim_ifp->bsm_enable) {
+               zlog_warn("%s: BSM not enabled on interface %s",
+                         __PRETTY_FUNCTION__, ifp->name);
+               pim_ifp->pim_ifstat_bsm_cfg_miss++;
+               pim->bsm_dropped++;
+               return -1;
+       }
+
+       bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
+       pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
+                      sizeof(bsr_str));
+       pim->global_scope.hashMasklen = bshdr->hm_len;
+       frag_tag = ntohs(bshdr->frag_tag);
+
+       /* Identify empty BSM */
+       if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
+               empty_bsm = true;
+
+       if (!empty_bsm) {
+               msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
+                                                   + PIM_BSM_HDR_LEN);
+               /* Currently we don't support scope zoned BSM */
+               if (msg_grp->group.sz) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s : Administratively scoped range BSM received",
+                                       __PRETTY_FUNCTION__);
+                       pim_ifp->pim_ifstat_bsm_invalid_sz++;
+                       pim->bsm_dropped++;
+                       return -1;
+               }
+       }
+
+       /* Drop if bsr is not preferred bsr */
+       if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s : Received a non-preferred BSM",
+                                  __PRETTY_FUNCTION__);
+               pim->bsm_dropped++;
+               return -1;
+       }
+
+       if (no_fwd) {
+               /* only accept no-forward BSM if quick refresh on startup */
+               if ((pim->global_scope.accept_nofwd_bsm)
+                   || (frag_tag == pim->global_scope.bsm_frag_tag)) {
+                       pim->global_scope.accept_nofwd_bsm = false;
+               } else {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
+                                       __PRETTY_FUNCTION__, bsr_str);
+                       pim->bsm_dropped++;
+                       pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+                       return -1;
+               }
+       }
+
+       /* Multicast BSM received */
+       if (ip_hdr->ip_dst.s_addr == qpim_all_pim_routers_addr.s_addr) {
+               if (!no_fwd) {
+                       if (!pim_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
+                                              ip_hdr->ip_src)) {
+                               if (PIM_DEBUG_BSM)
+                                       zlog_debug(
+                                               "%s : RPF check fail for BSR address %s",
+                                               __PRETTY_FUNCTION__, bsr_str);
+                               pim->bsm_dropped++;
+                               return -1;
+                       }
+               }
+       } else if (if_lookup_exact_address(&ip_hdr->ip_dst, AF_INET,
+                                          pim->vrf_id)) {
+               /* Unicast BSM received - if ucast bsm not enabled on
+                * the interface, drop it
+                */
+               if (!pim_ifp->ucast_bsm_accept) {
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s : Unicast BSM not enabled on interface %s",
+                                       __PRETTY_FUNCTION__, ifp->name);
+                       pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+                       pim->bsm_dropped++;
+                       return -1;
+               }
+
+       } else {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s : Invalid destination address",
+                                  __PRETTY_FUNCTION__);
+               pim->bsm_dropped++;
+               return -1;
+       }
+
+       if (empty_bsm) {
+               if (PIM_DEBUG_BSM)
+                       zlog_debug("%s : Empty Pref BSM received",
+                                  __PRETTY_FUNCTION__);
+       }
+       /* Parse the BSM, update the bsm rp table and install/uninstall rps as required */
+       if (!pim_bsm_parse_install_g2rp(
+                   &pim_ifp->pim->global_scope,
+                   (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
+                   (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
+                   frag_tag)) {
+               if (PIM_DEBUG_BSM) {
+                       zlog_debug("%s, Parsing BSM failed.\r\n",
+                                  __PRETTY_FUNCTION__);
+               }
+               pim->bsm_dropped++;
+               return -1;
+       }
+       /* Restart the bootstrap timer */
+       pim_bs_timer_restart(&pim_ifp->pim->global_scope,
+                            PIM_BSR_DEFAULT_TIMEOUT);
+
+       /* If new BSM received, clear the old bsm database */
+       if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
+               if (PIM_DEBUG_BSM) {
+                       zlog_debug("%s: Current frag tag: %d Frag teg rcvd: %d",
+                                  __PRETTY_FUNCTION__,
+                                  pim_ifp->pim->global_scope.bsm_frag_tag,
+                                  frag_tag);
+               }
+               list_delete_all_node(pim_ifp->pim->global_scope.bsm_list);
+               pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
+       }
+
+       /* update the scope information from bsm */
+       pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+
+       if (!no_fwd) {
+               pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+               bsminfo = XCALLOC(MTYPE_PIM_BSM_INFO, sizeof(struct bsm_info));
+               if (!bsminfo) {
+                       zlog_warn("%s: bsminfo alloc failed",
+                                 __PRETTY_FUNCTION__);
+                       return 0;
+               }
+
+               bsminfo->bsm = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, buf_size);
+               if (!bsminfo->bsm) {
+                       zlog_warn("%s: bsm alloc failed", __PRETTY_FUNCTION__);
+                       XFREE(MTYPE_PIM_BSM_INFO, bsminfo);
+                       return 0;
+               }
+
+               bsminfo->size = buf_size;
+               memcpy(bsminfo->bsm, buf, buf_size);
+               listnode_add(pim_ifp->pim->global_scope.bsm_list, bsminfo);
+       }
+
+       return 0;
+}
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
new file mode 100644 (file)
index 0000000..0758c94
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * pim_bsm.h: PIM BSM handling related
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#ifndef __PIM_BSM_H__
+#define __PIM_BSM_H__
+
+#include "if.h"
+#include "vty.h"
+#include "linklist.h"
+#include "table.h"
+#include "pim_rp.h"
+#include "pim_msg.h"
+
+/* Defines */
+#define PIM_GBL_SZ_ID 0                    /* global scope zone id set to 0 */
+#define PIM_BS_TIME 60             /* RFC 5059 - Sec 5 */
+#define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */
+
+/* These structures cover the IPv4 encoded formats only */
+#define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr)
+#define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo)
+#define PIM_BSM_RP_LEN sizeof(struct bsmmsg_rpinfo)
+
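+/* Minimum useful BSM: PIM header + BSM header plus one group with one RP */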
+#define PIM_MIN_BSM_LEN \
+       (PIM_HDR_LEN + PIM_BSM_HDR_LEN + PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN)
+
+/* Datastructures
+ * ==============
+ */
+
+/* Non candidate BSR states */
+enum ncbsr_state {
+       NO_INFO = 0,
+       ACCEPT_ANY,
+       ACCEPT_PREFERRED
+};
+
+/* BSM scope - bsm processing is per scope */
+struct bsm_scope {
+       int sz_id;                      /* scope zone id */
+       enum ncbsr_state state;         /* non candidate BSR state */
+       bool accept_nofwd_bsm;          /* no fwd bsm accepted for scope */
+       struct in_addr current_bsr;     /* current elected BSR for the sz */
+       uint32_t current_bsr_prio;      /* current BSR priority */
+       int64_t current_bsr_first_ts;   /* current BSR elected time */
+       int64_t current_bsr_last_ts;    /* Last BSM received from E-BSR */
+       uint16_t bsm_frag_tag;          /* Last received frag tag from E-BSR */
+       uint8_t hashMasklen;            /* Mask in hash calc RFC 7761 4.7.2 */
+       struct pim_instance *pim;       /* Back pointer to pim instance */
+       struct list *bsm_list;          /* list of bsm frags for forwarding */
+       struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
+       struct thread *bs_timer;        /* Bootstrap timer */
+       struct thread *sz_timer;
+};
+
+/* BSM packet - this is stored as list in bsm_list inside scope
+ * This is used for forwarding to new neighbors or restarting mcast routers
+ */
+struct bsm_info {
+       uint32_t size;      /* size of the packet */
+       unsigned char *bsm; /* Actual packet */
+};
+
+/* This is the group node of the bsrp table in scope.
+ * this node maintains the list of rp for the group.
+ */
+struct bsgrp_node {
+       struct prefix group;            /* Group range */
+       struct bsm_scope *scope;        /* Back ptr to scope */
+       struct list *bsrp_list;         /* list of RPs adv by BSR */
+       struct list *partial_bsrp_list; /* maintained until all RPs received */
+       int pend_rp_cnt;                /* Total RP - Received RP */
+       uint16_t frag_tag;              /* frag tag to identify the fragment */
+};
+
+/* This is the list node of bsrp_list and partial bsrp list in
+ * bsgrp_node. Hold info of each RP received for the group
+ */
+struct bsm_rpinfo {
+       uint32_t hash;                  /* Hash Value as per RFC 7761 4.7.2 */
+       uint32_t elapse_time;           /* upd at expiry of elected RP node */
+       uint16_t rp_prio;               /* RP priority */
+       uint16_t rp_holdtime;           /* RP holdtime - g2rp timer value */
+       struct in_addr rp_address;      /* RP Address */
+       struct bsgrp_node *bsgrp_node;  /* Back ptr to bsgrp_node */
+       struct thread *g2rp_timer;      /* Run only for elected RP node */
+};
+
+/*  Structures to extract Bootstrap Message header and Grp to RP Mappings
+ *  =====================================================================
+ *  BSM Format:
+ *
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |PIM Ver| Type  |N|  Reserved   |           Checksum            | PIM HDR
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |         Fragment Tag          | Hash Mask Len | BSR Priority  | BS HDR(1)
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             BSR Address (Encoded-Unicast format)              | BS HDR(2)
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |            Group Address 1 (Encoded-Group format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  | RP Count 1    | Frag RP Cnt 1 |         Reserved              |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address 1 (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RP1 Holdtime         | RP1 Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address 2 (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RP2 Holdtime         | RP2 Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |                               .                               |
+ *  |                               .                               |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address m (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RPm Holdtime         | RPm Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |            Group Address 2 (Encoded-Group format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |                               .                               |
+ *  |                               .                               |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |            Group Address n (Encoded-Group format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  | RP Count n    | Frag RP Cnt n |          Reserved             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address 1 (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RP1 Holdtime         | RP1 Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address 2 (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RP2 Holdtime         | RP2 Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |                               .                               |
+ *  |                               .                               |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |             RP Address m (Encoded-Unicast format)             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          RPm Holdtime         | RPm Priority  |   Reserved    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct bsm_hdr {
+       uint16_t frag_tag;
+       uint8_t hm_len;
+       uint8_t bsr_prio;
+       struct pim_encoded_ipv4_unicast bsr_addr;
+} __attribute__((packed));
+
+struct bsmmsg_grpinfo {
+       struct pim_encoded_group_ipv4 group;
+       uint8_t rp_count;
+       uint8_t frag_rp_count;
+       uint16_t reserved;
+} __attribute__((packed));
+
+struct bsmmsg_rpinfo {
+       struct pim_encoded_ipv4_unicast rpaddr;
+       uint16_t rp_holdtime;
+       uint8_t rp_pri;
+       uint8_t reserved;
+} __attribute__((packed));
+
+/* API */
+void pim_bsm_proc_init(struct pim_instance *pim);
+void pim_bsm_proc_free(struct pim_instance *pim);
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp);
+int  pim_bsm_process(struct interface *ifp,
+                    struct ip *ip_hdr,
+                    uint8_t *buf,
+                    uint32_t buf_size,
+                    bool no_fwd);
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+                                         struct prefix *grp);
+#endif
index cb2ba87ec68fa55dcda2ef0dd778c399aa18ee6d..a2357067f993a7a9c5f0f3b49f496b0562ef2265 100644 (file)
@@ -62,6 +62,7 @@
 #include "pim_bfd.h"
 #include "pim_vxlan.h"
 #include "bfd.h"
+#include "pim_bsm.h"
 
 #ifndef VTYSH_EXTRACT_PL
 #include "pimd/pim_cmd_clippy.c"
@@ -1477,13 +1478,14 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
                json = json_object_new_object();
        else {
                vty_out(vty, "\n");
-               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
                        "Interface", "       HELLO", "       JOIN",
                        "      PRUNE", "   REGISTER", "REGISTER-STOP",
-                       "  ASSERT");
-               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
+                       "  ASSERT", "  BSM");
+               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
                        "       Rx/Tx", "       Rx/Tx", "      Rx/Tx",
-                       "     Rx/Tx", "    Rx/Tx", "   Rx/Tx");
+                       "      Rx/Tx", "     Rx/Tx", "    Rx/Tx",
+                       "   Rx/Tx");
                vty_out(vty,
                        "---------------------------------------------------------------------------------------------------------------\n");
        }
@@ -1518,12 +1520,15 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
                        json_object_int_add(json_row, "assertRx",
                                            pim_ifp->pim_ifstat_assert_recv);
                        json_object_int_add(json_row, "assertTx",
-                                           pim_ifp->pim_ifstat_assert_send);
-
+                                       pim_ifp->pim_ifstat_assert_send);
+                       json_object_int_add(json_row, "bsmRx",
+                                       pim_ifp->pim_ifstat_bsm_rx);
+                       json_object_int_add(json_row, "bsmTx",
+                                       pim_ifp->pim_ifstat_bsm_tx);
                        json_object_object_add(json, ifp->name, json_row);
                } else {
                        vty_out(vty,
-                               "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+                               "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
                                ifp->name, pim_ifp->pim_ifstat_hello_recv,
                                pim_ifp->pim_ifstat_hello_sent,
                                pim_ifp->pim_ifstat_join_recv,
@@ -1535,7 +1540,9 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
                                pim_ifp->pim_ifstat_reg_stop_recv,
                                pim_ifp->pim_ifstat_reg_stop_send,
                                pim_ifp->pim_ifstat_assert_recv,
-                               pim_ifp->pim_ifstat_assert_send);
+                               pim_ifp->pim_ifstat_assert_send,
+                               pim_ifp->pim_ifstat_bsm_rx,
+                               pim_ifp->pim_ifstat_bsm_tx);
                }
        }
        if (uj) {
@@ -1559,14 +1566,15 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
                json = json_object_new_object();
        else {
                vty_out(vty, "\n");
-               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+               vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
                        "Interface", "    HELLO", "    JOIN", "   PRUNE",
-                       "   REGISTER", "  REGISTER-STOP", "  ASSERT");
-               vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s\n", "",
+                       "   REGISTER", "  REGISTER-STOP", "  ASSERT",
+                       "    BSM");
+               vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
                        "      Rx/Tx", "     Rx/Tx", "    Rx/Tx", "    Rx/Tx",
-                       "     Rx/Tx", "    Rx/Tx");
+                       "     Rx/Tx", "    Rx/Tx", "    Rx/Tx");
                vty_out(vty,
-                       "---------------------------------------------------------------------------------------------------------------------\n");
+                       "-------------------------------------------------------------------------------------------------------------------------------\n");
        }
 
        FOR_ALL_INTERFACES (pim->vrf, ifp) {
@@ -1605,11 +1613,15 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
                                            pim_ifp->pim_ifstat_assert_recv);
                        json_object_int_add(json_row, "assertTx",
                                            pim_ifp->pim_ifstat_assert_send);
+                       json_object_int_add(json_row, "bsmRx",
+                                           pim_ifp->pim_ifstat_bsm_rx);
+                       json_object_int_add(json_row, "bsmTx",
+                                           pim_ifp->pim_ifstat_bsm_tx);
 
                        json_object_object_add(json, ifp->name, json_row);
                } else {
                        vty_out(vty,
-                               "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+                               "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
                                ifp->name, pim_ifp->pim_ifstat_hello_recv,
                                pim_ifp->pim_ifstat_hello_sent,
                                pim_ifp->pim_ifstat_join_recv,
@@ -1621,7 +1633,9 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
                                pim_ifp->pim_ifstat_reg_stop_recv,
                                pim_ifp->pim_ifstat_reg_stop_send,
                                pim_ifp->pim_ifstat_assert_recv,
-                               pim_ifp->pim_ifstat_assert_send);
+                               pim_ifp->pim_ifstat_assert_send,
+                               pim_ifp->pim_ifstat_bsm_rx,
+                               pim_ifp->pim_ifstat_bsm_tx);
                }
        }
        if (uj) {
@@ -2888,6 +2902,413 @@ static void pim_show_nexthop(struct pim_instance *pim, struct vty *vty)
        hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd);
 }
 
+/* Display the bsm database details */
+static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+       struct listnode *bsmnode;
+       int count = 0;
+       int fragment = 1;
+       struct bsm_info *bsm;
+       json_object *json = NULL;
+       json_object *json_group = NULL;
+       json_object *json_row = NULL;
+
+       count = pim->global_scope.bsm_list->count;
+
+       if (uj) {
+               json = json_object_new_object();
+               json_object_int_add(json, "Number of fragments", count);
+       } else {
+               vty_out(vty, "Scope Zone: Global\n");
+               vty_out(vty, "Number of fragments: %d\n", count);
+               vty_out(vty, "\n");
+       }
+
+       for (ALL_LIST_ELEMENTS_RO(pim->global_scope.bsm_list, bsmnode, bsm)) {
+               char grp_str[INET_ADDRSTRLEN];
+               char rp_str[INET_ADDRSTRLEN];
+               char bsr_str[INET_ADDRSTRLEN];
+               struct bsmmsg_grpinfo *group;
+               struct bsmmsg_rpinfo *rpaddr;
+               struct prefix grp;
+               struct bsm_hdr *hdr;
+               uint32_t offset = 0;
+               uint8_t *buf;
+               uint32_t len = 0;
+               uint32_t frag_rp_cnt = 0;
+
+               buf = bsm->bsm;
+               len = bsm->size;
+
+               /* skip pim header */
+               buf += PIM_MSG_HEADER_LEN;
+               len -= PIM_MSG_HEADER_LEN;
+
+               hdr = (struct bsm_hdr *)buf;
+
+               /* BSM starts with bsr header */
+               buf += sizeof(struct bsm_hdr);
+               len -= sizeof(struct bsm_hdr);
+
+               pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str,
+                              sizeof(bsr_str));
+
+
+               if (uj) {
+                       json_object_string_add(json, "BSR address", bsr_str);
+                       json_object_int_add(json, "BSR priority",
+                                           hdr->bsr_prio);
+                       json_object_int_add(json, "Hashmask Length",
+                                           hdr->hm_len);
+                       json_object_int_add(json, "Fragment Tag",
+                                           ntohs(hdr->frag_tag));
+               } else {
+                       vty_out(vty, "BSM Fragment : %d\n", fragment);
+                       vty_out(vty, "------------------\n");
+                       vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
+                               "BSR-Priority", "Hashmask-len", "Fragment-Tag");
+                       vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str,
+                               hdr->bsr_prio, hdr->hm_len,
+                               ntohs(hdr->frag_tag));
+               }
+
+               vty_out(vty, "\n");
+
+               while (offset < len) {
+                       group = (struct bsmmsg_grpinfo *)buf;
+
+                       if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
+                               grp.family = AF_INET;
+
+                       grp.prefixlen = group->group.mask;
+                       grp.u.prefix4.s_addr = group->group.addr.s_addr;
+
+                       prefix2str(&grp, grp_str, sizeof(grp_str));
+
+                       buf += sizeof(struct bsmmsg_grpinfo);
+                       offset += sizeof(struct bsmmsg_grpinfo);
+
+                       if (uj) {
+                               json_object_object_get_ex(json, grp_str,
+                                                         &json_group);
+                               if (!json_group) {
+                                       json_group = json_object_new_object();
+                                       json_object_int_add(json_group,
+                                                           "Rp Count",
+                                                           group->rp_count);
+                                       json_object_int_add(
+                                               json_group, "Fragment Rp count",
+                                               group->frag_rp_count);
+                                       json_object_object_add(json, grp_str,
+                                                              json_group);
+                               }
+                       } else {
+                               vty_out(vty, "Group : %s\n", grp_str);
+                               vty_out(vty, "-------------------\n");
+                               vty_out(vty, "Rp Count:%d\n", group->rp_count);
+                               vty_out(vty, "Fragment Rp Count : %d\n",
+                                       group->frag_rp_count);
+                       }
+
+                       frag_rp_cnt = group->frag_rp_count;
+
+                       if (!frag_rp_cnt)
+                               continue;
+
+                       if (!uj)
+                               vty_out(vty,
+                                       "RpAddress     HoldTime     Priority\n");
+
+                       while (frag_rp_cnt--) {
+                               rpaddr = (struct bsmmsg_rpinfo *)buf;
+
+                               buf += sizeof(struct bsmmsg_rpinfo);
+                               offset += sizeof(struct bsmmsg_rpinfo);
+
+                               pim_inet4_dump("<Rp addr?>",
+                                              rpaddr->rpaddr.addr, rp_str,
+                                              sizeof(rp_str));
+
+                               if (uj) {
+                                       json_row = json_object_new_object();
+                                       json_object_string_add(
+                                               json_row, "Rp Address", rp_str);
+                                       json_object_int_add(
+                                               json_row, "Rp HoldTime",
+                                               ntohs(rpaddr->rp_holdtime));
+                                       json_object_int_add(json_row,
+                                                           "Rp Priority",
+                                                           rpaddr->rp_pri);
+                                       json_object_object_add(
+                                               json_group, rp_str, json_row);
+                               } else {
+                                       vty_out(vty, "%-15s %-12d %d\n", rp_str,
+                                               ntohs(rpaddr->rp_holdtime),
+                                               rpaddr->rp_pri);
+                               }
+                       }
+                       vty_out(vty, "\n");
+               }
+
+               fragment++;
+       }
+
+       if (uj) {
+               vty_out(vty, "%s\n", json_object_to_json_string_ext(
+                                            json, JSON_C_TO_STRING_PRETTY));
+               json_object_free(json);
+       }
+}
+
+/* Display the group-rp mappings */
+static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
+                                           struct vty *vty, bool uj)
+{
+       struct bsgrp_node *bsgrp;
+       struct listnode *rpnode;
+       struct bsm_rpinfo *bsm_rp;
+       struct route_node *rn;
+       char bsr_str[INET_ADDRSTRLEN];
+       json_object *json = NULL;
+       json_object *json_group = NULL;
+       json_object *json_row = NULL;
+
+       if (pim->global_scope.current_bsr.s_addr == INADDR_ANY)
+               strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+       else
+               pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str,
+                              sizeof(bsr_str));
+
+       if (uj) {
+               json = json_object_new_object();
+               json_object_string_add(json, "BSR Address", bsr_str);
+       } else {
+               vty_out(vty, "BSR Address  %s\n", bsr_str);
+       }
+
+       for (rn = route_top(pim->global_scope.bsrp_table); rn;
+            rn = route_next(rn)) {
+               bsgrp = (struct bsgrp_node *)rn->info;
+
+               if (!bsgrp)
+                       continue;
+
+               char grp_str[INET_ADDRSTRLEN];
+
+               prefix2str(&bsgrp->group, grp_str, sizeof(grp_str));
+
+               if (uj) {
+                       json_object_object_get_ex(json, grp_str, &json_group);
+                       if (!json_group) {
+                               json_group = json_object_new_object();
+                               json_object_object_add(json, grp_str,
+                                                      json_group);
+                       }
+               } else {
+                       vty_out(vty, "Group Address %s\n", grp_str);
+                       vty_out(vty, "--------------------------\n");
+                       vty_out(vty, "%-15s %-15s %-15s %-15s\n", "Rp Address",
+                               "priority", "Holdtime", "Hash");
+
+                       vty_out(vty, "(ACTIVE)\n");
+               }
+
+               if (bsgrp->bsrp_list) {
+                       for (ALL_LIST_ELEMENTS_RO(bsgrp->bsrp_list, rpnode,
+                                                 bsm_rp)) {
+                               char rp_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<Rp Address?>",
+                                              bsm_rp->rp_address, rp_str,
+                                              sizeof(rp_str));
+
+                               if (uj) {
+                                       json_row = json_object_new_object();
+                                       json_object_string_add(
+                                               json_row, "Rp Address", rp_str);
+                                       json_object_int_add(
+                                               json_row, "Rp HoldTime",
+                                               bsm_rp->rp_holdtime);
+                                       json_object_int_add(json_row,
+                                                           "Rp Priority",
+                                                           bsm_rp->rp_prio);
+                                       json_object_int_add(json_row,
+                                                           "Hash Val",
+                                                           bsm_rp->hash);
+                                       json_object_object_add(
+                                               json_group, rp_str, json_row);
+
+                               } else {
+                                       vty_out(vty,
+                                               "%-15s %-15u %-15u %-15u\n",
+                                               rp_str, bsm_rp->rp_prio,
+                                               bsm_rp->rp_holdtime,
+                                               bsm_rp->hash);
+                               }
+                       }
+                       if (!bsgrp->bsrp_list->count && !uj)
+                               vty_out(vty, "Active List is empty.\n");
+               }
+
+               if (uj) {
+                       json_object_int_add(json_group, "Pending RP count",
+                                           bsgrp->pend_rp_cnt);
+               } else {
+                       vty_out(vty, "(PENDING)\n");
+                       vty_out(vty, "Pending RP count :%d\n",
+                               bsgrp->pend_rp_cnt);
+                       if (bsgrp->pend_rp_cnt)
+                               vty_out(vty, "%-15s %-15s %-15s %-15s\n",
+                                       "Rp Address", "priority", "Holdtime",
+                                       "Hash");
+               }
+
+               if (bsgrp->partial_bsrp_list) {
+                       for (ALL_LIST_ELEMENTS_RO(bsgrp->partial_bsrp_list,
+                                                 rpnode, bsm_rp)) {
+                               char rp_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address,
+                                              rp_str, sizeof(rp_str));
+
+                               if (uj) {
+                                       json_row = json_object_new_object();
+                                       json_object_string_add(
+                                               json_row, "Rp Address", rp_str);
+                                       json_object_int_add(
+                                               json_row, "Rp HoldTime",
+                                               bsm_rp->rp_holdtime);
+                                       json_object_int_add(json_row,
+                                                           "Rp Priority",
+                                                           bsm_rp->rp_prio);
+                                       json_object_int_add(json_row,
+                                                           "Hash Val",
+                                                           bsm_rp->hash);
+                                       json_object_object_add(
+                                               json_group, rp_str, json_row);
+                               } else {
+                                       vty_out(vty,
+                                               "%-15s %-15u %-15u %-15u\n",
+                                               rp_str, bsm_rp->rp_prio,
+                                               bsm_rp->rp_holdtime,
+                                               bsm_rp->hash);
+                               }
+                       }
+                       if (!bsgrp->partial_bsrp_list->count && !uj)
+                               vty_out(vty, "Partial List is empty\n");
+               }
+
+               if (!uj)
+                       vty_out(vty, "\n");
+       }
+
+       if (uj) {
+               vty_out(vty, "%s\n", json_object_to_json_string_ext(
+                                            json, JSON_C_TO_STRING_PRETTY));
+               json_object_free(json);
+       }
+}
+
+/* pim statistics - only BSM-related counters are added for now.
+ * Other pim statistics can be added here over time.
+ */
+static void pim_show_statistics(struct pim_instance *pim, struct vty *vty,
+                               const char *ifname, bool uj)
+{
+       json_object *json = NULL;
+       struct interface *ifp;
+
+       if (uj) {
+               json = json_object_new_object();
+               json_object_int_add(json, "Number of Received BSMs",
+                                   pim->bsm_rcvd);
+               json_object_int_add(json, "Number of Forwarded BSMs",
+                                   pim->bsm_sent);
+               json_object_int_add(json, "Number of Dropped BSMs",
+                                   pim->bsm_dropped);
+       } else {
+               vty_out(vty, "BSM Statistics :\n");
+               vty_out(vty, "----------------\n");
+               vty_out(vty, "Number of Received BSMs : %ld\n", pim->bsm_rcvd);
+               vty_out(vty, "Number of Forwarded BSMs : %ld\n", pim->bsm_sent);
+               vty_out(vty, "Number of Dropped BSMs  : %ld\n",
+                       pim->bsm_dropped);
+       }
+
+       vty_out(vty, "\n");
+
+       /* scan interfaces */
+       FOR_ALL_INTERFACES (pim->vrf, ifp) {
+               struct pim_interface *pim_ifp = ifp->info;
+
+               if (ifname && strcmp(ifname, ifp->name))
+                       continue;
+
+               if (!pim_ifp)
+                       continue;
+
+               if (!uj) {
+                       vty_out(vty, "Interface : %s\n", ifp->name);
+                       vty_out(vty, "-------------------\n");
+                       vty_out(vty,
+                               "Number of BSMs dropped due to config miss : %u\n",
+                               pim_ifp->pim_ifstat_bsm_cfg_miss);
+                       vty_out(vty, "Number of unicast BSMs dropped : %u\n",
+                               pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+                       vty_out(vty,
+                               "Number of BSMs dropped due to invalid scope zone : %u\n",
+                               pim_ifp->pim_ifstat_bsm_invalid_sz);
+               } else {
+
+                       json_object *json_row = NULL;
+
+                       json_row = json_object_new_object();
+
+                       json_object_string_add(json_row, "If Name", ifp->name);
+                       json_object_int_add(
+                               json_row,
+                               "Number of BSMs dropped due to config miss",
+                               pim_ifp->pim_ifstat_bsm_cfg_miss);
+                       json_object_int_add(
+                               json_row, "Number of unicast BSMs dropped",
+                               pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+                       json_object_int_add(json_row,
+                                           "Number of BSMs dropped due to invalid scope zone",
+                                           pim_ifp->pim_ifstat_bsm_invalid_sz);
+                       json_object_object_add(json, ifp->name, json_row);
+               }
+               vty_out(vty, "\n");
+       }
+
+       if (uj) {
+               vty_out(vty, "%s\n", json_object_to_json_string_ext(
+                                            json, JSON_C_TO_STRING_PRETTY));
+               json_object_free(json);
+       }
+}
+
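For orientation, the non-JSON branch above prints roughly the following layout (assembled from the vty_out() format strings; the interface name and counter values are illustrative):

    BSM Statistics :
    ----------------
    Number of Received BSMs : 12
    Number of Forwarded BSMs : 10
    Number of Dropped BSMs  : 2

    Interface : eth0
    -------------------
    Number of BSMs dropped due to config miss : 0
    Number of unicast BSMs dropped : 0
    Number of BSMs dropped due to invalid scope zone : 0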
+static void clear_pim_statistics(struct pim_instance *pim)
+{
+       struct interface *ifp;
+
+       pim->bsm_rcvd = 0;
+       pim->bsm_sent = 0;
+       pim->bsm_dropped = 0;
+
+       /* scan interfaces */
+       FOR_ALL_INTERFACES (pim->vrf, ifp) {
+               struct pim_interface *pim_ifp = ifp->info;
+
+               if (!pim_ifp)
+                       continue;
+
+               pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
+               pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
+               pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+       }
+}
+
 static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
 {
        struct interface *ifp;
@@ -3207,6 +3628,82 @@ static void igmp_show_source_retransmission(struct pim_instance *pim,
        }                         /* scan interfaces */
 }
 
+static void pim_show_bsr(struct pim_instance *pim,
+                        struct vty *vty,
+                        bool uj)
+{
+       char uptime[10];
+       char last_bsm_seen[10];
+       time_t now;
+       char bsr_state[20];
+       char bsr_str[PREFIX_STRLEN];
+       json_object *json = NULL;
+
+       vty_out(vty, "PIMv2 Bootstrap information\n");
+
+       if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) {
+               strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+               pim_time_uptime(uptime, sizeof(uptime),
+                               pim->global_scope.current_bsr_first_ts);
+               pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+                               pim->global_scope.current_bsr_last_ts);
+       } else {
+               pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr,
+                              bsr_str, sizeof(bsr_str));
+               now = pim_time_monotonic_sec();
+               pim_time_uptime(uptime, sizeof(uptime),
+                               (now - pim->global_scope.current_bsr_first_ts));
+               pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+                               now - pim->global_scope.current_bsr_last_ts);
+       }
+
+       switch (pim->global_scope.state) {
+       case NO_INFO:
+               strncpy(bsr_state, "NO_INFO", sizeof(bsr_state));
+               break;
+       case ACCEPT_ANY:
+               strncpy(bsr_state, "ACCEPT_ANY", sizeof(bsr_state));
+               break;
+       case ACCEPT_PREFERRED:
+               strncpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state));
+               break;
+       default:
+               strncpy(bsr_state, "", sizeof(bsr_state));
+       }
+
+       if (uj) {
+               json = json_object_new_object();
+               json_object_string_add(json, "bsr", bsr_str);
+               json_object_int_add(json, "priority",
+                                   pim->global_scope.current_bsr_prio);
+               json_object_int_add(json, "fragment_tag",
+                                   pim->global_scope.bsm_frag_tag);
+               json_object_string_add(json, "state", bsr_state);
+               json_object_string_add(json, "upTime", uptime);
+               json_object_string_add(json, "last_bsm_seen", last_bsm_seen);
+       } else {
+               vty_out(vty, "Current preferred BSR address: %s\n", bsr_str);
+               vty_out(vty,
+                       "Priority        Fragment-Tag       State           UpTime\n");
+               vty_out(vty, "  %-12d    %-12d    %-13s    %7s\n",
+                       pim->global_scope.current_bsr_prio,
+                       pim->global_scope.bsm_frag_tag,
+                       bsr_state,
+                       uptime);
+               vty_out(vty, "Last BSM seen: %s\n", last_bsm_seen);
+       }
+
+       if (uj) {
+               vty_out(vty, "%s\n", json_object_to_json_string_ext(
+                                            json, JSON_C_TO_STRING_PRETTY));
+               json_object_free(json);
+       }
+}
+
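A sketch of the non-JSON output of pim_show_bsr(), assembled from the format strings above; the BSR address, priority, fragment tag and timers are illustrative values only:

    PIMv2 Bootstrap information
    Current preferred BSR address: 192.0.2.1
    Priority        Fragment-Tag       State           UpTime
      64              1234            ACCEPT_ANY       00:05:12
    Last BSM seen: 00:00:43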
 static void clear_igmp_interfaces(struct pim_instance *pim)
 {
        struct interface *ifp;
@@ -3282,6 +3779,25 @@ DEFUN (clear_ip_igmp_interfaces,
        return CMD_SUCCESS;
 }
 
+DEFUN (clear_ip_pim_statistics,
+       clear_ip_pim_statistics_cmd,
+       "clear ip pim statistics [vrf NAME]",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM statistics\n")
+{
+       int idx = 2;
+       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+       if (!vrf)
+               return CMD_WARNING;
+
+       clear_pim_statistics(vrf->info);
+       return CMD_SUCCESS;
+}
+
 static void mroute_add_all(struct pim_instance *pim)
 {
        struct listnode *node;
@@ -3400,6 +3916,8 @@ DEFUN (clear_ip_pim_interface_traffic,
                pim_ifp->pim_ifstat_reg_stop_send = 0;
                pim_ifp->pim_ifstat_assert_recv = 0;
                pim_ifp->pim_ifstat_assert_send = 0;
+               pim_ifp->pim_ifstat_bsm_rx = 0;
+               pim_ifp->pim_ifstat_bsm_tx = 0;
        }
 
        return CMD_SUCCESS;
@@ -4459,6 +4977,76 @@ DEFUN (show_ip_pim_interface_traffic,
        return CMD_SUCCESS;
 }
 
+DEFUN (show_ip_pim_bsm_db,
+       show_ip_pim_bsm_db_cmd,
+       "show ip pim bsm-database [vrf NAME] [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       VRF_CMD_HELP_STR
+       "PIM cached bsm packets information\n"
+       JSON_STR)
+{
+       int idx = 2;
+       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+       bool uj = use_json(argc, argv);
+
+       if (!vrf)
+               return CMD_WARNING;
+
+       pim_show_bsm_db(vrf->info, vty, uj);
+       return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_bsrp,
+       show_ip_pim_bsrp_cmd,
+       "show ip pim bsrp-info [vrf NAME] [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       VRF_CMD_HELP_STR
+       "PIM cached group-rp mappings information\n"
+       JSON_STR)
+{
+       int idx = 2;
+       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+       bool uj = use_json(argc, argv);
+
+       if (!vrf)
+               return CMD_WARNING;
+
+       pim_show_group_rp_mappings_info(vrf->info, vty, uj);
+
+       return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_statistics,
+       show_ip_pim_statistics_cmd,
+       "show ip pim [vrf NAME] statistics [interface WORD] [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       VRF_CMD_HELP_STR
+       "PIM statistics\n"
+       "interface\n"
+       "PIM interface\n"
+       JSON_STR)
+{
+       int idx = 2;
+       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+       bool uj = use_json(argc, argv);
+
+       if (!vrf)
+               return CMD_WARNING;
+
+       if (argv_find(argv, argc, "WORD", &idx))
+               pim_show_statistics(vrf->info, vty, argv[idx]->arg, uj);
+       else
+               pim_show_statistics(vrf->info, vty, NULL, uj);
+
+       return CMD_SUCCESS;
+}
+
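Taken together, the three DEFUNs above (plus clear_ip_pim_statistics earlier in this file) expose the new BSM state through these vtysh commands, syntax exactly as declared:

    show ip pim bsm-database [vrf NAME] [json]
    show ip pim bsrp-info [vrf NAME] [json]
    show ip pim [vrf NAME] statistics [interface WORD] [json]
    clear ip pim statistics [vrf NAME]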
 static void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty)
 {
        struct interface *ifp;
@@ -5233,7 +5821,13 @@ static int pim_rp_cmd_worker(struct pim_instance *pim, struct vty *vty,
 {
        int result;
 
-       result = pim_rp_new(pim, rp, group, plist);
+       result = pim_rp_new_config(pim, rp, group, plist);
+
+       if (result == PIM_GROUP_BAD_ADDR_MASK_COMBO) {
+               vty_out(vty, "%% Inconsistent address and mask: %s\n",
+                       group);
+               return CMD_WARNING_CONFIG_FAILED;
+       }
 
        if (result == PIM_GROUP_BAD_ADDRESS) {
                vty_out(vty, "%% Bad group address specified: %s\n", group);
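The new PIM_GROUP_BAD_ADDR_MASK_COMBO branch rejects a group range whose host bits are not all zero (the prefix_same()/apply_mask() test added to pim_rp_new_config() further down in this diff). Below is a minimal standalone sketch of the same check using only the socket API; the function name and sample prefixes are illustrative, not FRR code:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when the host bits below the prefix length are all zero,
     * i.e. the address/mask combination is consistent. */
    static int group_mask_combo_ok(const char *addr, int prefixlen)
    {
            struct in_addr a;
            uint32_t host, mask;

            if (inet_pton(AF_INET, addr, &a) != 1)
                    return 0;

            host = ntohl(a.s_addr);
            mask = prefixlen ? 0xffffffffu << (32 - prefixlen) : 0;

            return (host & ~mask) == 0;
    }

    int main(void)
    {
            /* 224.0.0.0/4 is consistent, 225.1.1.1/8 is not. */
            printf("224.0.0.0/4 -> %d\n", group_mask_combo_ok("224.0.0.0", 4));
            printf("225.1.1.1/8 -> %d\n", group_mask_combo_ok("225.1.1.1", 8));
            return 0;
    }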
@@ -5559,7 +6153,7 @@ static int pim_no_rp_cmd_worker(struct pim_instance *pim, struct vty *vty,
                                const char *rp, const char *group,
                                const char *plist)
 {
-       int result = pim_rp_del(pim, rp, group, plist);
+       int result = pim_rp_del_config(pim, rp, group, plist);
 
        if (result == PIM_GROUP_BAD_ADDRESS) {
                vty_out(vty, "%% Bad group address specified: %s\n", group);
@@ -5778,6 +6372,27 @@ DEFUN (show_ip_pim_group_type,
        return CMD_SUCCESS;
 }
 
+DEFUN (show_ip_pim_bsr,
+       show_ip_pim_bsr_cmd,
+       "show ip pim bsr [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       "bootstrap router information\n"
+       JSON_STR)
+{
+       int idx = 2;
+       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+       bool uj = use_json(argc, argv);
+
+       if (!vrf)
+               return CMD_WARNING;
+
+       pim_show_bsr(vrf->info, vty, uj);
+
+       return CMD_SUCCESS;
+}
+
 DEFUN (ip_ssmpingd,
        ip_ssmpingd_cmd,
        "ip ssmpingd [A.B.C.D]",
@@ -7226,6 +7841,7 @@ DEFUN (debug_pim,
        PIM_DO_DEBUG_PIM_TRACE;
        PIM_DO_DEBUG_MSDP_EVENTS;
        PIM_DO_DEBUG_MSDP_PACKETS;
+       PIM_DO_DEBUG_BSM;
        return CMD_SUCCESS;
 }
 
@@ -7244,6 +7860,7 @@ DEFUN (no_debug_pim,
 
        PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
        PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+       PIM_DONT_DEBUG_BSM;
 
        return CMD_SUCCESS;
 }
@@ -7629,6 +8246,30 @@ DEFUN (no_debug_mtrace,
        return CMD_SUCCESS;
 }
 
+DEFUN (debug_bsm,
+       debug_bsm_cmd,
+       "debug pim bsm",
+       DEBUG_STR
+       DEBUG_PIM_STR
+       DEBUG_PIM_BSM_STR)
+{
+       PIM_DO_DEBUG_BSM;
+       return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_bsm,
+       no_debug_bsm_cmd,
+       "no debug pim bsm",
+       NO_STR
+       DEBUG_STR
+       DEBUG_PIM_STR
+       DEBUG_PIM_BSM_STR)
+{
+       PIM_DONT_DEBUG_BSM;
+       return CMD_SUCCESS;
+}
+
 DEFUN_NOSH (show_debugging_pim,
            show_debugging_pim_cmd,
            "show debugging [pim]",
@@ -7752,6 +8393,94 @@ DEFUN (no_ip_pim_bfd,
        return CMD_SUCCESS;
 }
 
+DEFUN (ip_pim_bsm,
+       ip_pim_bsm_cmd,
+       "ip pim bsm",
+       IP_STR
+       PIM_STR
+       "Enables BSM support on the interface\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct pim_interface *pim_ifp = ifp->info;
+
+       if (!pim_ifp) {
+               if (!pim_cmd_interface_add(ifp)) {
+                       vty_out(vty, "Could not enable PIM SM on interface\n");
+                       return CMD_WARNING;
+               }
+       }
+
+       pim_ifp = ifp->info;
+       pim_ifp->bsm_enable = true;
+
+       return CMD_SUCCESS;
+}
+
+DEFUN (no_ip_pim_bsm,
+       no_ip_pim_bsm_cmd,
+       "no ip pim bsm",
+       NO_STR
+       IP_STR
+       PIM_STR
+       "Disables BSM support\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct pim_interface *pim_ifp = ifp->info;
+
+       if (!pim_ifp) {
+               vty_out(vty, "Pim not enabled on this interface\n");
+               return CMD_WARNING;
+       }
+
+       pim_ifp->bsm_enable = false;
+
+       return CMD_SUCCESS;
+}
+
+DEFUN (ip_pim_ucast_bsm,
+       ip_pim_ucast_bsm_cmd,
+       "ip pim unicast-bsm",
+       IP_STR
+       PIM_STR
+       "Accept/Send unicast BSM on the interface\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct pim_interface *pim_ifp = ifp->info;
+
+       if (!pim_ifp) {
+               if (!pim_cmd_interface_add(ifp)) {
+                       vty_out(vty, "Could not enable PIM SM on interface\n");
+                       return CMD_WARNING;
+               }
+       }
+
+       pim_ifp = ifp->info;
+       pim_ifp->ucast_bsm_accept = true;
+
+       return CMD_SUCCESS;
+}
+
+DEFUN (no_ip_pim_ucast_bsm,
+       no_ip_pim_ucast_bsm_cmd,
+       "no ip pim unicast-bsm",
+       NO_STR
+       IP_STR
+       PIM_STR
+       "Block send/receive unicast BSM on this interface\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct pim_interface *pim_ifp = ifp->info;
+
+       if (!pim_ifp) {
+               vty_out(vty, "Pim not enabled on this interface\n");
+               return CMD_WARNING;
+       }
+
+       pim_ifp->ucast_bsm_accept = false;
+
+       return CMD_SUCCESS;
+}
+
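A hedged configuration example exercising the new per-interface knobs together with the new debug command; the interface name is illustrative, and both flags default to enabled in pim_if_new() further down in this diff:

    debug pim bsm
    !
    interface eth0
     ip pim bsm
     no ip pim unicast-bsm
    !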
 #if HAVE_BFDD > 0
 DEFUN_HIDDEN(
 #else
@@ -9349,6 +10078,7 @@ void pim_cmd_init(void)
        install_element(VIEW_NODE, &show_ip_pim_upstream_rpf_cmd);
        install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
        install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
+       install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
        install_element(VIEW_NODE, &show_ip_multicast_cmd);
        install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
        install_element(VIEW_NODE, &show_ip_mroute_cmd);
@@ -9360,6 +10090,9 @@ void pim_cmd_init(void)
        install_element(VIEW_NODE, &show_debugging_pim_cmd);
        install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
        install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+       install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
+       install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
+       install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
 
        install_element(ENABLE_NODE, &clear_ip_interfaces_cmd);
        install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd);
@@ -9367,6 +10100,7 @@ void pim_cmd_init(void)
        install_element(ENABLE_NODE, &clear_ip_pim_interfaces_cmd);
        install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
        install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
+       install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
 
        install_element(ENABLE_NODE, &debug_igmp_cmd);
        install_element(ENABLE_NODE, &no_debug_igmp_cmd);
@@ -9414,6 +10148,8 @@ void pim_cmd_init(void)
        install_element(ENABLE_NODE, &no_debug_msdp_packets_cmd);
        install_element(ENABLE_NODE, &debug_mtrace_cmd);
        install_element(ENABLE_NODE, &no_debug_mtrace_cmd);
+       install_element(ENABLE_NODE, &debug_bsm_cmd);
+       install_element(ENABLE_NODE, &no_debug_bsm_cmd);
 
        install_element(CONFIG_NODE, &debug_igmp_cmd);
        install_element(CONFIG_NODE, &no_debug_igmp_cmd);
@@ -9457,6 +10193,8 @@ void pim_cmd_init(void)
        install_element(CONFIG_NODE, &no_debug_msdp_packets_cmd);
        install_element(CONFIG_NODE, &debug_mtrace_cmd);
        install_element(CONFIG_NODE, &no_debug_mtrace_cmd);
+       install_element(CONFIG_NODE, &debug_bsm_cmd);
+       install_element(CONFIG_NODE, &no_debug_bsm_cmd);
 
        install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
        install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
@@ -9480,6 +10218,11 @@ void pim_cmd_init(void)
        install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);
        install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
        install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);
+       /* Install BSM command */
+       install_element(INTERFACE_NODE, &ip_pim_bsm_cmd);
+       install_element(INTERFACE_NODE, &no_ip_pim_bsm_cmd);
+       install_element(INTERFACE_NODE, &ip_pim_ucast_bsm_cmd);
+       install_element(INTERFACE_NODE, &no_ip_pim_ucast_bsm_cmd);
        /* Install BFD command */
        install_element(INTERFACE_NODE, &ip_pim_bfd_cmd);
        install_element(INTERFACE_NODE, &ip_pim_bfd_param_cmd);
index 67d6e43c34a5a959958464034409f334b69388f6..558f28231ba2278a2ddf9162259f9c1363ead4e4 100644 (file)
@@ -65,6 +65,8 @@
 #define DEBUG_MSDP_INTERNAL_STR                     "MSDP protocol internal\n"
 #define DEBUG_MSDP_PACKETS_STR                      "MSDP protocol packets\n"
 #define DEBUG_MTRACE_STR                            "Mtrace protocol activity\n"
+#define DEBUG_PIM_BSM_STR                           "BSR message processing activity\n"
+
 
 void pim_cmd_init(void);
 
index e482d321a41b6fef736f0eeeb5843665132f5b9b..721d153d76f7638a051e688f37c8be1ea6a2713f 100644 (file)
@@ -31,6 +31,7 @@
 #include "pim_iface.h"
 #include "pim_neighbor.h"
 #include "pim_upstream.h"
+#include "pim_bsm.h"
 
 static void on_trace(const char *label, struct interface *ifp,
                     struct in_addr src)
@@ -367,6 +368,12 @@ int pim_hello_recv(struct interface *ifp, struct in_addr src_addr,
                        }
                        FREE_ADDR_LIST_THEN_RETURN(-8);
                }
+               /* Forward BSM if required */
+               if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+                       if (PIM_DEBUG_PIM_HELLO)
+                               zlog_debug("%s: forwarding bsm to new nbr failed",
+                                          __PRETTY_FUNCTION__);
+               }
 
                /* actual addr list has been saved under neighbor */
                return 0;
@@ -420,6 +427,12 @@ int pim_hello_recv(struct interface *ifp, struct in_addr src_addr,
                                }
                                FREE_ADDR_LIST_THEN_RETURN(-9);
                        }
+                       /* Forward BSM if required */
+                       if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+                               if (PIM_DEBUG_PIM_HELLO)
+                                       zlog_debug("%s: forwarding bsm to new nbr failed",
+                                                  __PRETTY_FUNCTION__);
+                       }
                        /* actual addr list is saved under neighbor */
                        return 0;
 
index 0fb7f176ce8e843989b56ee21f5ba0d469060bf5..08be38c138abe570908e2949650f3152652b172b 100644 (file)
@@ -132,6 +132,10 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
        pim_ifp->igmp_specific_query_max_response_time_dsec =
                IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;
 
+       /* BSM config on interface: TRUE by default */
+       pim_ifp->bsm_enable = true;
+       pim_ifp->ucast_bsm_accept = true;
+
        /*
          RFC 3376: 8.3. Query Response Interval
          The number of seconds represented by the [Query Response Interval]
index fe96c077582cfd10b59d394aaef02b3c78ca40da..ab138589bd1252e41891a9409a1223ea50585329 100644 (file)
@@ -128,6 +128,8 @@ struct pim_interface {
        bool activeactive;
 
        int64_t pim_ifstat_start; /* start timestamp for stats */
+       uint64_t pim_ifstat_bsm_rx;
+       uint64_t pim_ifstat_bsm_tx;
        uint32_t pim_ifstat_hello_sent;
        uint32_t pim_ifstat_hello_sendfail;
        uint32_t pim_ifstat_hello_recv;
@@ -142,7 +144,12 @@ struct pim_interface {
        uint32_t pim_ifstat_reg_stop_send;
        uint32_t pim_ifstat_assert_recv;
        uint32_t pim_ifstat_assert_send;
+       uint32_t pim_ifstat_bsm_cfg_miss;
+       uint32_t pim_ifstat_ucast_bsm_cfg_miss;
+       uint32_t pim_ifstat_bsm_invalid_sz;
        struct bfd_info *bfd_info;
+       bool bsm_enable; /* bsm processing enable */
+       bool ucast_bsm_accept; /* ucast bsm processing */
 };
 
 /*
index a2bf3d27839b64969df4d0cdc3cb8f6c44529213..6848d2dabb73a7d95df03db2703e85d1add827d8 100644 (file)
@@ -33,6 +33,7 @@
 #include "pim_static.h"
 #include "pim_ssmpingd.h"
 #include "pim_vty.h"
+#include "pim_bsm.h"
 
 static void pim_instance_terminate(struct pim_instance *pim)
 {
@@ -50,6 +51,8 @@ static void pim_instance_terminate(struct pim_instance *pim)
 
        pim_rp_free(pim);
 
+       pim_bsm_proc_free(pim);
+
        /* Traverse and cleanup rpf_hash */
        if (pim->rpf_hash) {
                hash_clean(pim->rpf_hash, (void *)pim_rp_list_hash_clean);
@@ -106,6 +109,8 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
 
        pim_rp_init(pim);
 
+       pim_bsm_proc_init(pim);
+
        pim_oil_init(pim);
 
        pim_upstream_init(pim);
index 1740bcc7900d2ced781b72f8edd518f00f0f7186..06d41c4b537d392397e8b8d1ee82d31223bf04b1 100644 (file)
@@ -26,6 +26,7 @@
 #include "pim_str.h"
 #include "pim_msdp.h"
 #include "pim_assert.h"
+#include "pim_bsm.h"
 #include "pim_vxlan_instance.h"
 
 #if defined(HAVE_LINUX_MROUTE_H)
@@ -121,6 +122,11 @@ struct pim_instance {
 
        bool ecmp_enable;
        bool ecmp_rebalance_enable;
+       /* Bsm related */
+       struct bsm_scope global_scope;
+       uint64_t bsm_rcvd;
+       uint64_t bsm_sent;
+       uint64_t bsm_dropped;
 
        /* If we need to rescan all our upstreams */
        struct thread *rpf_cache_refresher;
index cbacaf3ea872733b4c749c3875e2230cf6c68bb2..5e1a4f0c5e6a13a5850b6a3d1268402faca40781 100644 (file)
@@ -519,7 +519,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                group_size = pim_msg_get_jp_group_size(group->sources);
                if (group_size > packet_left) {
                        pim_msg_build_header(pim_msg, packet_size,
-                                            PIM_MSG_TYPE_JOIN_PRUNE);
+                                            PIM_MSG_TYPE_JOIN_PRUNE, false);
                        if (pim_msg_send(pim_ifp->pim_sock_fd,
                                         pim_ifp->primary_address,
                                         qpim_all_pim_routers_addr, pim_msg,
@@ -576,7 +576,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                if (packet_left < sizeof(struct pim_jp_groups)
                    || msg->num_groups == 255) {
                        pim_msg_build_header(pim_msg, packet_size,
-                                            PIM_MSG_TYPE_JOIN_PRUNE);
+                                            PIM_MSG_TYPE_JOIN_PRUNE, false);
                        if (pim_msg_send(pim_ifp->pim_sock_fd,
                                         pim_ifp->primary_address,
                                         qpim_all_pim_routers_addr, pim_msg,
@@ -596,7 +596,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
        if (!new_packet) {
                // msg->num_groups = htons (msg->num_groups);
                pim_msg_build_header(pim_msg, packet_size,
-                                    PIM_MSG_TYPE_JOIN_PRUNE);
+                                    PIM_MSG_TYPE_JOIN_PRUNE, false);
                if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
                                 qpim_all_pim_routers_addr, pim_msg,
                                 packet_size,
index 63688f87e055d92fd918996ab1a53d12f3c58289..2e467502b1feb38261dfbc8ab23a88d40a348791 100644 (file)
@@ -39,7 +39,7 @@
 #include "pim_oil.h"
 
 void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
-                         uint8_t pim_msg_type)
+                         uint8_t pim_msg_type, bool no_fwd)
 {
        struct pim_msg_header *header = (struct pim_msg_header *)pim_msg;
 
@@ -48,6 +48,7 @@ void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
         */
        header->ver = PIM_PROTO_VERSION;
        header->type = pim_msg_type;
+       header->Nbit = no_fwd;
        header->reserved = 0;
 
 
index ad9b5d9c01e6d457762acbff4dde100c6453bb29..5f5030396750cc4cb9b0a6694e4cfd0b965d8d27 100644 (file)
@@ -23,6 +23,8 @@
 #include <netinet/in.h>
 
 #include "pim_jp_agg.h"
+
+#define PIM_HDR_LEN  sizeof(struct pim_msg_header)
 /*
   Number       Description
   ----------   ------------------
@@ -41,11 +43,20 @@ enum pim_msg_address_family {
 
 /*
  * Network Order pim_msg_hdr
+ * =========================
+ *  PIM Header definition as per RFC 5059. The N bit indicates the
+ *  do-not-forward option in the PIM Bootstrap Message.
+ *    0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |PIM Ver| Type  |N|  Reserved   |           Checksum            |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
 struct pim_msg_header {
        uint8_t type : 4;
        uint8_t ver : 4;
-       uint8_t reserved;
+       uint8_t Nbit : 1; /* No Fwd Bit */
+       uint8_t reserved : 7;
        uint16_t checksum;
 } __attribute__((packed));
 
@@ -58,7 +69,9 @@ struct pim_encoded_ipv4_unicast {
 struct pim_encoded_group_ipv4 {
        uint8_t ne;
        uint8_t family;
-       uint8_t reserved;
+       uint8_t bidir : 1;      /* Bidir bit */
+       uint8_t reserved : 6;   /* Reserved */
+       uint8_t sz : 1;         /* scope zone bit */
        uint8_t mask;
        struct in_addr addr;
 } __attribute__((packed));
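A standalone sketch that decodes the first two header bytes with plain shifts, following the RFC 5059 figure above rather than the compiler-dependent bit-field layout of struct pim_msg_header; the sample bytes (version 2, type 4 = Bootstrap, N set) are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct parsed_pim_hdr {
            uint8_t ver;
            uint8_t type;
            uint8_t no_fwd;
    };

    /* Parse the first two header bytes in wire (network) order. */
    static struct parsed_pim_hdr parse_pim_hdr_start(const uint8_t *wire)
    {
            struct parsed_pim_hdr h;

            h.ver = wire[0] >> 4;          /* high nibble: PIM version     */
            h.type = wire[0] & 0x0f;       /* low nibble: message type     */
            h.no_fwd = (wire[1] >> 7) & 1; /* leading bit of byte 1: N bit */

            return h;
    }

    int main(void)
    {
            /* Illustrative bytes: version 2, type 4 (Bootstrap), N bit set. */
            const uint8_t wire[2] = {0x24, 0x80};
            struct parsed_pim_hdr h = parse_pim_hdr_start(wire);

            printf("ver=%u type=%u no_fwd=%u\n", h.ver, h.type, h.no_fwd);
            return 0;
    }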
@@ -88,7 +101,7 @@ struct pim_jp {
 } __attribute__((packed));
 
 void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
-                         uint8_t pim_msg_type);
+                         uint8_t pim_msg_type, bool no_fwd);
 uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);
 uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr);
 
index 48b9f1f284a16ba47cc5e371d3bc7690246c662c..1a2f451524fda881d3153b3b1eeb4a8b88272f7b 100644 (file)
@@ -121,6 +121,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
  */
 int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
                              struct pim_upstream *up, struct rp_info *rp,
+                             bool bsr_track_needed,
                              struct pim_nexthop_cache *out_pnc)
 {
        struct pim_nexthop_cache *pnc = NULL;
@@ -157,6 +158,9 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
        if (up != NULL)
                hash_get(pnc->upstream_hash, up, hash_alloc_intern);
 
+       if (bsr_track_needed)
+               pnc->bsr_tracking = true;
+
        if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
                if (out_pnc)
                        memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
@@ -167,7 +171,8 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
 }
 
 void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
-                               struct pim_upstream *up, struct rp_info *rp)
+                               struct pim_upstream *up, struct rp_info *rp,
+                               bool del_bsr_tracking)
 {
        struct pim_nexthop_cache *pnc = NULL;
        struct pim_nexthop_cache lookup;
@@ -208,6 +213,9 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
                if (up)
                        hash_release(pnc->upstream_hash, up);
 
+               if (del_bsr_tracking)
+                       pnc->bsr_tracking = false;
+
                if (PIM_DEBUG_PIM_NHT) {
                        char buf[PREFIX_STRLEN];
                        prefix2str(addr, buf, sizeof buf);
@@ -218,7 +226,8 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
                }
 
                if (pnc->rp_list->count == 0
-                   && pnc->upstream_hash->count == 0) {
+                   && pnc->upstream_hash->count == 0
+                   && pnc->bsr_tracking == false) {
                        pim_sendmsg_zebra_rnh(pim, zclient, pnc,
                                              ZEBRA_NEXTHOP_UNREGISTER);
 
@@ -233,6 +242,169 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
        }
 }
 
+/* Given a source address and a neighbor address, check if the neighbor is one
+ * of the next hops toward the source, searching the zebra route database.
+ */
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+                      struct in_addr ip_src)
+{
+       struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
+       int i = 0;
+       ifindex_t first_ifindex = 0;
+       struct interface *ifp = NULL;
+       struct pim_neighbor *nbr = NULL;
+       int num_ifindex;
+
+       if (addr.s_addr == INADDR_NONE)
+               return 0;
+
+       memset(nexthop_tab, 0,
+              sizeof(struct pim_zlookup_nexthop) * MULTIPATH_NUM);
+       num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, MULTIPATH_NUM,
+                                            addr, PIM_NEXTHOP_LOOKUP_MAX);
+       if (num_ifindex < 1) {
+               char addr_str[INET_ADDRSTRLEN];
+
+               pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str));
+               zlog_warn(
+                       "%s %s: could not find nexthop ifindex for address %s",
+                       __FILE__, __PRETTY_FUNCTION__, addr_str);
+               return 0;
+       }
+
+       while (i < num_ifindex) {
+               first_ifindex = nexthop_tab[i].ifindex;
+
+               ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+               if (!ifp) {
+                       if (PIM_DEBUG_ZEBRA) {
+                               char addr_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<addr?>", addr, addr_str,
+                                              sizeof(addr_str));
+                               zlog_debug(
+                                       "%s %s: could not find interface for ifindex %d (address %s)",
+                                       __FILE__, __PRETTY_FUNCTION__,
+                                       first_ifindex, addr_str);
+                       }
+                       i++;
+                       continue;
+               }
+
+               if (!ifp->info) {
+                       if (PIM_DEBUG_ZEBRA) {
+                               char addr_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<addr?>", addr, addr_str,
+                                              sizeof(addr_str));
+                               zlog_debug(
+                                       "%s: multicast not enabled on input interface %s (ifindex=%d, RPF for source %s)",
+                                       __PRETTY_FUNCTION__, ifp->name,
+                                       first_ifindex, addr_str);
+                       }
+                       i++;
+                       continue;
+               }
+
+               if (!pim_if_connected_to_source(ifp, addr)) {
+                       nbr = pim_neighbor_find(
+                               ifp, nexthop_tab[i].nexthop_addr.u.prefix4);
+                       if (PIM_DEBUG_PIM_TRACE_DETAIL)
+                               zlog_debug("ifp name: %s, pim nbr: %p",
+                                          ifp->name, nbr);
+                       if (!nbr && !if_is_loopback(ifp)) {
+                               i++;
+                               continue;
+                       }
+               }
+
+               if (nexthop_tab[i].nexthop_addr.u.prefix4.s_addr
+                   == ip_src.s_addr)
+                       return 1;
+
+               i++;
+       }
+
+       return 0;
+}
+
+/* Given a source address and a neighbor address, check if the neighbor is one
+ * of the next hops toward the source, searching the PIM nexthop cache.
+ */
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+                                struct in_addr ip_src)
+{
+       struct pim_rpf rpf;
+       ifindex_t first_ifindex;
+       struct interface *ifp = NULL;
+       uint8_t nh_iter = 0;
+       struct pim_neighbor *nbr = NULL;
+       struct nexthop *nh_node = NULL;
+       struct pim_nexthop_cache *pnc = NULL;
+
+       memset(&rpf, 0, sizeof(struct pim_rpf));
+       rpf.rpf_addr.family = AF_INET;
+       rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
+       rpf.rpf_addr.u.prefix4 = addr;
+
+       pnc = pim_nexthop_cache_find(pim, &rpf);
+       if (!pnc || !pnc->nexthop_num)
+               return 0;
+
+       for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+               first_ifindex = nh_node->ifindex;
+               ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+               if (!ifp) {
+                       if (PIM_DEBUG_PIM_NHT) {
+                               char addr_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<addr?>", addr, addr_str,
+                                              sizeof(addr_str));
+                               zlog_debug(
+                                       "%s %s: could not find interface for ifindex %d (address %s(%s))",
+                                       __FILE__, __PRETTY_FUNCTION__,
+                                       first_ifindex, addr_str,
+                                       pim->vrf->name);
+                       }
+                       nh_iter++;
+                       continue;
+               }
+               if (!ifp->info) {
+                       if (PIM_DEBUG_PIM_NHT) {
+                               char addr_str[INET_ADDRSTRLEN];
+
+                               pim_inet4_dump("<addr?>", addr, addr_str,
+                                              sizeof(addr_str));
+                               zlog_debug(
+                                       "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %s)",
+                                       __PRETTY_FUNCTION__, ifp->name,
+                                       pim->vrf->name, first_ifindex,
+                                       addr_str);
+                       }
+                       nh_iter++;
+                       continue;
+               }
+
+               if (!pim_if_connected_to_source(ifp, addr)) {
+                       nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4);
+                       if (!nbr && !if_is_loopback(ifp)) {
+                               if (PIM_DEBUG_PIM_NHT)
+                                       zlog_debug(
+                                               "%s: pim nbr not found on input interface %s(%s)",
+                                               __PRETTY_FUNCTION__, ifp->name,
+                                               pim->vrf->name);
+                               nh_iter++;
+                               continue;
+                       }
+               }
+
+               if (nh_node->gate.ipv4.s_addr == ip_src.s_addr)
+                       return 1;
+       }
+
+       return 0;
+}
+
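A hypothetical caller sketch (pim_bsm.c itself is not part of this excerpt): the BSM receive path could use the two helpers above to verify that a bootstrap packet's IP source is a nexthop toward the advertised BSR, preferring the cached NHT answer and falling back to the zebra lookup. The function name and call order are assumptions:

    static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
                                  struct in_addr ip_src)
    {
            /* Cached nexthop answer first; zebra lookup as a fallback. */
            if (pim_nexthop_match_nht_cache(pim, bsr, ip_src))
                    return true;

            return pim_nexthop_match(pim, bsr, ip_src);
    }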
 void pim_rp_nexthop_del(struct rp_info *rp_info)
 {
        rp_info->rp.source_nexthop.interface = NULL;
index e7a5fa77209ba7987192b5c2b00a6c39139f7c2d..12dbf167d1fc044c53731ef389b68455d087bf30 100644 (file)
@@ -45,14 +45,22 @@ struct pim_nexthop_cache {
 
        struct list *rp_list;
        struct hash *upstream_hash;
+       /* Ideally this should be a list of scope zones, but for now a
+        * single bool indicating bsr_tracking is enough.
+        * Later it can be turned into a list of scope zones so that the
+        * same BSR can be tracked for multiple scope zones.
+        */
+       bool bsr_tracking;
 };
 
 int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
 int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
                              struct pim_upstream *up, struct rp_info *rp,
+                             bool bsr_track_needed,
                              struct pim_nexthop_cache *out_pnc);
 void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
-                               struct pim_upstream *up, struct rp_info *rp);
+                               struct pim_upstream *up, struct rp_info *rp,
+                               bool del_bsr_tracking);
 struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
                                                 struct pim_rpf *rpf);
 uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
@@ -64,4 +72,9 @@ void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
 int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
                                     struct prefix *src, struct prefix *grp);
 void pim_rp_nexthop_del(struct rp_info *rp_info);
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+                      struct in_addr ip_src);
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+                                struct in_addr ip_src);
+
 #endif
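A hypothetical sketch of how the new boolean arguments might be used to register and unregister nexthop tracking for an elected BSR address; existing callers in this diff pass false to keep their previous behaviour, and the helper name and the NULL up/rp arguments here are assumptions:

    static void pim_bsr_nht_update(struct pim_instance *pim, struct in_addr bsr,
                                   bool track)
    {
            struct prefix p;

            memset(&p, 0, sizeof(p));
            p.family = AF_INET;
            p.prefixlen = IPV4_MAX_BITLEN;
            p.u.prefix4 = bsr;

            if (track)
                    pim_find_or_track_nexthop(pim, &p, NULL, NULL, true, NULL);
            else
                    pim_delete_tracked_nexthop(pim, &p, NULL, NULL, true);
    }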
index 71b0d479281be1de3b66cd138f32d8a1d562d8cd..12b28ed9af68015f44d2b858929b4842967e7648 100644 (file)
@@ -39,6 +39,7 @@
 #include "pim_msg.h"
 #include "pim_register.h"
 #include "pim_errors.h"
+#include "pim_bsm.h"
 
 static int on_pim_hello_send(struct thread *t);
 static int pim_hello_send(struct interface *ifp, uint16_t holdtime);
@@ -148,6 +149,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
        uint16_t checksum;     /* computed checksum */
        struct pim_neighbor *neigh;
        struct pim_msg_header *header;
+       bool   no_fwd;
 
        if (len < sizeof(*ip_hdr)) {
                if (PIM_DEBUG_PIM_PACKETS)
@@ -185,6 +187,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
 
        /* for computing checksum */
        header->checksum = 0;
+       no_fwd = header->Nbit;
 
        if (header->type == PIM_MSG_TYPE_REGISTER) {
                /* First 8 byte header checksum */
@@ -273,6 +276,11 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
                                       pim_msg + PIM_MSG_HEADER_LEN,
                                       pim_msg_len - PIM_MSG_HEADER_LEN);
                break;
+       case PIM_MSG_TYPE_BOOTSTRAP:
+               return pim_bsm_process(ifp, ip_hdr, pim_msg, pim_msg_len,
+                                      no_fwd);
+               break;
+
        default:
                if (PIM_DEBUG_PIM_PACKETS) {
                        zlog_debug(
@@ -634,7 +642,7 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)
        zassert(pim_msg_size >= PIM_PIM_MIN_LEN);
        zassert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE);
 
-       pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO);
+       pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO, false);
 
        if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
                         qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
index 431236eebe720f2f2717bf0802caedebb313cc2c..3fe7e8bf640e788573a05529dffe6bcfeb359289 100644 (file)
@@ -94,7 +94,7 @@ void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
        b1length += length;
 
        pim_msg_build_header(buffer, b1length + PIM_MSG_REGISTER_STOP_LEN,
-                            PIM_MSG_TYPE_REG_STOP);
+                            PIM_MSG_TYPE_REG_STOP, false);
 
        pinfo = (struct pim_interface *)ifp->info;
        if (!pinfo) {
@@ -208,7 +208,7 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,
        memcpy(b1, (const unsigned char *)buf, buf_size);
 
        pim_msg_build_header(buffer, buf_size + PIM_MSG_REGISTER_LEN,
-                            PIM_MSG_TYPE_REGISTER);
+                            PIM_MSG_TYPE_REGISTER, false);
 
        ++pinfo->pim_ifstat_reg_send;
 
index 14643743ad8fc4fd28f7c38f0beabc7b81244d17..ca865d28c31e1ec5ef786011b3ece75ecbc206d5 100644 (file)
@@ -48,6 +48,7 @@
 #include "pim_mroute.h"
 #include "pim_oil.h"
 #include "pim_zebra.h"
+#include "pim_bsm.h"
 
 /* Cleanup pim->rpf_hash each node data */
 void pim_rp_list_hash_clean(void *data)
@@ -372,7 +373,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
                        zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
                                   __PRETTY_FUNCTION__, up->sg_str, buf);
                }
-               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
        }
 
        /* Update the upstream address */
@@ -406,10 +407,45 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
        pim_zebra_update_all_interfaces(pim);
 }
 
-int pim_rp_new(struct pim_instance *pim, const char *rp,
-              const char *group_range, const char *plist)
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+                     const char *group_range, const char *plist)
 {
        int result = 0;
+       struct prefix group;
+       struct in_addr rp_addr;
+
+       if (group_range == NULL)
+               result = str2prefix("224.0.0.0/4", &group);
+       else {
+               result = str2prefix(group_range, &group);
+               if (result) {
+                       struct prefix temp;
+
+                       prefix_copy(&temp, &group);
+                       apply_mask(&temp);
+                       if (!prefix_same(&group, &temp))
+                               return PIM_GROUP_BAD_ADDR_MASK_COMBO;
+               }
+       }
+
+       if (!result)
+               return PIM_GROUP_BAD_ADDRESS;
+
+       result = inet_pton(AF_INET, rp, &rp_addr);
+
+       if (result <= 0)
+               return PIM_RP_BAD_ADDRESS;
+
+       result = pim_rp_new(pim, rp_addr, group, plist, RP_SRC_STATIC);
+       return result;
+}
+
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+              struct prefix group, const char *plist,
+              enum rp_source rp_src_flag)
+{
+       int result = 0;
+       char rp[INET_ADDRSTRLEN];
        struct rp_info *rp_info;
        struct rp_info *rp_all;
        struct prefix group_all;
@@ -417,41 +453,19 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
        struct rp_info *tmp_rp_info;
        char buffer[BUFSIZ];
        struct prefix nht_p;
-       struct prefix temp;
        struct route_node *rn;
        struct pim_upstream *up;
        struct listnode *upnode;
 
        rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
 
-       if (group_range == NULL)
-               result = str2prefix("224.0.0.0/4", &rp_info->group);
-       else {
-               result = str2prefix(group_range, &rp_info->group);
-               if (result) {
-                       prefix_copy(&temp, &rp_info->group);
-                       apply_mask(&temp);
-                       if (!prefix_same(&rp_info->group, &temp)) {
-                               XFREE(MTYPE_PIM_RP, rp_info);
-                               return PIM_GROUP_BAD_ADDR_MASK_COMBO;
-                       }
-               }
-       }
-
-       if (!result) {
-               XFREE(MTYPE_PIM_RP, rp_info);
-               return PIM_GROUP_BAD_ADDRESS;
-       }
-
        rp_info->rp.rpf_addr.family = AF_INET;
        rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
-       result = inet_pton(rp_info->rp.rpf_addr.family, rp,
-                          &rp_info->rp.rpf_addr.u.prefix4);
+       rp_info->rp.rpf_addr.u.prefix4 = rp_addr;
+       prefix_copy(&rp_info->group, &group);
+       rp_info->rp_src = rp_src_flag;
 
-       if (result <= 0) {
-               XFREE(MTYPE_PIM_RP, rp_info);
-               return PIM_RP_BAD_ADDRESS;
-       }
+       inet_ntop(AF_INET, &rp_info->rp.rpf_addr.u.prefix4, rp, sizeof(rp));
 
        if (plist) {
                /*
@@ -479,10 +493,10 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                        if (rp_info->rp.rpf_addr.u.prefix4.s_addr
                            == tmp_rp_info->rp.rpf_addr.u.prefix4.s_addr) {
                                if (tmp_rp_info->plist)
-                                       pim_rp_del(pim, rp, NULL,
-                                                  tmp_rp_info->plist);
+                                       pim_rp_del_config(pim, rp, NULL,
+                                                         tmp_rp_info->plist);
                                else
-                                       pim_rp_del(
+                                       pim_rp_del_config(
                                                pim, rp,
                                                prefix2str(&tmp_rp_info->group,
                                                           buffer, BUFSIZ),
@@ -516,7 +530,8 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                            && rp_info->rp.rpf_addr.u.prefix4.s_addr
                                       == tmp_rp_info->rp.rpf_addr.u.prefix4
                                                  .s_addr) {
-                               pim_rp_del(pim, rp, NULL, tmp_rp_info->plist);
+                               pim_rp_del_config(pim, rp, NULL,
+                                                 tmp_rp_info->plist);
                        }
                }
 
@@ -526,6 +541,7 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                if (prefix_same(&rp_all->group, &rp_info->group)
                    && pim_rpf_addr_is_inaddr_none(&rp_all->rp)) {
                        rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
+                       rp_all->rp_src = rp_src_flag;
                        XFREE(MTYPE_PIM_RP, rp_info);
 
                        /* Register addr with Zebra NHT */
@@ -556,8 +572,8 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                                        grp.family = AF_INET;
                                        grp.prefixlen = IPV4_MAX_BITLEN;
                                        grp.u.prefix4 = up->sg.grp;
-                                       trp_info = pim_rp_find_match_group(pim,
-                                                                         &grp);
+                                       trp_info = pim_rp_find_match_group(
+                                               pim, &grp);
                                        if (trp_info == rp_all)
                                                pim_upstream_update(pim, up);
                                }
@@ -565,24 +581,27 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
 
                        pim_rp_check_interfaces(pim, rp_all);
                        pim_rp_refresh_group_to_rp_mapping(pim);
-
                        pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
-                                                 NULL);
+                                                 false, NULL);
+
                        if (!pim_ecmp_nexthop_lookup(pim,
                                                     &rp_all->rp.source_nexthop,
                                                     &nht_p, &rp_all->group, 1))
                                return PIM_RP_NO_PATH;
-
                        return PIM_SUCCESS;
                }
 
                /*
                 * Return if the group is already configured for this RP
                 */
-               if (pim_rp_find_exact(pim, rp_info->rp.rpf_addr.u.prefix4,
-                                     &rp_info->group)) {
+               tmp_rp_info = pim_rp_find_exact(
+                       pim, rp_info->rp.rpf_addr.u.prefix4, &rp_info->group);
+               if (tmp_rp_info) {
+                       if ((tmp_rp_info->rp_src != rp_src_flag)
+                           && (rp_src_flag == RP_SRC_STATIC))
+                               tmp_rp_info->rp_src = rp_src_flag;
                        XFREE(MTYPE_PIM_RP, rp_info);
-                       return PIM_SUCCESS;
+                       return result;
                }
 
                /*
@@ -604,8 +623,20 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                                 */
                                if (prefix_same(&rp_info->group,
                                                &tmp_rp_info->group)) {
+                                       if ((rp_src_flag == RP_SRC_STATIC)
+                                           && (tmp_rp_info->rp_src
+                                               == RP_SRC_STATIC)) {
+                                               XFREE(MTYPE_PIM_RP, rp_info);
+                                               return PIM_GROUP_OVERLAP;
+                                       }
+
+                                       result = pim_rp_change(
+                                               pim,
+                                               rp_info->rp.rpf_addr.u.prefix4,
+                                               tmp_rp_info->group,
+                                               rp_src_flag);
                                        XFREE(MTYPE_PIM_RP, rp_info);
-                                       return PIM_GROUP_OVERLAP;
+                                       return result;
                                }
                        }
                }
@@ -654,8 +685,7 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
                zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
                           __PRETTY_FUNCTION__, buf, buf1);
        }
-
-       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
        if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
                                     &rp_info->group, 1))
                return PIM_RP_NO_PATH;
@@ -663,21 +693,12 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
        return PIM_SUCCESS;
 }
 
-int pim_rp_del(struct pim_instance *pim, const char *rp,
-              const char *group_range, const char *plist)
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+                     const char *group_range, const char *plist)
 {
        struct prefix group;
        struct in_addr rp_addr;
-       struct prefix g_all;
-       struct rp_info *rp_info;
-       struct rp_info *rp_all;
        int result;
-       struct prefix nht_p;
-       struct route_node *rn;
-       bool was_plist = false;
-       struct rp_info *trp_info;
-       struct pim_upstream *up;
-       struct listnode *upnode;
 
        if (group_range == NULL)
                result = str2prefix("224.0.0.0/4", &group);
@@ -691,6 +712,32 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
        if (result <= 0)
                return PIM_RP_BAD_ADDRESS;
 
+       result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
+       return result;
+}
+
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+              struct prefix group, const char *plist,
+              enum rp_source rp_src_flag)
+{
+       struct prefix g_all;
+       struct rp_info *rp_info;
+       struct rp_info *rp_all;
+       struct prefix nht_p;
+       struct route_node *rn;
+       bool was_plist = false;
+       struct rp_info *trp_info;
+       struct pim_upstream *up;
+       struct listnode *upnode;
+       struct bsgrp_node *bsgrp = NULL;
+       struct bsm_rpinfo *bsrp = NULL;
+       char grp_str[PREFIX2STR_BUFFER];
+       char rp_str[INET_ADDRSTRLEN];
+
+       if (!inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str)))
+               sprintf(rp_str, "<rp?>");
+       prefix2str(&group, grp_str, sizeof(grp_str));
+
        if (plist)
                rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
        else
@@ -704,6 +751,42 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
                was_plist = true;
        }
 
+       if (PIM_DEBUG_TRACE)
+               zlog_debug("%s: Delete RP %s for the group %s",
+                          __PRETTY_FUNCTION__, rp_str, grp_str);
+
+       /* While static RP is getting deleted, we need to check if dynamic RP
+        * present for the same group in BSM RP table, then install the dynamic
+        * RP for the group node into the main rp table
+        */
+       if (rp_src_flag == RP_SRC_STATIC) {
+               bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);
+
+               if (bsgrp) {
+                       bsrp = listnode_head(bsgrp->bsrp_list);
+                       if (bsrp) {
+                               if (PIM_DEBUG_TRACE) {
+                                       char bsrp_str[INET_ADDRSTRLEN];
+
+                                       if (!inet_ntop(AF_INET, bsrp, bsrp_str,
+                                                      sizeof(bsrp_str)))
+                                               sprintf(bsrp_str, "<bsrp?>");
+
+                                       zlog_debug("%s: BSM RP %s found for the group %s",
+                                                  __PRETTY_FUNCTION__,
+                                                  bsrp_str, grp_str);
+                               }
+                               return pim_rp_change(pim, bsrp->rp_address,
+                                                    group, RP_SRC_BSR);
+                       }
+               } else {
+                       if (PIM_DEBUG_TRACE)
+                               zlog_debug(
+                                       "%s: BSM RP not found for the group %s",
+                                       __PRETTY_FUNCTION__, grp_str);
+               }
+       }
+
        /* Deregister addr with Zebra NHT */
        nht_p.family = AF_INET;
        nht_p.prefixlen = IPV4_MAX_BITLEN;
@@ -714,7 +797,7 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
                zlog_debug("%s: Deregister RP addr %s with Zebra ",
                           __PRETTY_FUNCTION__, buf);
        }
-       pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+       pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
 
        if (!str2prefix("224.0.0.0/4", &g_all))
                return PIM_RP_BAD_ADDRESS;
@@ -726,8 +809,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
                        /* Find the upstream (*, G) whose upstream address is
                         * same as the deleted RP
                         */
-                       if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
-                           (up->sg.src.s_addr == INADDR_ANY)) {
+                       if ((up->upstream_addr.s_addr
+                            == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+                           && (up->sg.src.s_addr == INADDR_ANY)) {
                                struct prefix grp;
                                grp.family = AF_INET;
                                grp.prefixlen = IPV4_MAX_BITLEN;
@@ -777,8 +861,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
                /* Find the upstream (*, G) whose upstream address is same as
                 * the deleted RP
                 */
-               if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
-                   (up->sg.src.s_addr == INADDR_ANY)) {
+               if ((up->upstream_addr.s_addr
+                    == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+                   && (up->sg.src.s_addr == INADDR_ANY)) {
                        struct prefix grp;
 
                        grp.family = AF_INET;
@@ -790,9 +875,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
                        /* RP not found for the group grp */
                        if (pim_rpf_addr_is_inaddr_none(&trp_info->rp)) {
                                pim_upstream_rpf_clear(pim, up);
-                               pim_rp_set_upstream_addr(pim,
-                                       &up->upstream_addr,
-                                       up->sg.src, up->sg.grp);
+                               pim_rp_set_upstream_addr(
+                                       pim, &up->upstream_addr, up->sg.src,
+                                       up->sg.grp);
                        }
 
                        /* RP found for the group grp */
@@ -805,6 +890,105 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
        return PIM_SUCCESS;
 }
 
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+                 struct prefix group, enum rp_source rp_src_flag)
+{
+       struct prefix nht_p;
+       struct route_node *rn;
+       int result = 0;
+       struct rp_info *rp_info = NULL;
+       struct pim_upstream *up;
+       struct listnode *upnode;
+
+       rn = route_node_lookup(pim->rp_table, &group);
+       if (!rn) {
+               result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+               return result;
+       }
+
+       rp_info = rn->info;
+
+       if (!rp_info) {
+               route_unlock_node(rn);
+               result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+               return result;
+       }
+
+       if (rp_info->rp.rpf_addr.u.prefix4.s_addr == new_rp_addr.s_addr) {
+               if (rp_info->rp_src != rp_src_flag) {
+                       rp_info->rp_src = rp_src_flag;
+                       route_unlock_node(rn);
+                       return PIM_SUCCESS;
+               }
+       }
+
+       /* Deregister old RP addr with Zebra NHT */
+       if (rp_info->rp.rpf_addr.u.prefix4.s_addr != INADDR_ANY) {
+               nht_p.family = AF_INET;
+               nht_p.prefixlen = IPV4_MAX_BITLEN;
+               nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+               if (PIM_DEBUG_PIM_NHT_RP) {
+                       char buf[PREFIX2STR_BUFFER];
+
+                       prefix2str(&nht_p, buf, sizeof(buf));
+                       zlog_debug("%s: Deregister RP addr %s with Zebra ",
+                                  __PRETTY_FUNCTION__, buf);
+               }
+               pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
+       }
+
+       pim_rp_nexthop_del(rp_info);
+       listnode_delete(pim->rp_list, rp_info);
+       /* Update the new RP address*/
+       rp_info->rp.rpf_addr.u.prefix4 = new_rp_addr;
+       rp_info->rp_src = rp_src_flag;
+       rp_info->i_am_rp = 0;
+
+       listnode_add_sort(pim->rp_list, rp_info);
+
+       for (ALL_LIST_ELEMENTS_RO(pim->upstream_list, upnode, up)) {
+               if (up->sg.src.s_addr == INADDR_ANY) {
+                       struct prefix grp;
+                       struct rp_info *trp_info;
+
+                       grp.family = AF_INET;
+                       grp.prefixlen = IPV4_MAX_BITLEN;
+                       grp.u.prefix4 = up->sg.grp;
+                       trp_info = pim_rp_find_match_group(pim, &grp);
+
+                       if (trp_info == rp_info)
+                               pim_upstream_update(pim, up);
+               }
+       }
+
+       /* Register new RP addr with Zebra NHT */
+       nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+       if (PIM_DEBUG_PIM_NHT_RP) {
+               char buf[PREFIX2STR_BUFFER];
+               char buf1[PREFIX2STR_BUFFER];
+
+               prefix2str(&nht_p, buf, sizeof(buf));
+               prefix2str(&rp_info->group, buf1, sizeof(buf1));
+               zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
+                          __PRETTY_FUNCTION__, buf, buf1);
+       }
+
+       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
+       if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+                                    &rp_info->group, 1)) {
+               route_unlock_node(rn);
+               return PIM_RP_NO_PATH;
+       }
+
+       pim_rp_check_interfaces(pim, rp_info);
+
+       route_unlock_node(rn);
+
+       pim_rp_refresh_group_to_rp_mapping(pim);
+
+       return result;
+}
+
 void pim_rp_setup(struct pim_instance *pim)
 {
        struct listnode *node;
@@ -819,7 +1003,8 @@ void pim_rp_setup(struct pim_instance *pim)
                nht_p.prefixlen = IPV4_MAX_BITLEN;
                nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
 
-               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+                                         NULL);
                if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
                                             &nht_p, &rp_info->group, 1))
                        if (PIM_DEBUG_PIM_NHT_RP)
@@ -969,8 +1154,8 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, struct in_addr group)
                                "%s: NHT Register RP addr %s grp %s with Zebra",
                                __PRETTY_FUNCTION__, buf, buf1);
                }
-
-               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+                                         NULL);
                pim_rpf_set_refresh_time(pim);
                (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
                                              &nht_p, &rp_info->group, 1);
@@ -1030,6 +1215,9 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
                if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
                        continue;
 
+               if (rp_info->rp_src == RP_SRC_BSR)
+                       continue;
+
                if (rp_info->plist)
                        vty_out(vty, "%sip pim rp %s prefix-list %s\n", spaces,
                                inet_ntop(AF_INET,
@@ -1062,6 +1250,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
        struct rp_info *rp_info;
        struct rp_info *prev_rp_info = NULL;
        struct listnode *node;
+       char source[7];
 
        json_object *json = NULL;
        json_object *json_rp_rows = NULL;
@@ -1071,12 +1260,17 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
                json = json_object_new_object();
        else
                vty_out(vty,
-                       "RP address       group/prefix-list   OIF               I am RP\n");
-
+                       "RP address       group/prefix-list   OIF         I am RP     Source\n");
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
                if (!pim_rpf_addr_is_inaddr_none(&rp_info->rp)) {
                        char buf[48];
 
+                       if (rp_info->rp_src == RP_SRC_STATIC)
+                               strcpy(source, "Static");
+                       else if (rp_info->rp_src == RP_SRC_BSR)
+                               strcpy(source, "BSR");
+                       else
+                               strcpy(source, "None");
                        if (uj) {
                                /*
                                 * If we have moved on to a new RP then add the
@@ -1119,6 +1313,8 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
                                                json_row, "group",
                                                prefix2str(&rp_info->group, buf,
                                                           48));
+                               json_object_string_add(json_row, "source",
+                                                      source);
 
                                json_object_array_add(json_rp_rows, json_row);
                        } else {
@@ -1143,9 +1339,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
                                if (rp_info->i_am_rp)
                                        vty_out(vty, "yes\n");
                                else
-                                       vty_out(vty, "no\n");
-                       }
+                                       vty_out(vty, "no");
 
+                               vty_out(vty, "%14s\n", source);
+                       }
                        prev_rp_info = rp_info;
                }
        }
@@ -1180,7 +1377,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
                nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
                memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
                if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
-                                              &pnc))
+                                              false, &pnc))
                        continue;
 
                for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
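
Two behavioural points are worth pulling out of the pim_rp.c hunks. First, every rp_info now records where it came from (rp_src), and when an exact mapping already exists pim_rp_new() only rewrites that source in favour of static configuration. Second, pim_rp_del() no longer simply tears a static mapping down: it first asks pim_bsm_get_bsgrp_node() whether a BSM-learned RP exists for the group and, if so, converts the entry with pim_rp_change(..., RP_SRC_BSR) so the group keeps a usable RP. A minimal stand-alone sketch of that deletion decision follows; the types and names are stand-ins, not pimd's.

    /* Stand-in types; the real decision lives inside pim_rp_del() above. */
    #include <stdbool.h>

    enum rp_source_sketch { SRC_NONE = 0, SRC_STATIC, SRC_BSR };
    enum rp_del_action { RP_DEL_REMOVE, RP_DEL_SWITCH_TO_BSR };

    static enum rp_del_action
    rp_del_action_sketch(enum rp_source_sketch deleted_src, bool bsm_rp_exists)
    {
            /* Only static deletions consult the BSM cache; a BSR-sourced
             * deletion falls through to the normal teardown. */
            if (deleted_src == SRC_STATIC && bsm_rp_exists)
                    return RP_DEL_SWITCH_TO_BSR;
            return RP_DEL_REMOVE;
    }

When the switch happens, pim_rp_change() deregisters nexthop tracking for the old address, re-sorts the entry in pim->rp_list, walks the (*,G) upstreams whose group maps to this rp_info, and then registers NHT for the new address, as the new function above shows.
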
index 402ec30aba270167bdc4500dcbe81e5a54eba8dc..6dc26c07a95ca63e362a7a28f6ef3c26a883af2f 100644 (file)
 #include "pim_iface.h"
 #include "pim_rpf.h"
 
+enum rp_source {
+       RP_SRC_NONE = 0,
+       RP_SRC_STATIC,
+       RP_SRC_BSR
+};
+
 struct rp_info {
        struct prefix group;
        struct pim_rpf rp;
+       enum rp_source rp_src;
        int i_am_rp;
        char *plist;
 };
@@ -39,10 +46,18 @@ void pim_rp_free(struct pim_instance *pim);
 
 void pim_rp_list_hash_clean(void *data);
 
-int pim_rp_new(struct pim_instance *pim, const char *rp, const char *group,
-              const char *plist);
-int pim_rp_del(struct pim_instance *pim, const char *rp, const char *group,
-              const char *plist);
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+                     const char *group, const char *plist);
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+              struct prefix group, const char *plist,
+              enum rp_source rp_src_flag);
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+                     const char *group, const char *plist);
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+              struct prefix group, const char *plist,
+              enum rp_source rp_src_flag);
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+                 struct prefix group, enum rp_source rp_src_flag);
 void pim_rp_prefix_list_update(struct pim_instance *pim,
                               struct prefix_list *plist);
 
index dba46e63f03a3006cdb1d7b8adc675131a98267e..d388802454730431e9681e6c22e03036c6300658 100644 (file)
@@ -238,7 +238,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
        if ((up->sg.src.s_addr == INADDR_ANY && I_am_RP(pim, up->sg.grp)) ||
            PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
                neigh_needed = FALSE;
-       pim_find_or_track_nexthop(pim, &nht_p, up, NULL, NULL);
+       pim_find_or_track_nexthop(pim, &nht_p, up, NULL, false, NULL);
        if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
                                     neigh_needed))
                return PIM_RPF_FAILURE;
index 55d998f27084bdf2e68ab7e9835c351a2fcc1dcc..a823962b23269c2aedc5889f0a9e7e5eeb81b6bd 100644 (file)
@@ -237,7 +237,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
                        zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
                                   __PRETTY_FUNCTION__, up->sg_str, buf);
                }
-               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
        }
 
        XFREE(MTYPE_PIM_UPSTREAM, up);
index 2654ebc588b6b93a1397a7e4755bc8a6cb2ce42d..8d40f85132acb78fe150578b6413e00e6b4dc59d 100644 (file)
@@ -39,6 +39,7 @@
 #include "pim_msdp.h"
 #include "pim_ssm.h"
 #include "pim_bfd.h"
+#include "pim_bsm.h"
 #include "pim_vxlan.h"
 
 int pim_debug_config_write(struct vty *vty)
@@ -120,6 +121,11 @@ int pim_debug_config_write(struct vty *vty)
                ++writes;
        }
 
+       if (PIM_DEBUG_BSM) {
+               vty_out(vty, "debug pim bsm\n");
+               ++writes;
+       }
+
        if (PIM_DEBUG_VXLAN) {
                vty_out(vty, "debug pim vxlan\n");
                ++writes;
@@ -383,7 +389,10 @@ int pim_interface_config_write(struct vty *vty)
 
                                writes +=
                                        pim_static_write_mroute(pim, vty, ifp);
+                               pim_bsm_write_config(vty, ifp);
+                               ++writes;
                                pim_bfd_write_config(vty, ifp);
+                               ++writes;
                        }
                        vty_endframe(vty, "!\n");
                        ++writes;
index 1c6b56568f62202fca1803966b04012f881d84da..09669e206eafea4316d580792c68a56ece57e282 100644 (file)
@@ -344,7 +344,7 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
                        nht_p.prefixlen = IPV4_MAX_BITLEN;
                        nht_p.u.prefix4 = up->upstream_addr;
                        pim_delete_tracked_nexthop(vxlan_sg->pim,
-                               &nht_p, up, NULL);
+                               &nht_p, up, NULL, false);
                }
                pim_upstream_ref(up, flags, __PRETTY_FUNCTION__);
                vxlan_sg->up = up;
index 2f2a870371397bb084b0733e23b64ddbed9c32d5..cdeaed39773eb40b62f546a584d32a735b2ee0e0 100644 (file)
 #define PIM_MASK_PIM_NHT_RP          (1 << 24)
 #define PIM_MASK_MTRACE              (1 << 25)
 #define PIM_MASK_VXLAN               (1 << 26)
+#define PIM_MASK_BSM_PROC            (1 << 27)
 /* Remember 32 bits!!! */
 
 /* PIM error codes */
@@ -182,11 +183,12 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
 #define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
 #define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
+#define PIM_DEBUG_BSM  (router->debugs & PIM_MASK_BSM_PROC)
 
 #define PIM_DEBUG_EVENTS                                                       \
        (router->debugs                                                        \
         & (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS                         \
-           | PIM_MASK_MSDP_EVENTS))
+           | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
 #define PIM_DEBUG_PACKETS                                                      \
        (router->debugs                                                        \
         & (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS                       \
@@ -212,6 +214,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
 #define PIM_DO_DEBUG_MROUTE (router->debugs |= PIM_MASK_MROUTE)
 #define PIM_DO_DEBUG_MROUTE_DETAIL (router->debugs |= PIM_MASK_MROUTE_DETAIL)
+#define PIM_DO_DEBUG_BSM (router->debugs |= PIM_MASK_BSM_PROC)
 #define PIM_DO_DEBUG_PIM_HELLO (router->debugs |= PIM_MASK_PIM_HELLO)
 #define PIM_DO_DEBUG_PIM_J_P (router->debugs |= PIM_MASK_PIM_J_P)
 #define PIM_DO_DEBUG_PIM_REG (router->debugs |= PIM_MASK_PIM_REG)
@@ -253,6 +256,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
 #define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
 #define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
 #define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
+#define PIM_DONT_DEBUG_BSM (router->debugs &= ~PIM_MASK_BSM_PROC)
 
 void pim_router_init(void);
 void pim_router_terminate(void);
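
pimd.h wires the new knob the same way as the existing debug flags: one bit in router->debugs, a PIM_DEBUG_BSM test macro, DO/DONT setters, and membership in the PIM_DEBUG_EVENTS aggregate. Call sites then follow the usual guarded-logging pattern; the example below is made up, only PIM_DEBUG_BSM and zlog_debug() come from the tree.

    #include "pimd.h"
    #include "log.h"

    /* hypothetical call site showing the guarded-logging convention */
    static void bsm_rx_debug_sketch(int frag_rp_count)
    {
            if (PIM_DEBUG_BSM)
                    zlog_debug("%s: bootstrap message carried %d candidate RPs",
                               __PRETTY_FUNCTION__, frag_rp_count);
    }
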
index 7f4810722b858267f139c8dc5b2f0a5ae5305aa9..240b62804fb7a2fea15ba5b8769ca93429ef37d3 100644 (file)
@@ -17,6 +17,7 @@ pimd_libpim_a_SOURCES = \
        pimd/pim_assert.c \
        pimd/pim_bfd.c \
        pimd/pim_br.c \
+       pimd/pim_bsm.c \
        pimd/pim_cmd.c \
        pimd/pim_errors.c \
        pimd/pim_hello.c \
@@ -68,6 +69,7 @@ noinst_HEADERS += \
        pimd/pim_assert.h \
        pimd/pim_bfd.h \
        pimd/pim_br.h \
+       pimd/pim_bsm.h \
        pimd/pim_cmd.h \
        pimd/pim_errors.h \
        pimd/pim_hello.h \
index 7c4284845183256709ed42dad2cea83ee422ab8a..e43f63f2d9042253a235d191374b46f5616c5405 100644 (file)
@@ -92,6 +92,14 @@ All the commands are prefixed with frr.
     frr.ripngd-debug
     frr.ldp-debug
     frr.zebra-debug
+    frr.pimd-debug
+    frr.nhrpd-debug
+    frr.babeld-debug
+    frr.eigrpd-debug
+    frr.pbrd-debug
+    frr.staticd-debug
+    frr.bfdd-debug
+    frr.fabricd-debug
 
 vtysh can be accessed as frr.vtysh (Make sure you have /snap/bin in your
 path). If access as `vtysh` instead of `frr.vtysh` is needed, you can enable it
index a7b51a5656f8603e906f0e2e8f01ad493dd4aa8d..6a0864c8c5f2ac15f62bb380aae40ad85477d06d 100644 (file)
@@ -66,6 +66,8 @@ depend on them). These are mainly intended to debug the Snap
         Starts staticd daemon in foreground
 - `frr.bfdd-debug`:
         Starts bfdd daemon in foreground
+- `frr.fabricd-debug`:
+        Starts fabricd daemon in foreground
 
 MPLS (LDP)
 ----------
diff --git a/snapcraft/defaults/fabricd.conf.default b/snapcraft/defaults/fabricd.conf.default
new file mode 100644 (file)
index 0000000..e69de29
index e3a7708f23043d59454fc3862375f84ed9f3ddb7..7ddb7f0769fe12c70bc70582dfa7d1534d550129 100644 (file)
@@ -17,6 +17,7 @@ install:
        install -D -m 0755 pbrd-service $(DESTDIR)/bin/
        install -D -m 0755 staticd-service $(DESTDIR)/bin/
        install -D -m 0755 bfdd-service $(DESTDIR)/bin/
+       install -D -m 0755 fabricd-service $(DESTDIR)/bin/
        install -D -m 0755 set-options $(DESTDIR)/bin/
        install -D -m 0755 show_version $(DESTDIR)/bin/
 
index 6c3a6f595955a913b54028ce5d90037e465b6037..64273d9f80cb83070941d7f4c3318d7b023d519c 100644 (file)
@@ -10,7 +10,7 @@ fi
 if ! [ -e $SNAP_DATA/rpki.conf ]; then
     echo "-M rpki" > $SNAP_DATA/rpki.conf
 fi
-EXTRA_OPTIONS="`cat $SNAP_DATA/rpki.conf`"
+EXTRA_OPTIONS="`$SNAP/bin/cat $SNAP_DATA/rpki.conf`"
 exec $SNAP/sbin/bgpd \
     -f $SNAP_DATA/bgpd.conf \
     --pid_file $SNAP_DATA/bgpd.pid \
diff --git a/snapcraft/scripts/fabricd-service b/snapcraft/scripts/fabricd-service
new file mode 100644 (file)
index 0000000..586f061
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e -x
+
+if ! [ -e $SNAP_DATA/fabricd.conf ]; then
+    cp $SNAP/etc/frr/fabricd.conf.default $SNAP_DATA/fabricd.conf
+fi
+exec $SNAP/sbin/fabricd \
+    -f $SNAP_DATA/fabricd.conf \
+    --pid_file $SNAP_DATA/fabricd.pid \
+    --socket $SNAP_DATA/zsock \
+    --vty_socket $SNAP_DATA
+
index b70d6efee2c094dba976ad2a841c0faf240301f6..d8071e8cfee596c6572759be050331b130fd9e3c 100644 (file)
@@ -4,8 +4,8 @@ summary: FRRouting BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing da
 description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
  FRRouting (FRR) is free software which manages TCP/IP based routing 
  protocols. It supports BGP4, BGP4+, OSPFv2, OSPFv3, IS-IS, RIPv1, RIPv2, 
- RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing) and BFD as well as
- the IPv6 versions of these.
+ RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing), BFD and OpenFabric
+ as well as the IPv6 versions of these.
  FRRouting (frr) is a fork of Quagga.
 confinement: strict
 grade: devel
@@ -127,6 +127,13 @@ apps:
             - network
             - network-bind
             - network-control
+    fabricd:
+        command: bin/fabricd-service
+        daemon: simple
+        plugs:
+            - network
+            - network-bind
+            - network-control
     set:
         command: bin/set-options
     zebra-debug:
@@ -136,7 +143,7 @@ apps:
             - network-bind
             - network-control
     bgpd-debug:
-        command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules `cat $SNAP_DATA/rpki.conf 2> /dev/null`
+        command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules
         plugs:
             - network
             - network-bind
@@ -219,6 +226,12 @@ apps:
             - network
             - network-bind
             - network-control
+    fabricd-debug:
+        command: sbin/fabricd -f $SNAP_DATA/fabricd.conf --pid_file $SNAP_DATA/fabricd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+        plugs:
+            - network
+            - network-bind
+            - network-control
 
 parts:
     rtrlib:
@@ -230,16 +243,33 @@ parts:
         stage-packages:
            - libssh-4
         prime:
-           - lib/x86_64-linux-gnu/librtr.so*
+           - lib/librtr.so*
            - usr/lib/x86_64-linux-gnu/libssh.so*
         source: https://github.com/rtrlib/rtrlib.git
         source-type: git
-        source-tag: v0.5.0
+        source-tag: v0.6.3
         plugin: cmake
         configflags:
            - -DCMAKE_BUILD_TYPE=Release
+    libyang:
+        build-packages:
+           - cmake
+           - make
+           - gcc
+           - libpcre3-dev
+        stage-packages:
+           - libpcre3
+        source: https://github.com/CESNET/libyang.git
+        source-type: git
+        source-tag: v0.16-r3
+        plugin: cmake
+        configflags:
+           - -DCMAKE_INSTALL_PREFIX:PATH=/usr
+           - -DENABLE_LYD_PRIV=ON
+           - -DENABLE_CACHE=OFF
+           - -DCMAKE_BUILD_TYPE:String="Release"
     frr: 
-        after: [rtrlib]
+        after: [rtrlib,libyang]
         build-packages:
            - gcc
            - autoconf
@@ -272,13 +302,13 @@ parts:
            - iproute2
            - logrotate
            - libcap2
-           - libc6
            - libtinfo5
            - libreadline6
            - libjson-c2
            - libc-ares2
            - libatm1
            - libprotobuf-c1
+           - libdb5.3
         plugin: autotools
         source: ../frr-@PACKAGE_VERSION@.tar.gz
         configflags:
@@ -322,7 +352,9 @@ parts:
             eigrpd.conf.default: etc/frr/eigrpd.conf.default
             pbrd.conf.default: etc/frr/pbrd.conf.default
             bfdd.conf.default: etc/frr/bfdd.conf.default
+            fabricd.conf.default: etc/frr/fabricd.conf.default
             vtysh.conf.default: etc/frr/vtysh.conf.default
+            staticd.conf.default: etc/frr/staticd.conf.default
     frr-scripts:
         plugin: make
         source: scripts
@@ -344,3 +376,8 @@ parts:
             README.snap_build.md: doc/README.snap_build.md
             extra_version_info.txt: doc/extra_version_info.txt
 
+passthrough:
+    layout:
+         /usr/lib/x86_64-linux-gnu/libyang:
+             bind: $SNAP/usr/lib/x86_64-linux-gnu/libyang
+
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py b/tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..235b42b
--- /dev/null
@@ -0,0 +1,4 @@
+router bgp 65000
+  neighbor 192.168.255.2 remote-as 65001
+  address-family ipv4 unicast
+    redistribute connected
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf
new file mode 100644 (file)
index 0000000..0a283c0
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface lo
+ ip address 172.16.255.254/32
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..c05bfd5
--- /dev/null
@@ -0,0 +1,5 @@
+router bgp 65001
+  bgp default show-hostname
+  neighbor 192.168.255.1 remote-as 65000
+  address-family ipv4 unicast
+    redistribute connected
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf
new file mode 100644 (file)
index 0000000..5abba71
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface lo
+ ip address 172.16.255.253/32
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py
new file mode 100644 (file)
index 0000000..59ffd36
--- /dev/null
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_show_ip_bgp_fqdn.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2019 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_show_ip_bgp_fqdn.py:
+Test if FQDN is visible in `show [ip] bgp` output if
+`bgp default show-hostname` is toggled.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+class TemplateTopo(Topo):
+    def build(self, *_args, **_opts):
+        tgen = get_topogen(self)
+
+        for routern in range(1, 3):
+            tgen.add_router('r{}'.format(routern))
+
+        switch = tgen.add_switch('s1')
+        switch.add_link(tgen.gears['r1'])
+        switch.add_link(tgen.gears['r2'])
+
+def setup_module(mod):
+    tgen = Topogen(TemplateTopo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.iteritems(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA,
+            os.path.join(CWD, '{}/zebra.conf'.format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP,
+            os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+        )
+
+    tgen.start_router()
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+def test_bgp_maximum_prefix_invalid():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_converge(router, neighbor):
+        cmd = "show ip bgp neighbor {0} json".format(neighbor)
+        while True:
+            output = json.loads(tgen.gears[router].vtysh_cmd(cmd))
+            if output[neighbor]['bgpState'] == 'Established':
+                time.sleep(3)
+                return True
+
+    def _bgp_show_nexthop(router, prefix):
+        cmd = "show ip bgp json"
+        output = json.loads(tgen.gears[router].vtysh_cmd(cmd))
+        for nh in output['routes'][prefix][0]['nexthops']:
+            if 'fqdn' in nh:
+                return 'fqdn'
+        return 'ip'
+
+    if _bgp_converge('r2', '192.168.255.1'):
+        assert _bgp_show_nexthop('r2', '172.16.255.254/32') == 'fqdn'
+
+    if _bgp_converge('r1', '192.168.255.2'):
+        assert _bgp_show_nexthop('r1', '172.16.255.253/32') == 'ip'
+
+if __name__ == '__main__':
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 391917ec681f66d2d8f9f9327f856a8a31a77b6c..b31b6a12508b491073e7a61694bdd4042a6670f4 100644 (file)
@@ -2855,7 +2855,11 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
                                break;
                        }
                        for (ALL_NEXTHOPS(re->ng, rtnh))
-                               if (nexthop_same_no_recurse(rtnh, nh)) {
+                               /*
+                                * No guarantee all kernel send nh with labels
+                                * on delete.
+                                */
+                               if (nexthop_same_no_labels(rtnh, nh)) {
                                        same = re;
                                        break;
                                }
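
The final hunk switches rib_delete()'s nexthop match to nexthop_same_no_labels() because, as the new comment notes, not every kernel echoes label information back on a delete. The stand-alone sketch below illustrates the distinction such a label-blind comparison draws; the structures and helpers are stand-ins, not lib/nexthop.c's.

    /* Stand-in structures; the real helpers operate on struct nexthop
     * and its MPLS label stack. */
    #include <stdbool.h>
    #include <string.h>

    struct nh_sketch {
            unsigned int gate;      /* simplified gateway address        */
            unsigned int ifindex;   /* outgoing interface                */
            int num_labels;         /* label stack depth, <= 4 here      */
            unsigned int labels[4]; /* simplified label stack            */
    };

    static bool sketch_same_no_labels(const struct nh_sketch *a,
                                      const struct nh_sketch *b)
    {
            return a->gate == b->gate && a->ifindex == b->ifindex;
    }

    static bool sketch_same(const struct nh_sketch *a,
                            const struct nh_sketch *b)
    {
            return sketch_same_no_labels(a, b)
                   && a->num_labels == b->num_labels
                   && memcmp(a->labels, b->labels,
                             (size_t)a->num_labels * sizeof(a->labels[0])) == 0;
    }

With the label-blind comparison in rib_delete(), a kernel delete that omits labels still matches the installed route instead of being silently ignored.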