* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include "if.h"
#include "pimd.h"
#include "pim_iface.h"
DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_INFO, "PIM BSM Info")
DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet")
+/* All forwarded BSM packets must fit within the IP MTU less the (max) IP header */
+#define MAX_IP_HDR_LEN 24
+
/* pim_bsm_write_config - Write the interface pim bsm configuration.*/
void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
{
}
bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
- if (!bsgrp) {
- if (PIM_DEBUG_BSM)
- zlog_debug("%s: bsgrp alloc failed",
- __PRETTY_FUNCTION__);
- route_unlock_node(rn);
- return NULL;
- }
-
rn->info = bsgrp;
bsgrp->bsrp_list = pim_alloc_bsrp_list();
bsgrp->partial_bsrp_list = pim_alloc_bsrp_list();
pim_free_bsgrp_data(bsgrp);
}
- if (pim->global_scope.bsrp_table)
- route_table_finish(pim->global_scope.bsrp_table);
+ route_table_finish(pim->global_scope.bsrp_table);
}
static bool is_hold_time_elapsed(void *data)
pim_g2rp_timer_start(bsrp, hold_time);
}
+/* Cancel the per-(group, RP) holdtime timer for one RP entry.
+ * A NULL entry is tolerated as a no-op.  When BSM debugging is on,
+ * the group prefix and RP address are logged.
+ */
+static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
+{
+	if (!bsrp)
+		return;
+
+	THREAD_OFF(bsrp->g2rp_timer);
+
+	if (PIM_DEBUG_BSM) {
+		char grp_str[48];
+
+		zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
+			   __PRETTY_FUNCTION__,
+			   prefix2str(&bsrp->bsgrp_node->group, grp_str,
+				      sizeof(grp_str)),
+			   inet_ntoa(bsrp->rp_address));
+	}
+}
+
+/* Predicate for list_filter_out_nodes(): returns true when the RP
+ * entry carries a zero holdtime and must be dropped from the pending
+ * list before it is instated.
+ */
+static bool is_hold_time_zero(void *data)
+{
+	struct bsm_rpinfo *rp_entry = data;
+
+	return rp_entry->rp_holdtime == 0;
+}
+
+/* pim_instate_pend_list - promote the just-completed pending RP list
+ * (partial_bsrp_list) of a group to active (bsrp_list) and reconcile
+ * the pim RP table with the newly elected RP: install a new RP, change
+ * the RP, delete it, or tear down the whole bsgrp node when both lists
+ * end up empty.  Static RPs always win over BSR-learned RPs here.
+ *
+ * NOTE(review): in the first branch below, when rn is non-NULL but the
+ * group matched the unconfigured 224.0.0.0/4 rp_all entry, rn is never
+ * passed to route_unlock_node() — looks like a route-node lock leak;
+ * confirm against lib route_table lock conventions.
+ */
+static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
+{
+	struct bsm_rpinfo *active;
+	struct bsm_rpinfo *pend;
+	struct list *temp;
+	struct rp_info *rp_info;
+	struct route_node *rn;
+	struct pim_instance *pim;
+	struct rp_info *rp_all;
+	struct prefix group_all;
+	bool had_rp_node = true;
+
+	pim = bsgrp_node->scope->pim;
+	active = listnode_head(bsgrp_node->bsrp_list);
+
+	/* Remove nodes with hold time 0 & check if list still has a head */
+	list_filter_out_nodes(bsgrp_node->partial_bsrp_list, is_hold_time_zero);
+	pend = listnode_head(bsgrp_node->partial_bsrp_list);
+
+	if (!str2prefix("224.0.0.0/4", &group_all))
+		return;
+
+	rp_all = pim_rp_find_match_group(pim, &group_all);
+	rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+	/* (Re)arm the holdtime timer for the new head before instating */
+	if (pend)
+		pim_g2rp_timer_start(pend, pend->rp_holdtime);
+
+	/* if rp node doesn't exist or exists but is not configured (rp_all),
+	 * install the rp from head (if it exists) of the partial list.  The
+	 * list is sorted such that head is the elected RP for the group.
+	 */
+	if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group)
+		    && pim_rpf_addr_is_inaddr_none(&rp_all->rp))) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Route node doesn't exist",
+				   __PRETTY_FUNCTION__);
+		if (pend)
+			pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
+				   NULL, RP_SRC_BSR);
+		had_rp_node = false;
+	} else {
+		rp_info = (struct rp_info *)rn->info;
+		if (!rp_info) {
+			route_unlock_node(rn);
+			if (pend)
+				pim_rp_new(pim, pend->rp_address,
+					   bsgrp_node->group, NULL, RP_SRC_BSR);
+			had_rp_node = false;
+		}
+	}
+
+	/* We didn't have rp node and pending list is empty(unlikely), cleanup*/
+	if ((!had_rp_node) && (!pend)) {
+		pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+				    &bsgrp_node->group);
+		pim_free_bsgrp_data(bsgrp_node);
+		return;
+	}
+
+	/* rp_info is only read on this path; it was assigned above whenever
+	 * had_rp_node stayed true (rn->info was non-NULL).
+	 */
+	if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
+		/* This means we searched and got rp node, needs unlock */
+		route_unlock_node(rn);
+
+		if (active && pend) {
+			if ((active->rp_address.s_addr
+			     != pend->rp_address.s_addr))
+				pim_rp_change(pim, pend->rp_address,
+					      bsgrp_node->group, RP_SRC_BSR);
+		}
+
+		/* Possible when the first BSM has group with 0 rp count */
+		if ((!active) && (!pend)) {
+			if (PIM_DEBUG_BSM) {
+				zlog_debug(
+					"%s: Both bsrp and partial list are empty",
+					__PRETTY_FUNCTION__);
+			}
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+
+		/* Possible when a group with 0 rp count received in BSM */
+		if ((active) && (!pend)) {
+			pim_rp_del(pim, active->rp_address, bsgrp_node->group,
+				   NULL, RP_SRC_BSR);
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			if (PIM_DEBUG_BSM) {
+				zlog_debug("%s:Pend List is null,del grp node",
+					   __PRETTY_FUNCTION__);
+			}
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+	}
+
+	if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
+		/* We need to unlock rn this case */
+		route_unlock_node(rn);
+		/* there is a chance that static rp exists and bsrp cleaned
+		 * so clean bsgrp node if pending list empty
+		 */
+		if (!pend) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s: Partial list is empty, static rp exists",
+					__PRETTY_FUNCTION__);
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+	}
+
+	/* swap the list & delete all nodes in partial list (old bsrp_list)
+	 * before swap
+	 *    active is head of bsrp list
+	 *    pend is head of partial list
+	 * After swap
+	 *    active is head of partial list
+	 *    pend is head of bsrp list
+	 * So check the appropriate head after swap and clean the new partial
+	 * list
+	 */
+	temp = bsgrp_node->bsrp_list;
+	bsgrp_node->bsrp_list = bsgrp_node->partial_bsrp_list;
+	bsgrp_node->partial_bsrp_list = temp;
+
+	if (active) {
+		pim_g2rp_timer_stop(active);
+		list_delete_all_node(bsgrp_node->partial_bsrp_list);
+	}
+}
+
static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
struct in_addr ip_src_addr)
{
pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
}
+/* pim_bsm_send_intf - transmit one fully built bootstrap message (PIM
+ * header already filled in) out of a single interface.
+ *
+ * Returns false when the interface has no PIM state, no socket, or the
+ * send itself fails; on success the per-interface and per-instance BSM
+ * tx counters are incremented and true is returned.
+ */
+static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
+			      struct in_addr dst_addr)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (!pim_ifp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Pim interface not available for %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return false;
+	}
+
+	if (pim_ifp->pim_sock_fd == -1) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Pim sock not available for %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return false;
+	}
+
+	if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
+			 dst_addr, buf, len, ifp->name) != 0) {
+		zlog_warn("%s: Could not send BSM message on interface: %s",
+			  __PRETTY_FUNCTION__, ifp->name);
+		return false;
+	}
+
+	pim_ifp->pim_ifstat_bsm_tx++;
+	pim_ifp->pim->bsm_sent++;
+	return true;
+}
+
+/* pim_bsm_frag_send - split a BSM that exceeds the interface PIM MTU
+ * into multiple bootstrap fragments and transmit each one.
+ *
+ * buf      - complete received BSM, starting at the PIM header
+ * len      - total length of buf in bytes
+ * ifp      - outgoing interface
+ * pim_mtu  - IP MTU less the maximum IP header length
+ * dst_addr - ALL-PIM-ROUTERS or a unicast neighbor address
+ * no_fwd   - value for the no-forward bit of each rebuilt PIM header
+ *
+ * Returns false only when pim_mtu cannot hold even a minimal BSM;
+ * otherwise true (individual sends are best-effort).
+ *
+ * Fix: pak_pending is now cleared immediately after a fragment is
+ * transmitted.  Previously it was cleared only after the next group
+ * header was copied, so a stale 'true' could survive the inner-loop
+ * break and make the trailing flush send a duplicate header-only
+ * fragment.
+ */
+static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
+			      uint32_t pim_mtu, struct in_addr dst_addr,
+			      bool no_fwd)
+{
+	struct bsmmsg_grpinfo *grpinfo, *curgrp;
+	uint8_t *firstgrp_ptr;
+	uint8_t *pkt;
+	uint8_t *pak_start;
+	uint32_t parsed_len = 0;
+	uint32_t this_pkt_rem;
+	uint32_t copy_byte_count;
+	uint32_t this_pkt_len;
+	uint8_t total_rp_cnt;
+	uint8_t this_rp_cnt;
+	uint8_t frag_rp_cnt;
+	uint8_t rp_fit_cnt;
+	bool pak_pending = false;
+
+	/* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
+	if (pim_mtu < (PIM_MIN_BSM_LEN)) {
+		zlog_warn(
+			"%s: mtu(pim mtu: %d) size less than minimum bootstrap len",
+			__PRETTY_FUNCTION__, pim_mtu);
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: mtu (pim mtu:%d) less than minimum bootstrap len",
+				__PRETTY_FUNCTION__, pim_mtu);
+		return false;
+	}
+
+	pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
+
+	pkt = pak_start;
+
+	/* Fill PIM header later before sending packet to calc checksum */
+	pkt += PIM_MSG_HEADER_LEN;
+	buf += PIM_MSG_HEADER_LEN;
+
+	/* copy bsm header to new packet at offset of pim hdr */
+	memcpy(pkt, buf, PIM_BSM_HDR_LEN);
+	pkt += PIM_BSM_HDR_LEN;
+	buf += PIM_BSM_HDR_LEN;
+	parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
+
+	/* Store the position of first grp ptr, which can be reused for
+	 * next packet to start filling group. old bsm header and pim hdr
+	 * remains. So need not be filled again for next packet onwards.
+	 */
+	firstgrp_ptr = pkt;
+
+	/* we received mtu excluding IP hdr len as param
+	 * now this_pkt_rem is mtu excluding
+	 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
+	 */
+	this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
+
+	/* For each group till the packet length parsed */
+	while (parsed_len < len) {
+		/* pkt ---> fragment's current pointer
+		 * buf ---> input buffer's current pointer
+		 * mtu ---> size of the pim packet - PIM header
+		 * curgrp ---> current group on the fragment
+		 * grpinfo ---> current group on the input buffer
+		 * this_pkt_rem ---> bytes remaining on the current fragment
+		 * rp_fit_cnt ---> num of rp for current grp that
+		 *                 fits this frag
+		 * total_rp_cnt ---> total rp present for the group in the buf
+		 * frag_rp_cnt ---> no of rp for the group to be fit in
+		 *                  the frag
+		 * this_rp_cnt ---> how many rp have we parsed
+		 */
+		grpinfo = (struct bsmmsg_grpinfo *)buf;
+		memcpy(pkt, buf, PIM_BSM_GRP_LEN);
+		curgrp = (struct bsmmsg_grpinfo *)pkt;
+		parsed_len += PIM_BSM_GRP_LEN;
+		pkt += PIM_BSM_GRP_LEN;
+		buf += PIM_BSM_GRP_LEN;
+		this_pkt_rem -= PIM_BSM_GRP_LEN;
+
+		/* initialize rp count and total_rp_cnt before the rp loop */
+		this_rp_cnt = 0;
+		total_rp_cnt = grpinfo->frag_rp_count;
+
+		/* Loop till all RPs for the group parsed */
+		while (this_rp_cnt < total_rp_cnt) {
+			/* All RP from a group processed here.
+			 * group is pointed by grpinfo.
+			 * At this point make sure buf pointing to a RP
+			 * within a group
+			 */
+			rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
+
+			/* calculate how many rp am i going to copy in
+			 * this frag
+			 */
+			if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
+				frag_rp_cnt = total_rp_cnt - this_rp_cnt;
+			else
+				frag_rp_cnt = rp_fit_cnt;
+
+			/* populate the frag rp count for the current grp */
+			curgrp->frag_rp_count = frag_rp_cnt;
+			copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
+
+			/* copy all the rp that we are fitting in this
+			 * frag for the grp
+			 */
+			memcpy(pkt, buf, copy_byte_count);
+			this_rp_cnt += frag_rp_cnt;
+			buf += copy_byte_count;
+			pkt += copy_byte_count;
+			parsed_len += copy_byte_count;
+			this_pkt_rem -= copy_byte_count;
+
+			/* Either we couldn't fit all rp for the group or the
+			 * mtu reached
+			 */
+			if ((this_rp_cnt < total_rp_cnt)
+			    || (this_pkt_rem
+				< (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
+				/* No space to fit in more rp, send this pkt */
+				this_pkt_len = pim_mtu - this_pkt_rem;
+				pim_msg_build_header(pak_start, this_pkt_len,
+						     PIM_MSG_TYPE_BOOTSTRAP,
+						     no_fwd);
+				pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
+						  dst_addr);
+
+				/* Fragment is on the wire; nothing is
+				 * pending until new data is copied in.
+				 * Clearing here (not after the next grp
+				 * hdr copy) prevents a duplicate
+				 * header-only fragment when we break out
+				 * below with a stale pak_pending.
+				 */
+				pak_pending = false;
+
+				/* Construct next fragment. Reuse old packet */
+				pkt = firstgrp_ptr;
+				this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
+							  + PIM_MSG_HEADER_LEN);
+
+				/* If all RPs of this group are done, move
+				 * on to the next group in the outer loop
+				 */
+				if (total_rp_cnt == this_rp_cnt)
+					break;
+
+				/* If some more RPs for the same group pending,
+				 * fill grp hdr
+				 */
+				memcpy(pkt, (uint8_t *)grpinfo,
+				       PIM_BSM_GRP_LEN);
+				curgrp = (struct bsmmsg_grpinfo *)pkt;
+				pkt += PIM_BSM_GRP_LEN;
+				this_pkt_rem -= PIM_BSM_GRP_LEN;
+			} else {
+				/* We filled something but not yet sent out */
+				pak_pending = true;
+			}
+		} /* while RP count */
+	} /*while parsed len */
+
+	/* Send if we have any unsent packet */
+	if (pak_pending) {
+		this_pkt_len = pim_mtu - this_pkt_rem;
+		pim_msg_build_header(pak_start, this_pkt_len,
+				     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+		pim_bsm_send_intf(pak_start, this_pkt_len, ifp, dst_addr);
+	}
+	XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
+	return true;
+}
+
+/* pim_bsm_fwd_whole_sz - flood a received BSM out of every BSM-enabled
+ * PIM interface in the VRF (only the global scope zone is handled, so
+ * 'sz' is currently unused).  The message is fragmented whenever it
+ * does not fit the interface's PIM MTU.
+ */
+static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
+				 uint32_t len, int sz)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	struct in_addr dst_addr;
+	uint32_t pim_mtu;
+	bool no_fwd = false;
+	bool frag_ret = false;
+
+	/* For now only global scope zone is supported, so send on all
+	 * pim interfaces in the vrf
+	 */
+	dst_addr = qpim_all_pim_routers_addr;
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		pim_ifp = ifp->info;
+		if (!pim_ifp || !pim_ifp->bsm_enable)
+			continue;
+
+		pim_hello_require(ifp);
+		pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+		if (len <= pim_mtu) {
+			/* Whole BSM fits; just (re)build the PIM header */
+			pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+					     no_fwd);
+			if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_send_intf returned false",
+						__PRETTY_FUNCTION__);
+			}
+		} else {
+			frag_ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
+						     dst_addr, no_fwd);
+			if (PIM_DEBUG_BSM)
+				zlog_debug("%s: pim_bsm_frag_send returned %s",
+					   __PRETTY_FUNCTION__,
+					   frag_ret ? "TRUE" : "FALSE");
+		}
+	}
+}
+
+/* pim_bsm_new_nbr_fwd - forward the cached BSMs of the global scope to
+ * a newly formed neighbor (unicast or multicast depending on the
+ * neighbor's unicast-BSM capability) so it learns the elected BSR and
+ * candidate-RP set without waiting for the next periodic BSM.
+ *
+ * Returns true when the (last) cached BSM was handed to the socket
+ * layer, false otherwise.
+ *
+ * Fixes over the previous revision:
+ *  - the DR guard tested '==' (i.e. we ARE the DR) while logging
+ *    "It is not DR" and never returned, so non-DR routers forwarded
+ *    anyway; only the DR may forward, so bail out when we are not it.
+ *  - the pim_bsm_send_intf() failure path logged the wrong function
+ *    name ("pim_bsm_frag_send failed").
+ */
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
+{
+	struct in_addr dst_addr;
+	struct pim_interface *pim_ifp;
+	struct bsm_scope *scope;
+	struct listnode *bsm_ln;
+	struct bsm_info *bsminfo;
+	char neigh_src_str[INET_ADDRSTRLEN];
+	uint32_t pim_mtu;
+	bool no_fwd = true;
+	bool ret = false;
+
+	if (PIM_DEBUG_BSM) {
+		pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
+			       sizeof(neigh_src_str));
+		zlog_debug("%s: New neighbor %s seen on %s",
+			   __PRETTY_FUNCTION__, neigh_src_str, ifp->name);
+	}
+
+	pim_ifp = ifp->info;
+
+	/* DR only forwards BSM packet */
+	if (pim_ifp->pim_dr_addr.s_addr != pim_ifp->primary_address.s_addr) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: It is not DR, so don't forward BSM packet",
+				__PRETTY_FUNCTION__);
+		return ret;
+	}
+
+	if (!pim_ifp->bsm_enable) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: BSM proc not enabled on %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return ret;
+	}
+
+	scope = &pim_ifp->pim->global_scope;
+
+	if (!scope->bsm_list->count) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: BSM list for the scope is empty",
+				   __PRETTY_FUNCTION__);
+		return ret;
+	}
+
+	/* Unicast-capable neighbors get the BSM directly; otherwise fall
+	 * back to ALL-PIM-ROUTERS multicast.
+	 */
+	if (!pim_ifp->ucast_bsm_accept) {
+		dst_addr = qpim_all_pim_routers_addr;
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Sending BSM mcast to %s",
+				   __PRETTY_FUNCTION__, neigh_src_str);
+	} else {
+		dst_addr = neigh->source_addr;
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Sending BSM ucast to %s",
+				   __PRETTY_FUNCTION__, neigh_src_str);
+	}
+	pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+	pim_hello_require(ifp);
+
+	for (ALL_LIST_ELEMENTS_RO(scope->bsm_list, bsm_ln, bsminfo)) {
+		if (pim_mtu < bsminfo->size) {
+			ret = pim_bsm_frag_send(bsminfo->bsm, bsminfo->size,
+						ifp, pim_mtu, dst_addr, no_fwd);
+			if (!ret) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_frag_send failed",
+						__PRETTY_FUNCTION__);
+			}
+		} else {
+			/* Pim header needs to be constructed */
+			pim_msg_build_header(bsminfo->bsm, bsminfo->size,
+					     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+			ret = pim_bsm_send_intf(bsminfo->bsm, bsminfo->size,
+						ifp, dst_addr);
+			if (!ret) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_send_intf failed",
+						__PRETTY_FUNCTION__);
+			}
+		}
+	}
+	return ret;
+}
+
struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
struct prefix *grp)
{
else
grpaddr = grpaddr & mask;
rp_add = ntohl(rp.s_addr);
- temp = 1103515245 * ((1103515245 * grpaddr + 12345) ^ rp_add) + 12345;
+ temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
+ + 12345;
hash = temp & (0x7fffffff);
return hash;
}
/*memory allocation for bsm_rpinfo */
bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_NODE, sizeof(*bsm_rpinfo));
- if (!bsm_rpinfo) {
- if (PIM_DEBUG_BSM)
- zlog_debug("%s, Memory allocation failed.\r\n",
- __PRETTY_FUNCTION__);
- return false;
- }
-
bsm_rpinfo->rp_prio = rp->rp_pri;
bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
int ins_count = 0;
while (buflen > offset) {
+ if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: buflen received %d is less than the internal data structure of the packet would suggest",
+ __PRETTY_FUNCTION__, buflen);
+ return false;
+ }
/* Extract Group tlv from BSM */
memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
}
group.family = AF_INET;
+ if (grpinfo.group.mask > IPV4_MAX_BITLEN) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s, v4 prefix length specified: %d is too long",
+ __PRETTY_FUNCTION__, grpinfo.group.mask);
+ return false;
+ }
group.prefixlen = grpinfo.group.mask;
group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
ins_count = 0;
while (frag_rp_cnt--) {
+ if (offset + (int)sizeof(struct bsmmsg_rpinfo)
+ > buflen) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, buflen received: %u is less than the internal data structure of the packet would suggest",
+ __PRETTY_FUNCTION__, buflen);
+ return false;
+ }
+
/* Extract RP address tlv from BSM */
memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
zlog_debug(
"%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
__PRETTY_FUNCTION__);
- /* replace the bsrp_list with pending list - TODO */
+ /* replace the bsrp_list with pending list */
+ pim_instate_pend_list(bsgrp);
}
}
return true;
uint32_t buf_size, bool no_fwd)
{
struct bsm_hdr *bshdr;
+ int sz = PIM_GBL_SZ_ID;
struct bsmmsg_grpinfo *msg_grp;
struct pim_interface *pim_ifp = NULL;
+ struct bsm_info *bsminfo;
struct pim_instance *pim;
char bsr_str[INET_ADDRSTRLEN];
uint16_t frag_tag;
- bool empty_bsm = FALSE;
+ bool empty_bsm = false;
/* BSM Packet acceptance validation */
pim_ifp = ifp->info;
return -1;
}
+ if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct bsm_hdr))) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: received buffer length of %d which is too small to properly decode",
+ __PRETTY_FUNCTION__, buf_size);
+ return -1;
+ }
+
bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
sizeof(bsr_str));
/* update the scope information from bsm */
pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+
+ if (!no_fwd) {
+ pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+ bsminfo = XCALLOC(MTYPE_PIM_BSM_INFO, sizeof(struct bsm_info));
+
+ bsminfo->bsm = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, buf_size);
+
+ bsminfo->size = buf_size;
+ memcpy(bsminfo->bsm, buf, buf_size);
+ listnode_add(pim_ifp->pim->global_scope.bsm_list, bsminfo);
+ }
+
return 0;
}