continue;
for (oldnh = bnc->nexthop; oldnh; oldnh = oldnh->next)
- if (nexthop_same_no_recurse(oldnh, nexthop) &&
- nexthop_labels_match(oldnh, nexthop))
+ if (nexthop_same(oldnh, nexthop))
break;
if (!oldnh)
vty_out(vty, " ");
}
+/* Return the peer's hostname if it is set and BGP_FLAG_SHOW_HOSTNAME is
+ * enabled on the peer's instance, otherwise NULL.  Callers print the
+ * returned FQDN in place of the nexthop address in show output.
+ */
+static char *bgp_nexthop_fqdn(struct peer *peer)
+{
+	if (peer->hostname && bgp_flag_check(peer->bgp, BGP_FLAG_SHOW_HOSTNAME))
+		return peer->hostname;
+	return NULL;
+}
+
/* called from terminal list command */
void route_vty_out(struct vty *vty, struct prefix *p,
struct bgp_path_info *path, int display, safi_t safi,
bool nexthop_othervrf = false;
vrf_id_t nexthop_vrfid = VRF_DEFAULT;
const char *nexthop_vrfname = VRF_DEFAULT_NAME;
+ char *nexthop_fqdn = bgp_nexthop_fqdn(path->peer);
if (json_paths)
json_path = json_object_new_object();
if (json_paths) {
json_nexthop_global = json_object_new_object();
- json_object_string_add(json_nexthop_global, "afi",
- (af == AF_INET) ? "ip" : "ipv6");
- json_object_string_add(json_nexthop_global,
- (af == AF_INET) ? "ip" : "ipv6",
- nexthop);
+ json_object_string_add(
+ json_nexthop_global, "afi",
+ nexthop_fqdn ? "fqdn"
+ : (af == AF_INET) ? "ip" : "ipv6");
+ json_object_string_add(
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn"
+ : (af == AF_INET) ? "ip" : "ipv6",
+ nexthop_fqdn ? nexthop_fqdn : nexthop);
json_object_boolean_true_add(json_nexthop_global,
"used");
} else
- vty_out(vty, "%s%s", nexthop, vrf_id_str);
+ vty_out(vty, "%s%s",
+ nexthop_fqdn ? nexthop_fqdn : nexthop,
+ vrf_id_str);
} else if (safi == SAFI_EVPN) {
if (json_paths) {
json_nexthop_global = json_object_new_object();
- json_object_string_add(json_nexthop_global, "ip",
- inet_ntoa(attr->nexthop));
+ json_object_string_add(
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn ? nexthop_fqdn
+ : inet_ntoa(attr->nexthop));
json_object_string_add(json_nexthop_global, "afi",
"ipv4");
json_object_boolean_true_add(json_nexthop_global,
"used");
} else
- vty_out(vty, "%-16s%s", inet_ntoa(attr->nexthop),
+ vty_out(vty, "%-16s%s",
+ nexthop_fqdn ?: inet_ntoa(attr->nexthop),
vrf_id_str);
} else if (safi == SAFI_FLOWSPEC) {
if (attr->nexthop.s_addr != 0) {
if (json_paths) {
json_nexthop_global = json_object_new_object();
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntoa(attr->nexthop));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(attr->nexthop));
json_object_string_add(json_nexthop_global,
"afi", "ipv4");
json_object_boolean_true_add(
json_nexthop_global,
"used");
} else {
- vty_out(vty, "%-16s", inet_ntoa(attr->nexthop));
+ vty_out(vty, "%-16s",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(attr->nexthop));
}
}
} else if (p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_EVPN))
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntoa(attr->mp_nexthop_global_in));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(
+ attr->mp_nexthop_global_in));
else
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntoa(attr->nexthop));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(attr->nexthop));
json_object_string_add(json_nexthop_global, "afi",
"ipv4");
char buf[BUFSIZ];
snprintf(buf, sizeof(buf), "%s%s",
- inet_ntoa(attr->nexthop), vrf_id_str);
+ nexthop_fqdn ? nexthop_fqdn
+ : inet_ntoa(attr->nexthop),
+ vrf_id_str);
vty_out(vty, "%-16s", buf);
}
}
if (json_paths) {
json_nexthop_global = json_object_new_object();
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntop(AF_INET6, &attr->mp_nexthop_global,
- buf, BUFSIZ));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(AF_INET6,
+ &attr->mp_nexthop_global,
+ buf, BUFSIZ));
json_object_string_add(json_nexthop_global, "afi",
"ipv6");
json_object_string_add(json_nexthop_global, "scope",
|| (path->peer->conf_if)) {
json_nexthop_ll = json_object_new_object();
json_object_string_add(
- json_nexthop_ll, "ip",
- inet_ntop(AF_INET6,
- &attr->mp_nexthop_local, buf,
- BUFSIZ));
+ json_nexthop_ll,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_local,
+ buf, BUFSIZ));
json_object_string_add(json_nexthop_ll, "afi",
"ipv6");
json_object_string_add(json_nexthop_ll, "scope",
} else {
len = vty_out(
vty, "%s%s",
- inet_ntop(
- AF_INET6,
- &attr->mp_nexthop_local,
- buf, BUFSIZ),
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_local,
+ buf, BUFSIZ),
vrf_id_str);
len = 16 - len;
} else {
len = vty_out(
vty, "%s%s",
- inet_ntop(AF_INET6,
- &attr->mp_nexthop_global, buf,
- BUFSIZ),
- vrf_id_str);
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_global,
+ buf, BUFSIZ),
+ vrf_id_str);
len = 16 - len;
if (len < 1)
vty_out(vty, "%s", str);
else
json_object_string_add(json_overlay, "esi", str);
-
+
XFREE(MTYPE_TMP, str);
if (is_evpn_prefix_ipaddr_v4((struct prefix_evpn *)p)) {
bool nexthop_self =
CHECK_FLAG(path->flags, BGP_PATH_ANNC_NH_SELF) ? true : false;
int i;
+ char *nexthop_fqdn = bgp_nexthop_fqdn(path->peer);
if (json_paths) {
json_path = json_object_new_object();
|| safi == SAFI_EVPN) {
if (json_paths)
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntoa(
- attr->mp_nexthop_global_in));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(
+ attr->mp_nexthop_global_in));
else
vty_out(vty, " %s",
- inet_ntoa(
- attr->mp_nexthop_global_in));
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(
+ attr->mp_nexthop_global_in));
} else {
if (json_paths)
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntoa(attr->nexthop));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(
+ attr->nexthop));
else
vty_out(vty, " %s",
- inet_ntoa(attr->nexthop));
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntoa(
+ attr->nexthop));
}
if (json_paths)
} else {
if (json_paths) {
json_object_string_add(
- json_nexthop_global, "ip",
- inet_ntop(AF_INET6,
- &attr->mp_nexthop_global, buf,
- INET6_ADDRSTRLEN));
+ json_nexthop_global,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_global,
+ buf,
+ INET6_ADDRSTRLEN));
json_object_string_add(json_nexthop_global,
"afi", "ipv6");
json_object_string_add(json_nexthop_global,
"scope", "global");
} else {
vty_out(vty, " %s",
- inet_ntop(AF_INET6,
- &attr->mp_nexthop_global, buf,
- INET6_ADDRSTRLEN));
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_global,
+ buf,
+ INET6_ADDRSTRLEN));
}
}
if (json_paths) {
json_nexthop_ll = json_object_new_object();
json_object_string_add(
- json_nexthop_ll, "ip",
- inet_ntop(AF_INET6,
- &attr->mp_nexthop_local, buf,
- INET6_ADDRSTRLEN));
+ json_nexthop_ll,
+ nexthop_fqdn ? "fqdn" : "ip",
+ nexthop_fqdn
+ ? nexthop_fqdn
+ : inet_ntop(
+ AF_INET6,
+ &attr->mp_nexthop_local,
+ buf,
+ INET6_ADDRSTRLEN));
json_object_string_add(json_nexthop_ll, "afi",
"ipv6");
json_object_string_add(json_nexthop_ll, "scope",
}
if (bgp->l3vni) {
- vty_out(vty, "%% Please unconfigure l3vni %u",
+ vty_out(vty, "%% Please unconfigure l3vni %u\n",
bgp->l3vni);
return CMD_WARNING_CONFIG_FAILED;
}
+
+ /* Cannot delete default instance if vrf instances exist */
+ if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
+ struct listnode *node;
+ struct bgp *tmp_bgp;
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, tmp_bgp)) {
+ if (tmp_bgp->inst_type
+ == BGP_INSTANCE_TYPE_VRF) {
+ vty_out(vty,
+ "%% Cannot delete default BGP instance. Dependent VRF instances exist\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ }
+ }
}
bgp_delete(bgp);
SET_FLAG(peer->flags, PEER_FLAG_DELETE);
+ bgp_bfd_deregister_peer(peer);
+
/* If this peer belongs to peer group, clear up the
relationship. */
if (peer->group) {
Turns on BFD support for PIM for this interface.
+.. index:: ip pim bsm
+.. clicmd:: ip pim bsm
+
+ Tell pim that we would like to use this interface to process bootstrap
+ messages. This is enabled by default. 'no' form of this command is used to
+ restrict bsm messages on this interface.
+
+.. index:: ip pim unicast-bsm
+.. clicmd:: ip pim unicast-bsm
+
+ Tell pim that we would like to allow interface to process unicast bootstrap
+ messages. This is enabled by default. 'no' form of this command is used to
+ restrict processing of unicast bsm messages on this interface.
+
.. index:: ip pim drpriority (1-4294967295)
.. clicmd:: ip pim drpriority (1-4294967295)
Display upstream information for S,G's and the RPF data associated with them.
+.. index:: show ip pim bsr
+.. clicmd:: show ip pim bsr
+
+ Display current bsr, its uptime and last received bsm age.
+
+.. index:: show ip pim bsrp-info
+.. clicmd:: show ip pim bsrp-info
+
+ Display group-to-rp mappings received from E-BSR.
+
+.. index:: show ip pim bsm-database
+.. clicmd:: show ip pim bsm-database
+
+   Display all fragments of the stored bootstrap message in user readable format.
+
.. index:: show ip rpf
.. clicmd:: show ip rpf
This traces pim code and how it is running.
+.. index:: debug pim bsm
+.. clicmd:: debug pim bsm
+
+ This turns on debugging for BSR message processing.
+
.. index:: debug pim zebra
.. clicmd:: debug pim zebra
uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04};
static uint8_t discard_buff[8192];
-static uint8_t sock_buff[8192];
/*
* if level is 0 we are joining p2p multicast
return ISIS_WARNING;
}
- /* on lan we have to read to the static buff first */
- bytesread = recvfrom(circuit->fd, sock_buff, sizeof(sock_buff),
- MSG_DONTWAIT, (struct sockaddr *)&s_addr,
- (socklen_t *)&addr_len);
+ /* Ensure that we have enough space for a pdu padded to fill the mtu */
+ unsigned int max_size =
+ circuit->interface->mtu > circuit->interface->mtu6
+ ? circuit->interface->mtu
+ : circuit->interface->mtu6;
+ uint8_t temp_buff[max_size];
+ bytesread =
+ recvfrom(circuit->fd, temp_buff, max_size, MSG_DONTWAIT,
+ (struct sockaddr *)&s_addr, (socklen_t *)&addr_len);
if (bytesread < 0) {
- zlog_warn("isis_recv_pdu_bcast(): recvfrom() failed");
+ zlog_warn("%s: recvfrom() failed", __func__);
return ISIS_WARNING;
}
-
/* then we lose the LLC */
- stream_write(circuit->rcv_stream, sock_buff + LLC_LEN,
+ stream_write(circuit->rcv_stream, temp_buff + LLC_LEN,
bytesread - LLC_LEN);
-
memcpy(ssnpa, &s_addr.sll_addr, s_addr.sll_halen);
return ISIS_OK;
{
struct msghdr msg;
struct iovec iov[2];
+ char temp_buff[LLC_LEN];
/* we need to do the LLC in here because of P2P circuits, which will
* not need it
/* on a broadcast circuit */
/* first we put the LLC in */
- sock_buff[0] = 0xFE;
- sock_buff[1] = 0xFE;
- sock_buff[2] = 0x03;
+ temp_buff[0] = 0xFE;
+ temp_buff[1] = 0xFE;
+ temp_buff[2] = 0x03;
memset(&msg, 0, sizeof(msg));
msg.msg_name = &sa;
msg.msg_namelen = sizeof(struct sockaddr_ll);
msg.msg_iov = iov;
msg.msg_iovlen = 2;
- iov[0].iov_base = sock_buff;
+ iov[0].iov_base = temp_buff;
iov[0].iov_len = LLC_LEN;
iov[1].iov_base = circuit->snd_stream->data;
iov[1].iov_len = stream_get_endp(circuit->snd_stream);
list->count++;
}
+/* Insert val at its sorted position (per list->cmp) unless an equal
+ * element (cmp returning 0) is already present; returns true on insert,
+ * false on duplicate.  Without a cmp function the element is appended.
+ */
+bool listnode_add_sort_nodup(struct list *list, void *val)
+{
+	struct listnode *n;
+	struct listnode *new;
+	int ret;
+
+	assert(val != NULL);
+
+	if (list->cmp) {
+		for (n = list->head; n; n = n->next) {
+			ret = (*list->cmp)(val, n->data);
+			if (ret < 0) {
+				/* Found the insertion point: link the new
+				 * node in front of n.
+				 */
+				new = listnode_new();
+				new->data = val;
+
+				new->next = n;
+				new->prev = n->prev;
+
+				if (n->prev)
+					n->prev->next = new;
+				else
+					list->head = new;
+				n->prev = new;
+				list->count++;
+				return true;
+			}
+			/* found duplicate return false */
+			if (ret == 0)
+				return false;
+		}
+	}
+
+	/* No cmp function, or val sorts after every element: append. */
+	new = listnode_new();
+	new->data = val;
+
+	LISTNODE_ATTACH(list, new);
+
+	return true;
+}
+
void listnode_add_sort(struct list *list, void *val)
{
struct listnode *n;
list->count = 0;
}
+/* Delete every node whose data satisfies cond(); with a NULL cond,
+ * delete every node.  If the list has a del callback it is invoked on
+ * the data before the node is removed.
+ */
+void list_filter_out_nodes(struct list *list, bool (*cond)(void *data))
+{
+	struct listnode *node;
+	struct listnode *next;
+	void *data;
+
+	assert(list);
+
+	for (ALL_LIST_ELEMENTS(list, node, next, data)) {
+		/* NULL cond means "match everything". */
+		if (!cond || cond(data)) {
+			if (list->del)
+				(*list->del)(data);
+			list_delete_node(list, node);
+		}
+	}
+}
+
void list_delete(struct list **list)
{
assert(*list);
*/
extern void list_add_list(struct list *list, struct list *add);
+/*
+ * Delete all nodes which satisfy a condition from a list.
+ * Deletes the node if cond function returns true for the node.
+ * If function ptr passed is NULL, it deletes all nodes
+ *
+ * list
+ * list to operate on
+ * cond
+ * function pointer which takes node data as input and return TRUE or FALSE
+ */
+
+extern void list_filter_out_nodes(struct list *list, bool (*cond)(void *data));
+
+/*
+ * Insert a new element into a list with insertion sort if there is no
+ * duplicate element present in the list. This assumes the input list is
+ * sorted. If the list is unsorted, duplicates are only detected up to
+ * the point where the insertion position is found.
+ *
+ * If list->cmp is set, this function is used to determine the position to
+ * insert the new element. If it is not set, this function is equivalent to
+ * listnode_add. duplicate element is determined by cmp function returning 0.
+ *
+ * Runtime is O(N).
+ *
+ * list
+ * list to operate on
+ *
+ * val
+ * element to add
+ */
+
+extern bool listnode_add_sort_nodup(struct list *list, void *val);
/* List iteration macro.
* Usage: for (ALL_LIST_ELEMENTS (...) { ... }
* It is safe to delete the listnode using this macro.
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop")
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label")
-/* check if nexthops are same, non-recursive */
-int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2)
+/* Compare the MPLS label stacks of two nexthops.  No labels on either
+ * side compares equal; a stack sorts after no stack; deeper stacks
+ * sort after shallower ones; equal-depth stacks compare by value.
+ */
+static int _nexthop_labels_cmp(const struct nexthop *nh1,
+			       const struct nexthop *nh2)
 {
-	if (next1->type != next2->type)
+	const struct mpls_label_stack *nhl1 = NULL;
+	const struct mpls_label_stack *nhl2 = NULL;
+
+	nhl1 = nh1->nh_label;
+	nhl2 = nh2->nh_label;
+
+	/* No labels is a match */
+	if (!nhl1 && !nhl2)
 		return 0;
-	switch (next1->type) {
+	if (nhl1 && !nhl2)
+		return 1;
+
+	if (nhl2 && !nhl1)
+		return -1;
+
+	if (nhl1->num_labels > nhl2->num_labels)
+		return 1;
+
+	if (nhl1->num_labels < nhl2->num_labels)
+		return -1;
+
+	/* memcmp takes a byte count: scale the label count by the label
+	 * element size, otherwise only the first num_labels bytes of the
+	 * stack are compared.
+	 */
+	return memcmp(nhl1->label, nhl2->label,
+		      nhl1->num_labels * sizeof(nhl1->label[0]));
+}
+
+/* Compare two g_addr unions according to the nexthop type: IPv4 or
+ * IPv6 address comparison; interface and blackhole nexthops carry no
+ * address and always compare equal here.
+ */
+static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
+			       const union g_addr *addr1,
+			       const union g_addr *addr2)
+{
+	int ret = 0;
+
+	switch (type) {
 	case NEXTHOP_TYPE_IPV4:
 	case NEXTHOP_TYPE_IPV4_IFINDEX:
-		if (!IPV4_ADDR_SAME(&next1->gate.ipv4, &next2->gate.ipv4))
-			return 0;
-		if (next1->ifindex && (next1->ifindex != next2->ifindex))
-			return 0;
+		ret = IPV4_ADDR_CMP(&addr1->ipv4, &addr2->ipv4);
+		break;
+	case NEXTHOP_TYPE_IPV6:
+	case NEXTHOP_TYPE_IPV6_IFINDEX:
+		ret = IPV6_ADDR_CMP(&addr1->ipv6, &addr2->ipv6);
 		break;
 	case NEXTHOP_TYPE_IFINDEX:
-		if (next1->ifindex != next2->ifindex)
-			return 0;
+	case NEXTHOP_TYPE_BLACKHOLE:
+		/* No addr here */
 		break;
+	}
+
+	return ret;
+}
+
+/* Compare the gateway addresses of two nexthops (type taken from nh1). */
+static int _nexthop_gateway_cmp(const struct nexthop *nh1,
+				const struct nexthop *nh2)
+{
+	return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+}
+
+/* Compare the source addresses of two nexthops (type taken from nh1). */
+static int _nexthop_source_cmp(const struct nexthop *nh1,
+			       const struct nexthop *nh2)
+{
+	return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+}
+
+/* Total-order comparison of everything about two nexthops except their
+ * label stacks: vrf_id, then type, then the per-type data (gateway,
+ * ifindex, blackhole type), and finally the source address.
+ * Returns <0, 0 or >0 like memcmp().
+ */
+static int _nexthop_cmp_no_labels(const struct nexthop *next1,
+				  const struct nexthop *next2)
+{
+	int ret = 0;
+
+	if (next1->vrf_id < next2->vrf_id)
+		return -1;
+
+	if (next1->vrf_id > next2->vrf_id)
+		return 1;
+
+	if (next1->type < next2->type)
+		return -1;
+
+	if (next1->type > next2->type)
+		return 1;
+
+	switch (next1->type) {
+	case NEXTHOP_TYPE_IPV4:
 	case NEXTHOP_TYPE_IPV6:
-		if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
-			return 0;
+		ret = _nexthop_gateway_cmp(next1, next2);
+		if (ret != 0)
+			return ret;
 		break;
+	case NEXTHOP_TYPE_IPV4_IFINDEX:
 	case NEXTHOP_TYPE_IPV6_IFINDEX:
-		if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
-			return 0;
-		if (next1->ifindex != next2->ifindex)
-			return 0;
+		ret = _nexthop_gateway_cmp(next1, next2);
+		if (ret != 0)
+			return ret;
+		/* Intentional Fall-Through */
+	case NEXTHOP_TYPE_IFINDEX:
+		if (next1->ifindex < next2->ifindex)
+			return -1;
+
+		if (next1->ifindex > next2->ifindex)
+			return 1;
 		break;
-	default:
-		/* do nothing */
+	case NEXTHOP_TYPE_BLACKHOLE:
+		if (next1->bh_type < next2->bh_type)
+			return -1;
+
+		if (next1->bh_type > next2->bh_type)
+			return 1;
 		break;
 	}
-	return 1;
+
+	ret = _nexthop_source_cmp(next1, next2);
+
+	return ret;
+}
+
+/* Total-order comparison of two nexthops: all non-label fields first
+ * (vrf, type, gateway, ifindex, source), then the MPLS label stacks.
+ * Returns <0, 0 or >0 like memcmp().
+ */
+int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
+{
+	int ret = 0;
+
+	ret = _nexthop_cmp_no_labels(next1, next2);
+	if (ret != 0)
+		return ret;
+
+	ret = _nexthop_labels_cmp(next1, next2);
+
+	return ret;
+}
int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2)
/*
* Check if the labels match for the 2 nexthops specified.
*/
-int nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
+bool nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
{
- const struct mpls_label_stack *nhl1, *nhl2;
-
- nhl1 = nh1->nh_label;
- nhl2 = nh2->nh_label;
-
- /* No labels is a match */
- if (!nhl1 && !nhl2)
- return 1;
-
- if (!nhl1 || !nhl2)
- return 0;
-
- if (nhl1->num_labels != nhl2->num_labels)
- return 0;
-
- if (memcmp(nhl1->label, nhl2->label, nhl1->num_labels))
- return 0;
+ if (_nexthop_labels_cmp(nh1, nh2) != 0)
+ return false;
- return 1;
+ return true;
}
struct nexthop *nexthop_new(void)
if (nh1 == nh2)
return true;
- if (nh1->vrf_id != nh2->vrf_id)
+ if (nexthop_cmp(nh1, nh2) != 0)
return false;
- if (nh1->type != nh2->type)
+ return true;
+}
+
+bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ if (nh1 && !nh2)
return false;
- switch (nh1->type) {
- case NEXTHOP_TYPE_IFINDEX:
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV6:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- if (nh1->bh_type != nh2->bh_type)
- return false;
- break;
- }
+ if (!nh1 && nh2)
+ return false;
+
+ if (nh1 == nh2)
+ return true;
+
+ if (_nexthop_cmp_no_labels(nh1, nh2) != 0)
+ return false;
- /* Compare labels too (if present) */
- return (!!nexthop_labels_match(nh1, nh2));
+ return true;
}
/* Update nexthop with label information. */
uint32_t nexthop_hash(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
+extern bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2);
+extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
-extern int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2);
-extern int nexthop_labels_match(const struct nexthop *nh1,
- const struct nexthop *nh2);
+extern bool nexthop_labels_match(const struct nexthop *nh1,
+ const struct nexthop *nh2);
extern int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2);
extern const char *nexthop2str(const struct nexthop *nexthop,
return CMD_SUCCESS;
}
- /* We must have, at a minimum, both the type and prefix here */
- if ((typestr == NULL) || (prefix == NULL)) {
- vty_out(vty, "%% Both prefix and type required\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
/* Check sequence number. */
if (seq)
seqnum = (int64_t)atol(seq);
+ /* Sequence number specified, but nothing else. */
+ if (seq && typestr == NULL && prefix == NULL && ge == NULL
+ && le == NULL) {
+ pentry = prefix_seq_check(plist, seqnum);
+
+ if (pentry == NULL) {
+ vty_out(vty,
+ "%% Can't find prefix-list %s with sequence number %lu\n",
+ name, seqnum);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ prefix_list_entry_delete(plist, pentry, 1);
+ return CMD_SUCCESS;
+ }
+
/* ge and le number */
if (ge)
genum = atoi(ge);
if (le)
lenum = atoi(le);
+ /* We must have, at a minimum, both the type and prefix here */
+ if ((typestr == NULL) || (prefix == NULL))
+ return CMD_WARNING_CONFIG_FAILED;
+
/* Check of filter type. */
if (strncmp("permit", typestr, 1) == 0)
type = PREFIX_PERMIT;
action, dest, ge_str, le_str);
}
+/* "no ip prefix-list WORD seq (1-4294967295)"
+ * Remove a single prefix-list entry identified solely by its sequence
+ * number (handled by the seq-only branch of vty_prefix_list_uninstall).
+ */
+DEFPY(no_ip_prefix_list_seq, no_ip_prefix_list_seq_cmd,
+      "no ip prefix-list WORD seq (1-4294967295)",
+      NO_STR IP_STR PREFIX_LIST_STR
+      "Name of a prefix list\n"
+      "sequence number of an entry\n"
+      "Sequence number\n")
+{
+	return vty_prefix_list_uninstall(vty, AFI_IP, prefix_list, seq_str,
+					 NULL, NULL, NULL, NULL);
+}
+
DEFPY (no_ip_prefix_list_all,
no_ip_prefix_list_all_cmd,
"no ip prefix-list WORD",
install_element(CONFIG_NODE, &ip_prefix_list_cmd);
install_element(CONFIG_NODE, &no_ip_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ip_prefix_list_seq_cmd);
install_element(CONFIG_NODE, &no_ip_prefix_list_all_cmd);
install_element(CONFIG_NODE, &ip_prefix_list_description_cmd);
Add PIM header
*/
pim_msg_size = pim_msg_curr - pim_msg;
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT);
+ pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT, false);
return pim_msg_size;
}
--- /dev/null
+/*
+ * pim_bsm.c: PIM BSM handling routines
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+#include "if.h"
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_instance.h"
+#include "pim_rpf.h"
+#include "pim_hello.h"
+#include "pim_pim.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_time.h"
+
+/* Functions forward declaration */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+ int hold_time);
+
+/* Memory Types */
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_NODE, "PIM BSR advertised RP info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_INFO, "PIM BSM Info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet")
+
+/* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */
+#define MAX_IP_HDR_LEN 24
+
+/* pim_bsm_write_config - Write the interface pim bsm configuration.
+ *
+ * Emits "no ip pim bsm" / "no ip pim unicast-bsm" only when the
+ * (default-enabled) BSM processing knobs have been turned off.
+ */
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (pim_ifp) {
+		if (!pim_ifp->bsm_enable)
+			vty_out(vty, " no ip pim bsm\n");
+		if (!pim_ifp->ucast_bsm_accept)
+			vty_out(vty, " no ip pim unicast-bsm\n");
+	}
+}
+
+/* Free a bsgrp_node together with both of its RP lists (the active
+ * bsrp_list and the pending partial_bsrp_list).
+ */
+static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
+{
+	if (bsgrp_node->bsrp_list)
+		list_delete(&bsgrp_node->bsrp_list);
+	if (bsgrp_node->partial_bsrp_list)
+		list_delete(&bsgrp_node->partial_bsrp_list);
+	XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
+}
+
+/* Detach the bsgrp entry for 'grp' from the table and release the node.
+ * route_unlock_node() is called twice: once for the reference taken by
+ * route_node_lookup() here, and once more to drop the long-lived
+ * reference held since route_node_get() in pim_bsm_new_bsgrp_node().
+ * The caller is responsible for freeing the bsgrp data itself.
+ */
+static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
+{
+	struct route_node *rn;
+
+	rn = route_node_lookup(rt, grp);
+	if (rn) {
+		rn->info = NULL;
+		route_unlock_node(rn);
+		route_unlock_node(rn);
+	}
+}
+
+/* del callback for scope->bsm_list: free a stored BSM fragment and its
+ * packet buffer.
+ */
+static void pim_bsm_node_free(struct bsm_info *bsm)
+{
+	if (bsm->bsm)
+		XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, bsm->bsm);
+	XFREE(MTYPE_PIM_BSM_INFO, bsm);
+}
+
+/* Comparator for the per-group RP lists: orders candidate RPs so that
+ * the head of the list is the elected RP.
+ *
+ * RP election algorithm:
+ * Step-1 : Lowest RP priority has the highest precedence.
+ * Step-2 : If the priorities are equal, the higher hash value has
+ *          higher precedence.
+ * Step-3 : If the hash values are also equal, the highest RP address
+ *          becomes the elected RP.
+ *
+ * NOTE(review): rp_address.s_addr is compared in network byte order --
+ * confirm that is the intended "highest address" ordering.
+ */
+static int pim_g2rp_list_compare(struct bsm_rpinfo *node1,
+				 struct bsm_rpinfo *node2)
+{
+	if (node1->rp_prio < node2->rp_prio)
+		return -1;
+	if (node1->rp_prio > node2->rp_prio)
+		return 1;
+	if (node1->hash < node2->hash)
+		return 1;
+	if (node1->hash > node2->hash)
+		return -1;
+	if (node1->rp_address.s_addr < node2->rp_address.s_addr)
+		return 1;
+	if (node1->rp_address.s_addr > node2->rp_address.s_addr)
+		return -1;
+	return 0;
+}
+
+/* del callback for the bsrp lists: cancel any pending g2rp expiry timer
+ * before freeing the RP entry.
+ */
+static void pim_free_bsrp_node(struct bsm_rpinfo *bsrp_info)
+{
+	if (bsrp_info->g2rp_timer)
+		THREAD_OFF(bsrp_info->g2rp_timer);
+	XFREE(MTYPE_PIM_BSRP_NODE, bsrp_info);
+}
+
+/* Allocate an RP list wired to the g2rp comparator (election order) and
+ * free routine.  NOTE(review): if list_new() cannot actually return
+ * NULL (it aborts on allocation failure in this codebase), the NULL
+ * check below is dead code -- confirm against lib/linklist.c.
+ */
+static struct list *pim_alloc_bsrp_list(void)
+{
+	struct list *new_list = NULL;
+
+	new_list = list_new();
+
+	if (!new_list)
+		return NULL;
+
+	new_list->cmp = (int (*)(void *, void *))pim_g2rp_list_compare;
+	new_list->del = (void (*)(void *))pim_free_bsrp_node;
+
+	return new_list;
+}
+
+/* Create a bsgrp_node for group prefix 'grp', attach it to the route
+ * table node (keeping the route_node_get() reference for the lifetime
+ * of the entry) and allocate its active and pending RP lists.
+ * Returns NULL on failure.
+ */
+static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
+						 struct prefix *grp)
+{
+	struct route_node *rn;
+	struct bsgrp_node *bsgrp;
+
+	rn = route_node_get(rt, grp);
+	if (!rn) {
+		zlog_warn("%s: route node creation failed",
+			  __PRETTY_FUNCTION__);
+		return NULL;
+	}
+	bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
+
+	if (!bsgrp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: bsgrp alloc failed",
+				   __PRETTY_FUNCTION__);
+		route_unlock_node(rn);
+		return NULL;
+	}
+
+	rn->info = bsgrp;
+	bsgrp->bsrp_list = pim_alloc_bsrp_list();
+	bsgrp->partial_bsrp_list = pim_alloc_bsrp_list();
+
+	/* NOTE(review): on this failure path rn->info still points at the
+	 * bsgrp being freed; if the unlock does not destroy the node this
+	 * leaves a dangling info pointer -- confirm.
+	 */
+	if ((!bsgrp->bsrp_list) || (!bsgrp->partial_bsrp_list)) {
+		route_unlock_node(rn);
+		pim_free_bsgrp_data(bsgrp);
+		return NULL;
+	}
+
+	prefix_copy(&bsgrp->group, grp);
+	return bsgrp;
+}
+
+/* Bootstrap timer expiry: the elected BSR has gone silent.  Drop NHT
+ * tracking of its address, reset the scope zone back to ACCEPT_ANY,
+ * flush the stored BSM fragments, give each group's active RP one more
+ * holdtime of grace, and clear all pending RP lists.
+ */
+static int pim_on_bs_timer(struct thread *t)
+{
+	struct route_node *rn;
+	struct bsm_scope *scope;
+	struct bsgrp_node *bsgrp_node;
+	struct bsm_rpinfo *bsrp;
+	struct prefix nht_p;
+	char buf[PREFIX2STR_BUFFER];
+	bool is_bsr_tracking = true;
+
+	scope = THREAD_ARG(t);
+	THREAD_OFF(scope->bs_timer);
+
+	if (PIM_DEBUG_BSM)
+		zlog_debug("%s: Bootstrap Timer expired for scope: %d",
+			   __PRETTY_FUNCTION__, scope->sz_id);
+
+	/* Remove next hop tracking for the bsr */
+	nht_p.family = AF_INET;
+	nht_p.prefixlen = IPV4_MAX_BITLEN;
+	nht_p.u.prefix4 = scope->current_bsr;
+	if (PIM_DEBUG_BSM) {
+		prefix2str(&nht_p, buf, sizeof(buf));
+		zlog_debug("%s: Deregister BSR addr %s with Zebra NHT",
+			   __PRETTY_FUNCTION__, buf);
+	}
+	pim_delete_tracked_nexthop(scope->pim, &nht_p, NULL, NULL,
+				   is_bsr_tracking);
+
+	/* Reset scope zone data */
+	scope->accept_nofwd_bsm = false;
+	scope->state = ACCEPT_ANY;
+	scope->current_bsr.s_addr = INADDR_ANY;
+	scope->current_bsr_prio = 0;
+	scope->current_bsr_first_ts = 0;
+	scope->current_bsr_last_ts = 0;
+	scope->bsm_frag_tag = 0;
+	list_delete_all_node(scope->bsm_list);
+
+	for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
+
+		bsgrp_node = (struct bsgrp_node *)rn->info;
+		if (!bsgrp_node) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug("%s: bsgrp_node is null",
+					   __PRETTY_FUNCTION__);
+			continue;
+		}
+		/* Give grace time for rp to continue for another hold time */
+		if ((bsgrp_node->bsrp_list) && (bsgrp_node->bsrp_list->count)) {
+			bsrp = listnode_head(bsgrp_node->bsrp_list);
+			pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
+		}
+		/* clear pending list */
+		if ((bsgrp_node->partial_bsrp_list)
+		    && (bsgrp_node->partial_bsrp_list->count)) {
+			list_delete_all_node(bsgrp_node->partial_bsrp_list);
+			bsgrp_node->pend_rp_cnt = 0;
+		}
+	}
+	return 0;
+}
+
+/* Cancel the scope's bootstrap timer. */
+static void pim_bs_timer_stop(struct bsm_scope *scope)
+{
+	if (PIM_DEBUG_BSM)
+		zlog_debug("%s : BS timer being stopped of sz: %d",
+			   __PRETTY_FUNCTION__, scope->sz_id);
+	THREAD_OFF(scope->bs_timer);
+}
+
+/* (Re)arm the scope's bootstrap timer for bs_timeout seconds; any
+ * running timer is cancelled first.  Expiry is handled by
+ * pim_on_bs_timer().
+ */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
+{
+	if (!scope) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Invalid scope(NULL).",
+				   __PRETTY_FUNCTION__);
+		return;
+	}
+	THREAD_OFF(scope->bs_timer);
+	if (PIM_DEBUG_BSM)
+		zlog_debug("%s : starting bs timer for scope %d with timeout %d secs",
+			   __PRETTY_FUNCTION__, scope->sz_id, bs_timeout);
+	thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+			 &scope->bs_timer);
+}
+
+/* Restart is just a start: pim_bs_timer_start() already cancels any
+ * running timer.
+ */
+static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
+{
+	pim_bs_timer_start(scope, bs_timeout);
+}
+
+/* Initialize the per-instance global-scope BSM state: fresh RP table,
+ * accept_nofwd_bsm enabled, NO_INFO state, empty fragment list (with
+ * pim_bsm_node_free as del callback) and a running bootstrap timer.
+ */
+void pim_bsm_proc_init(struct pim_instance *pim)
+{
+	memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
+
+	pim->global_scope.sz_id = PIM_GBL_SZ_ID;
+	pim->global_scope.bsrp_table = route_table_init();
+	pim->global_scope.accept_nofwd_bsm = true;
+	pim->global_scope.state = NO_INFO;
+	pim->global_scope.pim = pim;
+	pim->global_scope.bsm_list = list_new();
+	pim->global_scope.bsm_list->del = (void (*)(void *))pim_bsm_node_free;
+	pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
+}
+
+/* Tear down the global-scope BSM state: stop the bootstrap timer, free
+ * the stored BSM fragments, release every bsgrp node, then finish the
+ * RP table.
+ */
+void pim_bsm_proc_free(struct pim_instance *pim)
+{
+	struct route_node *rn;
+	struct bsgrp_node *bsgrp;
+
+	pim_bs_timer_stop(&pim->global_scope);
+
+	if (pim->global_scope.bsm_list)
+		list_delete(&pim->global_scope.bsm_list);
+
+	for (rn = route_top(pim->global_scope.bsrp_table); rn;
+	     rn = route_next(rn)) {
+		bsgrp = rn->info;
+		if (!bsgrp)
+			continue;
+		pim_free_bsgrp_data(bsgrp);
+	}
+
+	if (pim->global_scope.bsrp_table)
+		route_table_finish(pim->global_scope.bsrp_table);
+}
+
+/* Predicate for list_filter_out_nodes(): true once this RP entry's
+ * accumulated elapse_time has reached its advertised holdtime.
+ */
+static bool is_hold_time_elapsed(void *data)
+{
+	struct bsm_rpinfo *rp_entry = data;
+
+	return rp_entry->elapse_time >= rp_entry->rp_holdtime;
+}
+
+/* Group-to-RP expiry: the head RP entry of a group timed out.  Age all
+ * entries by the expired holdtime, drop those whose holdtime elapsed,
+ * then either switch the group to the next elected RP or (if none is
+ * left) delete the BSR-learned RP.  Static RPs are never overridden.
+ * Frees the bsgrp node entirely when both RP lists are empty.
+ *
+ * NOTE(review): the route_node_lookup() reference is released on the
+ * !rp_info path but not on the success path -- confirm whether a
+ * route_unlock_node(rn) is missing there.
+ */
+static int pim_on_g2rp_timer(struct thread *t)
+{
+	struct bsm_rpinfo *bsrp;
+	struct bsm_rpinfo *bsrp_node;
+	struct bsgrp_node *bsgrp_node;
+	struct listnode *bsrp_ln;
+	struct pim_instance *pim;
+	struct rp_info *rp_info;
+	struct route_node *rn;
+	uint16_t elapse;
+	struct in_addr bsrp_addr;
+
+	bsrp = THREAD_ARG(t);
+	THREAD_OFF(bsrp->g2rp_timer);
+	bsgrp_node = bsrp->bsgrp_node;
+
+	/* elapse time is the hold time of expired node */
+	elapse = bsrp->rp_holdtime;
+	bsrp_addr = bsrp->rp_address;
+
+	/* update elapse for all bsrp nodes */
+	for (ALL_LIST_ELEMENTS_RO(bsgrp_node->bsrp_list, bsrp_ln, bsrp_node))
+		bsrp_node->elapse_time += elapse;
+
+	/* remove the expired nodes from the list */
+	list_filter_out_nodes(bsgrp_node->bsrp_list, is_hold_time_elapsed);
+
+	/* Get the next elected rp node */
+	bsrp = listnode_head(bsgrp_node->bsrp_list);
+	pim = bsgrp_node->scope->pim;
+	rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+	if (!rn) {
+		zlog_warn("%s: Route node doesn't exist", __PRETTY_FUNCTION__);
+		return 0;
+	}
+
+	rp_info = (struct rp_info *)rn->info;
+
+	if (!rp_info) {
+		route_unlock_node(rn);
+		return 0;
+	}
+
+	if (rp_info->rp_src != RP_SRC_STATIC) {
+		/* If new rp available, change it else delete the existing */
+		if (bsrp) {
+			bsrp_addr = bsrp->rp_address;
+			/* restart with the remaining time of the new head */
+			pim_g2rp_timer_start(
+				bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
+			pim_rp_change(pim, bsrp_addr, bsgrp_node->group,
+				      RP_SRC_BSR);
+		} else {
+			pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
+				   RP_SRC_BSR);
+		}
+	}
+
+	if ((!bsgrp_node->bsrp_list->count)
+	    && (!bsgrp_node->partial_bsrp_list->count)) {
+		pim_free_bsgrp_node(pim->global_scope.bsrp_table,
+				    &bsgrp_node->group);
+		pim_free_bsgrp_data(bsgrp_node);
+	}
+
+	return 0;
+}
+
+/* (Re)arm the group-to-RP expiry timer on an elected RP entry.
+ * hold_time is in seconds; a NULL bsrp is tolerated and only logged.
+ */
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
+{
+	if (!bsrp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Invalid brsp(NULL).",
+				   __PRETTY_FUNCTION__);
+		return;
+	}
+	/* Cancel any previously running timer before re-arming */
+	THREAD_OFF(bsrp->g2rp_timer);
+	if (PIM_DEBUG_BSM) {
+		char buf[48];
+
+		zlog_debug(
+			"%s : starting g2rp timer for grp: %s - rp: %s with timeout %d secs(Actual Hold time : %d secs)",
+			__PRETTY_FUNCTION__,
+			prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+			inet_ntoa(bsrp->rp_address), hold_time,
+			bsrp->rp_holdtime);
+	}
+
+	thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+			 &bsrp->g2rp_timer);
+}
+
+/* Thin alias of pim_g2rp_timer_start(): start already cancels any
+ * running timer, so restart is the same operation.
+ */
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+					  int hold_time)
+{
+	pim_g2rp_timer_start(bsrp, hold_time);
+}
+
+/* Cancel the g2rp expiry timer on an RP entry; no-op for NULL. */
+static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
+{
+	if (!bsrp)
+		return;
+
+	if (PIM_DEBUG_BSM) {
+		char buf[48];
+
+		zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
+			   __PRETTY_FUNCTION__,
+			   prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+			   inet_ntoa(bsrp->rp_address));
+	}
+
+	THREAD_OFF(bsrp->g2rp_timer);
+}
+
+/* List filter callback: true when the RP was advertised with a zero
+ * holdtime, i.e. the BSR is withdrawing this RP.
+ */
+static bool is_hold_time_zero(void *data)
+{
+	struct bsm_rpinfo *rp_node = data;
+
+	return rp_node->rp_holdtime == 0;
+}
+
+/* Promote a group's partial (in-construction) RP list to become the
+ * active bsrp list once all RPs for the group have been received.
+ * Installs/changes/deletes the BSR-learnt RP as needed (static RP
+ * configuration always wins), starts the g2rp timer on the new elected
+ * RP, and frees the group node when both lists end up empty.
+ */
+static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
+{
+	struct bsm_rpinfo *active;
+	struct bsm_rpinfo *pend;
+	struct list *temp;
+	struct rp_info *rp_info;
+	struct route_node *rn;
+	struct pim_instance *pim;
+	struct rp_info *rp_all;
+	struct prefix group_all;
+	bool had_rp_node = true;
+
+	pim = bsgrp_node->scope->pim;
+	active = listnode_head(bsgrp_node->bsrp_list);
+
+	/* Remove nodes with hold time 0 & check if list still has a head */
+	list_filter_out_nodes(bsgrp_node->partial_bsrp_list, is_hold_time_zero);
+	pend = listnode_head(bsgrp_node->partial_bsrp_list);
+
+	if (!str2prefix("224.0.0.0/4", &group_all))
+		return;
+
+	rp_all = pim_rp_find_match_group(pim, &group_all);
+	rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+	/* Start the expiry timer on the new elected RP (head of pending) */
+	if (pend)
+		pim_g2rp_timer_start(pend, pend->rp_holdtime);
+
+	/* if rp node doesn't exist or exist but not configured(rp_all),
+	 * install the rp from head(if exists) of partial list. List is
+	 * is sorted such that head is the elected RP for the group.
+	 */
+	if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group)
+		    && pim_rpf_addr_is_inaddr_none(&rp_all->rp))) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Route node doesn't exist",
+				   __PRETTY_FUNCTION__);
+		if (pend)
+			pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
+				   NULL, RP_SRC_BSR);
+		had_rp_node = false;
+	} else {
+		rp_info = (struct rp_info *)rn->info;
+		if (!rp_info) {
+			route_unlock_node(rn);
+			if (pend)
+				pim_rp_new(pim, pend->rp_address,
+					   bsgrp_node->group, NULL, RP_SRC_BSR);
+			had_rp_node = false;
+		}
+	}
+
+	/* We didn't have rp node and pending list is empty(unlikely), cleanup*/
+	if ((!had_rp_node) && (!pend)) {
+		pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+				    &bsgrp_node->group);
+		pim_free_bsgrp_data(bsgrp_node);
+		return;
+	}
+
+	if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
+		/* This means we searched and got rp node, needs unlock */
+		route_unlock_node(rn);
+
+		/* Elected RP changed: move the mapping to the new RP */
+		if (active && pend) {
+			if ((active->rp_address.s_addr
+			     != pend->rp_address.s_addr))
+				pim_rp_change(pim, pend->rp_address,
+					      bsgrp_node->group, RP_SRC_BSR);
+		}
+
+		/* Possible when the first BSM has group with 0 rp count */
+		if ((!active) && (!pend)) {
+			if (PIM_DEBUG_BSM) {
+				zlog_debug(
+					"%s: Both bsrp and partial list are empty",
+					__PRETTY_FUNCTION__);
+			}
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+
+		/* Possible when a group with 0 rp count received in BSM */
+		if ((active) && (!pend)) {
+			pim_rp_del(pim, active->rp_address, bsgrp_node->group,
+				   NULL, RP_SRC_BSR);
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			if (PIM_DEBUG_BSM) {
+				zlog_debug("%s:Pend List is null,del grp node",
+					   __PRETTY_FUNCTION__);
+			}
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+	}
+
+	if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
+		/* We need to unlock rn this case */
+		route_unlock_node(rn);
+		/* there is a chance that static rp exist and bsrp cleaned
+		 * so clean bsgrp node if pending list empty
+		 */
+		if (!pend) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s: Partial list is empty, static rp exists",
+					__PRETTY_FUNCTION__);
+			pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+					    &bsgrp_node->group);
+			pim_free_bsgrp_data(bsgrp_node);
+			return;
+		}
+	}
+
+	/* swap the list & delete all nodes in partial list (old bsrp_list)
+	 * before swap
+	 *	active is head of bsrp list
+	 *	pend is head of partial list
+	 * After swap
+	 *	active is head of partial list
+	 *	pend is head of bsrp list
+	 * So check appriate head after swap and clean the new partial list
+	 */
+	temp = bsgrp_node->bsrp_list;
+	bsgrp_node->bsrp_list = bsgrp_node->partial_bsrp_list;
+	bsgrp_node->partial_bsrp_list = temp;
+
+	if (active) {
+		pim_g2rp_timer_stop(active);
+		list_delete_all_node(bsgrp_node->partial_bsrp_list);
+	}
+}
+
+/* RFC 5059 BSM RPF check: verify that a multicast BSM arrived from the
+ * next hop towards the originating BSR.
+ *
+ * pim         - pim instance the BSM arrived on
+ * bsr         - BSR address carried in the BSM header
+ * ip_src_addr - IP source address of the received packet
+ *
+ * A BSR that is not the currently elected one is resolved with a fresh
+ * nexthop lookup; the current BSR uses the cached NHT entry registered
+ * in pim_bsm_update().  Returns true when the source passes the check.
+ */
+static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
+			      struct in_addr ip_src_addr)
+{
+	/* New BSR received */
+	if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+		/* Nexthop lookup pass for the new BSR address */
+		if (pim_nexthop_match(pim, bsr, ip_src_addr))
+			return true;
+
+		if (PIM_DEBUG_BSM) {
+			char bsr_str[INET_ADDRSTRLEN];
+
+			pim_inet4_dump("<bsr?>", bsr, bsr_str, sizeof(bsr_str));
+			zlog_debug("%s : No route to BSR address %s",
+				   __PRETTY_FUNCTION__, bsr_str);
+		}
+		return false;
+	}
+
+	return pim_nexthop_match_nht_cache(pim, bsr, ip_src_addr);
+}
+
+/* BSR election preference (RFC 5059): accept the BSM when its sender is
+ * the currently elected BSR, has a strictly higher priority, or ties on
+ * priority with an equal-or-higher address.
+ */
+static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
+			     uint32_t bsr_prio)
+{
+	struct bsm_scope *gbl = &pim->global_scope;
+
+	/* The incumbent BSR is always acceptable */
+	if (bsr.s_addr == gbl->current_bsr.s_addr)
+		return true;
+
+	/* Higher priority wins outright */
+	if (bsr_prio != gbl->current_bsr_prio)
+		return bsr_prio > gbl->current_bsr_prio;
+
+	/* Priority tie: higher (or equal) address wins */
+	return bsr.s_addr >= gbl->current_bsr.s_addr;
+}
+
+/* Record bsr as the elected BSR for the global scope.  On a BSR change
+ * the old address is deregistered from Zebra NHT tracking and the new
+ * one registered, and the election timestamps/state reset; priority and
+ * last-seen time are refreshed on every accepted BSM.
+ */
+static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
+			   uint32_t bsr_prio)
+{
+	struct pim_nexthop_cache pnc;
+
+	if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+		struct prefix nht_p;
+		char buf[PREFIX2STR_BUFFER];
+		bool is_bsr_tracking = true;
+
+		/* De-register old BSR and register new BSR with Zebra NHT */
+		nht_p.family = AF_INET;
+		nht_p.prefixlen = IPV4_MAX_BITLEN;
+
+		if (pim->global_scope.current_bsr.s_addr != INADDR_ANY) {
+			nht_p.u.prefix4 = pim->global_scope.current_bsr;
+			if (PIM_DEBUG_BSM) {
+				prefix2str(&nht_p, buf, sizeof(buf));
+				zlog_debug(
+					"%s: Deregister BSR addr %s with Zebra NHT",
+					__PRETTY_FUNCTION__, buf);
+			}
+			pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL,
+						   is_bsr_tracking);
+		}
+
+		nht_p.u.prefix4 = bsr;
+		if (PIM_DEBUG_BSM) {
+			prefix2str(&nht_p, buf, sizeof(buf));
+			zlog_debug(
+				"%s: NHT Register BSR addr %s with Zebra NHT",
+				__PRETTY_FUNCTION__, buf);
+		}
+
+		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
+		pim_find_or_track_nexthop(pim, &nht_p, NULL, NULL,
+					  is_bsr_tracking, &pnc);
+		pim->global_scope.current_bsr = bsr;
+		pim->global_scope.current_bsr_first_ts =
+			pim_time_monotonic_sec();
+		pim->global_scope.state = ACCEPT_PREFERRED;
+	}
+	pim->global_scope.current_bsr_prio = bsr_prio;
+	pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
+}
+
+/* Transmit a fully built BSM (pim header already filled in) on one
+ * interface and bump the tx statistics.  Returns false when the
+ * interface has no pim configuration or no open pim socket.
+ */
+static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
+			      struct in_addr dst_addr)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+
+	if (!pim_ifp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Pim interface not available for %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return false;
+	}
+
+	if (pim_ifp->pim_sock_fd == -1) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Pim sock not available for %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return false;
+	}
+
+	pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address, dst_addr,
+		     buf, len, ifp->name);
+	pim_ifp->pim_ifstat_bsm_tx++;
+	pim_ifp->pim->bsm_sent++;
+	return true;
+}
+
+/* Fragment a BSM that exceeds the interface PIM MTU and transmit the
+ * fragments on ifp.  Each fragment re-carries the original pim + bsm
+ * headers, and a group whose RPs straddle a fragment boundary has its
+ * group tlv repeated with an adjusted frag_rp_count so every fragment
+ * is independently parseable.
+ *
+ * buf/len  - original full BSM (pim header space included)
+ * pim_mtu  - interface MTU minus the IP header
+ * dst_addr - all-pim-routers or unicast neighbor address
+ * no_fwd   - value of the no-forward bit in each rebuilt pim header
+ *
+ * Returns false only on an undersized MTU or allocation failure.
+ */
+static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
+			      uint32_t pim_mtu, struct in_addr dst_addr,
+			      bool no_fwd)
+{
+	struct bsmmsg_grpinfo *grpinfo, *curgrp;
+	uint8_t *firstgrp_ptr;
+	uint8_t *pkt;
+	uint8_t *pak_start;
+	uint32_t parsed_len = 0;
+	uint32_t this_pkt_rem;
+	uint32_t copy_byte_count;
+	uint32_t this_pkt_len;
+	uint8_t total_rp_cnt;
+	uint8_t this_rp_cnt;
+	uint8_t frag_rp_cnt;
+	uint8_t rp_fit_cnt;
+	bool pak_pending = false;
+
+	/* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
+	if (pim_mtu < (PIM_MIN_BSM_LEN)) {
+		/* NOTE(review): warn + debug emit essentially the same
+		 * message twice - consider dropping one of them.
+		 */
+		zlog_warn(
+			"%s: mtu(pim mtu: %d) size less than minimum bootsrap len",
+			__PRETTY_FUNCTION__, pim_mtu);
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: mtu (pim mtu:%d) less than minimum bootsrap len",
+				__PRETTY_FUNCTION__, pim_mtu);
+		return false;
+	}
+
+	pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
+
+	if (!pak_start) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: malloc failed", __PRETTY_FUNCTION__);
+		return false;
+	}
+
+	pkt = pak_start;
+
+	/* Fill PIM header later before sending packet to calc checksum */
+	pkt += PIM_MSG_HEADER_LEN;
+	buf += PIM_MSG_HEADER_LEN;
+
+	/* copy bsm header to new packet at offset of pim hdr */
+	memcpy(pkt, buf, PIM_BSM_HDR_LEN);
+	pkt += PIM_BSM_HDR_LEN;
+	buf += PIM_BSM_HDR_LEN;
+	parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
+
+	/* Store the position of first grp ptr, which can be reused for
+	 * next packet to start filling group. old bsm header and pim hdr
+	 * remains. So need not be filled again for next packet onwards.
+	 */
+	firstgrp_ptr = pkt;
+
+	/* we received mtu excluding IP hdr len as param
+	 * now this_pkt_rem is mtu excluding
+	 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
+	 */
+	this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
+
+	/* For each group till the packet length parsed */
+	while (parsed_len < len) {
+		/* pkt            ---> fragment's current pointer
+		 * buf            ---> input buffer's current pointer
+		 * mtu            ---> size of the pim packet - PIM header
+		 * curgrp         ---> current group on the fragment
+		 * grpinfo        ---> current group on the input buffer
+		 * this_pkt_rem   ---> bytes remaing on the current fragment
+		 * rp_fit_cnt     ---> num of rp for current grp that
+		 *                     fits this frag
+		 * total_rp_cnt   ---> total rp present for the group in the buf
+		 * frag_rp_cnt    ---> no of rp for the group to be fit in
+		 *                     the frag
+		 * this_rp_cnt    ---> how many rp have we parsed
+		 */
+		grpinfo = (struct bsmmsg_grpinfo *)buf;
+		memcpy(pkt, buf, PIM_BSM_GRP_LEN);
+		curgrp = (struct bsmmsg_grpinfo *)pkt;
+		parsed_len += PIM_BSM_GRP_LEN;
+		pkt += PIM_BSM_GRP_LEN;
+		buf += PIM_BSM_GRP_LEN;
+		this_pkt_rem -= PIM_BSM_GRP_LEN;
+
+		/* initialize rp count and total_rp_cnt before the rp loop */
+		this_rp_cnt = 0;
+		total_rp_cnt = grpinfo->frag_rp_count;
+
+		/* Loop till all RPs for the group parsed */
+		while (this_rp_cnt < total_rp_cnt) {
+			/* All RP from a group processed here.
+			 * group is pointed by grpinfo.
+			 * At this point make sure buf pointing to a RP
+			 * within a group
+			 */
+			rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
+
+			/* calculate how many rp am i going to copy in
+			 * this frag
+			 */
+			if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
+				frag_rp_cnt = total_rp_cnt - this_rp_cnt;
+			else
+				frag_rp_cnt = rp_fit_cnt;
+
+			/* populate the frag rp count for the current grp */
+			curgrp->frag_rp_count = frag_rp_cnt;
+			copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
+
+			/* copy all the rp that we are fitting in this
+			 * frag for the grp
+			 */
+			memcpy(pkt, buf, copy_byte_count);
+			this_rp_cnt += frag_rp_cnt;
+			buf += copy_byte_count;
+			pkt += copy_byte_count;
+			parsed_len += copy_byte_count;
+			this_pkt_rem -= copy_byte_count;
+
+			/* Either we couldn't fit all rp for the group or the
+			 * mtu reached
+			 */
+			if ((this_rp_cnt < total_rp_cnt)
+			    || (this_pkt_rem
+				< (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
+				/* No space to fit in more rp, send this pkt */
+				this_pkt_len = pim_mtu - this_pkt_rem;
+				pim_msg_build_header(pak_start, this_pkt_len,
+						     PIM_MSG_TYPE_BOOTSTRAP,
+						     no_fwd);
+				pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
+						  dst_addr);
+
+				/* Construct next fragment. Reuse old packet */
+				pkt = firstgrp_ptr;
+				this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
+							  + PIM_MSG_HEADER_LEN);
+
+				/* If pkt can't accomodate next group + atleast
+				 * one rp, we must break out of this inner loop
+				 * and process next RP
+				 */
+				if (total_rp_cnt == this_rp_cnt)
+					break;
+
+				/* If some more RPs for the same group pending,
+				 * fill grp hdr
+				 */
+				memcpy(pkt, (uint8_t *)grpinfo,
+				       PIM_BSM_GRP_LEN);
+				curgrp = (struct bsmmsg_grpinfo *)pkt;
+				pkt += PIM_BSM_GRP_LEN;
+				this_pkt_rem -= PIM_BSM_GRP_LEN;
+				pak_pending = false;
+			} else {
+				/* We filled something but not yet sent out */
+				pak_pending = true;
+			}
+		} /* while RP count */
+	}	 /*while parsed len */
+
+	/* Send if we have any unsent packet */
+	if (pak_pending) {
+		this_pkt_len = pim_mtu - this_pkt_rem;
+		pim_msg_build_header(pak_start, this_pkt_len,
+				     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+		pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
+				  dst_addr);
+	}
+	XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
+	return true;
+}
+
+/* Forward a received BSM on every BSM-enabled pim interface of the vrf,
+ * fragmenting per interface when the interface PIM MTU is smaller than
+ * the message.
+ *
+ * pim - pim instance the BSM belongs to
+ * buf - start of the BSM (pim header space included, rebuilt here)
+ * len - total BSM length in bytes
+ * sz  - scope zone id; only the global scope is supported today, so it
+ *       is currently unused
+ */
+static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
+				 uint32_t len, int sz)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	struct in_addr dst_addr;
+	uint32_t pim_mtu;
+	/* use C99 bool literals, consistent with the rest of the file */
+	bool no_fwd = false;
+	bool ret = false;
+
+	/* For now only global scope zone is supported, so send on all
+	 * pim interfaces in the vrf
+	 */
+	dst_addr = qpim_all_pim_routers_addr;
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		pim_ifp = ifp->info;
+		if ((!pim_ifp) || (!pim_ifp->bsm_enable))
+			continue;
+		pim_hello_require(ifp);
+		pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+		if (pim_mtu < len) {
+			ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
+						dst_addr, no_fwd);
+			if (PIM_DEBUG_BSM)
+				zlog_debug("%s: pim_bsm_frag_send returned %s",
+					   __PRETTY_FUNCTION__,
+					   ret ? "TRUE" : "FALSE");
+		} else {
+			pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+					     no_fwd);
+			if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_send_intf returned FALSE",
+						__PRETTY_FUNCTION__);
+			}
+		}
+	}
+}
+
+/* Forward the stored BSM fragments to a newly learnt PIM neighbor so it
+ * can build its RP set without waiting for the next periodic BSM
+ * (RFC 5059).  Only the DR for the interface forwards; the BSM is sent
+ * unicast when the interface accepts unicast BSM, multicast otherwise.
+ *
+ * Returns true when the last attempted send succeeded, false otherwise.
+ */
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
+{
+	struct in_addr dst_addr;
+	struct pim_interface *pim_ifp;
+	struct bsm_scope *scope;
+	struct listnode *bsm_ln;
+	struct bsm_info *bsminfo;
+	char neigh_src_str[INET_ADDRSTRLEN];
+	uint32_t pim_mtu;
+	bool no_fwd = true;
+	bool ret = false;
+
+	if (PIM_DEBUG_BSM) {
+		pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
+			       sizeof(neigh_src_str));
+		zlog_debug("%s: New neighbor %s seen on %s",
+			   __PRETTY_FUNCTION__, neigh_src_str, ifp->name);
+	}
+
+	pim_ifp = ifp->info;
+
+	/* DR only forwards BSM packet.  The original check used '==' (i.e.
+	 * "we are the DR") yet logged "not DR" and never returned, making
+	 * it a no-op; bail out when we are NOT the DR on this interface.
+	 */
+	if (pim_ifp->pim_dr_addr.s_addr != pim_ifp->primary_address.s_addr) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: It is not DR, so don't forward BSM packet",
+				__PRETTY_FUNCTION__);
+		return ret;
+	}
+
+	if (!pim_ifp->bsm_enable) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: BSM proc not enabled on %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return ret;
+	}
+
+	scope = &pim_ifp->pim->global_scope;
+
+	if (!scope->bsm_list->count) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: BSM list for the scope is empty",
+				   __PRETTY_FUNCTION__);
+		return ret;
+	}
+
+	if (!pim_ifp->ucast_bsm_accept) {
+		dst_addr = qpim_all_pim_routers_addr;
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Sending BSM mcast to %s",
+				   __PRETTY_FUNCTION__, neigh_src_str);
+	} else {
+		dst_addr = neigh->source_addr;
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: Sending BSM ucast to %s",
+				   __PRETTY_FUNCTION__, neigh_src_str);
+	}
+	pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+	pim_hello_require(ifp);
+
+	for (ALL_LIST_ELEMENTS_RO(scope->bsm_list, bsm_ln, bsminfo)) {
+		if (pim_mtu < bsminfo->size) {
+			ret = pim_bsm_frag_send(bsminfo->bsm, bsminfo->size,
+						ifp, pim_mtu, dst_addr, no_fwd);
+			if (!ret) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_frag_send failed",
+						__PRETTY_FUNCTION__);
+			}
+		} else {
+			/* Pim header needs to be constructed */
+			pim_msg_build_header(bsminfo->bsm, bsminfo->size,
+					     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+			ret = pim_bsm_send_intf(bsminfo->bsm, bsminfo->size,
+						ifp, dst_addr);
+			if (!ret) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s: pim_bsm_frag_send failed",
+						__PRETTY_FUNCTION__);
+			}
+		}
+	}
+	return ret;
+}
+
+/* Look up the bsgrp node for an exact group prefix in the scope's
+ * bsrp table.  Returns NULL (with a debug log) when no node exists.
+ */
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+					  struct prefix *grp)
+{
+	struct bsgrp_node *bsgrp = NULL;
+	struct route_node *rn = route_node_lookup(scope->bsrp_table, grp);
+
+	if (rn) {
+		bsgrp = rn->info;
+		/* drop the lock taken by route_node_lookup() */
+		route_unlock_node(rn);
+	} else if (PIM_DEBUG_BSM) {
+		zlog_debug("%s: Route node doesn't exist for the group",
+			   __PRETTY_FUNCTION__);
+	}
+
+	return bsgrp;
+}
+
+/* Compute the RFC 7761 section 4.7.2 hash value for a (group, RP) pair;
+ * used to order RPs of equal priority.  hashmasklen (0..32) selects how
+ * many leading group bits participate in the hash.
+ */
+static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
+				    uint8_t hashmasklen)
+{
+	uint64_t temp;
+	uint32_t hash;
+	uint32_t grpaddr;
+	uint32_t rp_add;
+	uint32_t mask = 0xffffffff;
+
+	/* mask to be made zero if hashmasklen is 0 because mask << 32
+	 * may not give 0. hashmasklen can be 0 to 32.
+	 */
+	if (hashmasklen == 0)
+		mask = 0;
+
+	/* in_addr stores ip in big endian, hence network byte order
+	 * convert to uint32 before processing hash
+	 */
+	grpaddr = ntohl(group.u.prefix4.s_addr);
+	/* Avoid shifting by 32 bit on a 32 bit register */
+	if (hashmasklen)
+		grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
+	else
+		grpaddr = grpaddr & mask;
+	rp_add = ntohl(rp.s_addr);
+	/* 1103515245/12345 are the LCG constants mandated by RFC 7761 */
+	temp = 1103515245 * ((1103515245 * grpaddr + 12345) ^ rp_add) + 12345;
+	hash = temp & (0x7fffffff);
+	return hash;
+}
+
+/* Build a bsm_rpinfo from a received RP tlv and insert it, sorted and
+ * without duplicates, into the group's partial RP list.  Returns true
+ * when the node was added; frees the node and returns false otherwise.
+ */
+static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
+				   struct bsgrp_node *grpnode,
+				   struct bsmmsg_rpinfo *rp)
+{
+	struct bsm_rpinfo *bsm_rpinfo;
+	uint8_t hashMask_len = pim->global_scope.hashMasklen;
+
+	/*memory allocation for bsm_rpinfo */
+	bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_NODE, sizeof(*bsm_rpinfo));
+
+	if (!bsm_rpinfo) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s, Memory allocation failed.\r\n",
+				   __PRETTY_FUNCTION__);
+		return false;
+	}
+
+	/* rp_holdtime was already converted to host order by the caller */
+	bsm_rpinfo->rp_prio = rp->rp_pri;
+	bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
+	memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
+	       sizeof(struct in_addr));
+	bsm_rpinfo->elapse_time = 0;
+
+	/* Back pointer to the group node. */
+	bsm_rpinfo->bsgrp_node = grpnode;
+
+	/* update hash for this rp node (sort key among equal priorities) */
+	bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
+					       hashMask_len);
+	if (listnode_add_sort_nodup(grpnode->partial_bsrp_list, bsm_rpinfo)) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s, bs_rpinfo node added to the partial bs_rplist.\r\n",
+				__PRETTY_FUNCTION__);
+		return true;
+	}
+
+	if (PIM_DEBUG_BSM)
+		zlog_debug("%s: list node not added\n", __PRETTY_FUNCTION__);
+
+	XFREE(MTYPE_PIM_BSRP_NODE, bsm_rpinfo);
+	return false;
+}
+
+/* Track how many RPs are still expected for a group.  A different frag
+ * tag means a brand-new BSM: the half-built partial list is discarded
+ * and the expected count reset.  'sz' is currently unused (only the
+ * global scope is supported).
+ */
+static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
+				      struct bsgrp_node *bsgrp,
+				      uint16_t bsm_frag_tag,
+				      uint32_t total_rp_count)
+{
+	if (bsgrp->pend_rp_cnt) {
+		/* received bsm is different packet ,
+		 * it is not same fragment.
+		 */
+		if (bsm_frag_tag != bsgrp->frag_tag) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s,Received a new BSM ,so clear the pending bs_rpinfo list.\r\n",
+					__PRETTY_FUNCTION__);
+			list_delete_all_node(bsgrp->partial_bsrp_list);
+			bsgrp->pend_rp_cnt = total_rp_count;
+		}
+	} else
+		bsgrp->pend_rp_cnt = total_rp_count;
+
+	bsgrp->frag_tag = bsm_frag_tag;
+}
+
+/* Parse the group-to-RP mapping portion of a BSM and add each RP to the
+ * partial list of the corresponding bsgrp node.
+ *
+ * scope        - bsm scope the message belongs to
+ * buf          - first group tlv (past the pim and bsm headers)
+ * buflen       - number of bytes available at buf
+ * bsm_frag_tag - fragment tag of this BSM, used to correlate fragments
+ *
+ * Returns false on malformed input (truncated tlv or zero rp count).
+ * Once all RPs of a group have arrived the pending list is instated.
+ */
+static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
+				       int buflen, uint16_t bsm_frag_tag)
+{
+	struct bsmmsg_grpinfo grpinfo;
+	struct bsmmsg_rpinfo rpinfo;
+	struct prefix group;
+	struct bsgrp_node *bsgrp = NULL;
+	int frag_rp_cnt = 0;
+	int offset = 0;
+	int ins_count = 0;
+
+	while (buflen > offset) {
+		/* Bounds check: a truncated or malicious BSM must not make
+		 * us read past the end of the received buffer.
+		 */
+		if ((buflen - offset) < (int)sizeof(struct bsmmsg_grpinfo)) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s, Truncated group tlv in BSM.\r\n",
+					__PRETTY_FUNCTION__);
+			return false;
+		}
+
+		/* Extract Group tlv from BSM */
+		memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+
+		if (PIM_DEBUG_BSM) {
+			char grp_str[INET_ADDRSTRLEN];
+
+			pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
+				       sizeof(grp_str));
+			zlog_debug(
+				"%s, Group %s  Rpcount:%d Fragment-Rp-count:%d\r\n",
+				__PRETTY_FUNCTION__, grp_str, grpinfo.rp_count,
+				grpinfo.frag_rp_count);
+		}
+
+		buf += sizeof(struct bsmmsg_grpinfo);
+		offset += sizeof(struct bsmmsg_grpinfo);
+
+		if (grpinfo.rp_count == 0) {
+			if (PIM_DEBUG_BSM) {
+				char grp_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<Group?>", grpinfo.group.addr,
+					       grp_str, sizeof(grp_str));
+				zlog_debug(
+					"%s, Rp count is zero for group: %s\r\n",
+					__PRETTY_FUNCTION__, grp_str);
+			}
+			return false;
+		}
+
+		group.family = AF_INET;
+		group.prefixlen = grpinfo.group.mask;
+		group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
+
+		/* Get the Group node for the BSM rp table */
+		bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
+
+		if (!bsgrp) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s, Create new BSM Group node.\r\n",
+					__PRETTY_FUNCTION__);
+
+			/* create a new node to be added to the tree. */
+			bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
+						       &group);
+
+			if (!bsgrp) {
+				zlog_debug(
+					"%s, Failed to get the BSM group node.\r\n",
+					__PRETTY_FUNCTION__);
+				continue;
+			}
+
+			bsgrp->scope = scope;
+		}
+
+		pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
+					  grpinfo.rp_count);
+		frag_rp_cnt = grpinfo.frag_rp_count;
+		ins_count = 0;
+
+		while (frag_rp_cnt--) {
+			/* Bounds check each RP tlv as well */
+			if ((buflen - offset)
+			    < (int)sizeof(struct bsmmsg_rpinfo)) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s, Truncated RP tlv in BSM.\r\n",
+						__PRETTY_FUNCTION__);
+				return false;
+			}
+
+			/* Extract RP address tlv from BSM */
+			memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
+			rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
+			buf += sizeof(struct bsmmsg_rpinfo);
+			offset += sizeof(struct bsmmsg_rpinfo);
+
+			if (PIM_DEBUG_BSM) {
+				char rp_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
+					       rp_str, sizeof(rp_str));
+				zlog_debug(
+					"%s, Rp address - %s; pri:%d hold:%d\r\n",
+					__PRETTY_FUNCTION__, rp_str,
+					rpinfo.rp_pri, rpinfo.rp_holdtime);
+			}
+
+			/* Call Install api to update grp-rp mappings */
+			if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
+				ins_count++;
+		}
+
+		bsgrp->pend_rp_cnt -= ins_count;
+
+		if (!bsgrp->pend_rp_cnt) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
+					__PRETTY_FUNCTION__);
+			/* replace the bsrp_list with pending list */
+			pim_instate_pend_list(bsgrp);
+		}
+	}
+	return true;
+}
+
+/* Entry point for a received bootstrap message (RFC 5059).
+ * Validates the packet (interface config, scope zone, preferred BSR,
+ * RPF / unicast acceptance), installs the group-to-RP mappings,
+ * restarts the bootstrap timer and, unless the no-forward bit is set,
+ * forwards the BSM and caches a copy for new neighbors.
+ * Returns 0 when accepted, -1 when the BSM is dropped.
+ */
+int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
+		    uint32_t buf_size, bool no_fwd)
+{
+	struct bsm_hdr *bshdr;
+	int sz = PIM_GBL_SZ_ID;
+	struct bsmmsg_grpinfo *msg_grp;
+	struct pim_interface *pim_ifp = NULL;
+	struct bsm_info *bsminfo;
+	struct pim_instance *pim;
+	char bsr_str[INET_ADDRSTRLEN];
+	uint16_t frag_tag;
+	bool empty_bsm = FALSE;
+
+	/* BSM Packet acceptance validation */
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: multicast not enabled on interface %s",
+				   __PRETTY_FUNCTION__, ifp->name);
+		return -1;
+	}
+
+	pim_ifp->pim_ifstat_bsm_rx++;
+	pim = pim_ifp->pim;
+	pim->bsm_rcvd++;
+
+	/* Drop if bsm processing is disabled on interface */
+	if (!pim_ifp->bsm_enable) {
+		zlog_warn("%s: BSM not enabled on interface %s",
+			  __PRETTY_FUNCTION__, ifp->name);
+		pim_ifp->pim_ifstat_bsm_cfg_miss++;
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
+	pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
+		       sizeof(bsr_str));
+	pim->global_scope.hashMasklen = bshdr->hm_len;
+	frag_tag = ntohs(bshdr->frag_tag);
+
+	/* Identify empty BSM (headers only, no group tlv) */
+	if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
+		empty_bsm = true;
+
+	if (!empty_bsm) {
+		msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
+						    + PIM_BSM_HDR_LEN);
+		/* Currently we don't support scope zoned BSM */
+		if (msg_grp->group.sz) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : Administratively scoped range BSM received",
+					__PRETTY_FUNCTION__);
+			pim_ifp->pim_ifstat_bsm_invalid_sz++;
+			pim->bsm_dropped++;
+			return -1;
+		}
+	}
+
+	/* Drop if bsr is not preferred bsr */
+	if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Received a non-preferred BSM",
+				   __PRETTY_FUNCTION__);
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	if (no_fwd) {
+		/* only accept no-forward BSM if quick refresh on startup */
+		if ((pim->global_scope.accept_nofwd_bsm)
+		    || (frag_tag == pim->global_scope.bsm_frag_tag)) {
+			pim->global_scope.accept_nofwd_bsm = false;
+		} else {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
+					__PRETTY_FUNCTION__, bsr_str);
+			pim->bsm_dropped++;
+			pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+			return -1;
+		}
+	}
+
+	/* Mulicast BSM received: must pass the RPF check towards the BSR */
+	if (ip_hdr->ip_dst.s_addr == qpim_all_pim_routers_addr.s_addr) {
+		if (!no_fwd) {
+			if (!pim_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
+					       ip_hdr->ip_src)) {
+				if (PIM_DEBUG_BSM)
+					zlog_debug(
+						"%s : RPF check fail for BSR address %s",
+						__PRETTY_FUNCTION__, bsr_str);
+				pim->bsm_dropped++;
+				return -1;
+			}
+		}
+	} else if (if_lookup_exact_address(&ip_hdr->ip_dst, AF_INET,
+					   pim->vrf_id)) {
+		/* Unicast BSM received - if ucast bsm not enabled on
+		 * the interface, drop it
+		 */
+		if (!pim_ifp->ucast_bsm_accept) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : Unicast BSM not enabled on interface %s",
+					__PRETTY_FUNCTION__, ifp->name);
+			pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+			pim->bsm_dropped++;
+			return -1;
+		}
+
+	} else {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Invalid destination address",
+				   __PRETTY_FUNCTION__);
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	if (empty_bsm) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Empty Pref BSM received",
+				   __PRETTY_FUNCTION__);
+	}
+	/* Parse Update bsm rp table and install/uninstall rp if required */
+	if (!pim_bsm_parse_install_g2rp(
+		    &pim_ifp->pim->global_scope,
+		    (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
+		    (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
+		    frag_tag)) {
+		if (PIM_DEBUG_BSM) {
+			zlog_debug("%s, Parsing BSM failed.\r\n",
+				   __PRETTY_FUNCTION__);
+		}
+		pim->bsm_dropped++;
+		return -1;
+	}
+	/* Restart the bootstrap timer */
+	pim_bs_timer_restart(&pim_ifp->pim->global_scope,
+			     PIM_BSR_DEFAULT_TIMEOUT);
+
+	/* If new BSM received, clear the old bsm database */
+	if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
+		if (PIM_DEBUG_BSM) {
+			zlog_debug("%s: Current frag tag: %d Frag teg rcvd: %d",
+				   __PRETTY_FUNCTION__,
+				   pim_ifp->pim->global_scope.bsm_frag_tag,
+				   frag_tag);
+		}
+		list_delete_all_node(pim_ifp->pim->global_scope.bsm_list);
+		pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
+	}
+
+	/* update the scope information from bsm */
+	pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+
+	/* Forward and cache a copy of the BSM for new neighbors */
+	if (!no_fwd) {
+		pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+		bsminfo = XCALLOC(MTYPE_PIM_BSM_INFO, sizeof(struct bsm_info));
+		if (!bsminfo) {
+			zlog_warn("%s: bsminfo alloc failed",
+				  __PRETTY_FUNCTION__);
+			return 0;
+		}
+
+		bsminfo->bsm = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, buf_size);
+		if (!bsminfo->bsm) {
+			zlog_warn("%s: bsm alloc failed", __PRETTY_FUNCTION__);
+			XFREE(MTYPE_PIM_BSM_INFO, bsminfo);
+			return 0;
+		}
+
+		bsminfo->size = buf_size;
+		memcpy(bsminfo->bsm, buf, buf_size);
+		listnode_add(pim_ifp->pim->global_scope.bsm_list, bsminfo);
+	}
+
+	return 0;
+}
--- /dev/null
+/*
+ * pim_bsm.h: PIM BSM handling related
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#ifndef __PIM_BSM_H__
+#define __PIM_BSM_H__
+
+#include "if.h"
+#include "vty.h"
+#include "linklist.h"
+#include "table.h"
+#include "pim_rp.h"
+#include "pim_msg.h"
+
+/* Defines */
+#define PIM_GBL_SZ_ID 0 /* global scope zone id set to 0 */
+#define PIM_BS_TIME 60 /* RFC 5059 - Sec 5 */
+#define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */
+
+/* These structures are only encoded IPv4 specific */
+#define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr)
+#define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo)
+#define PIM_BSM_RP_LEN sizeof(struct bsmmsg_rpinfo)
+
+#define PIM_MIN_BSM_LEN \
+ (PIM_HDR_LEN + PIM_BSM_HDR_LEN + PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN)
+
+/* Datastructures
+ * ==============
+ */
+
+/* Non candidate BSR states (RFC 5059 section 3.1) */
+enum ncbsr_state {
+	NO_INFO = 0,	  /* no BSR information known */
+	ACCEPT_ANY,	  /* accept a BSM from any BSR */
+	ACCEPT_PREFERRED  /* only accept the preferred (elected) BSR */
+};
+
+/* BSM scope - bsm processing is per scope */
+struct bsm_scope {
+	int sz_id;			/* scope zone id */
+	enum ncbsr_state state;		/* non candidate BSR state */
+	bool accept_nofwd_bsm;		/* no fwd bsm accepted for scope */
+	struct in_addr current_bsr;	/* current elected BSR for the sz */
+	uint32_t current_bsr_prio;	/* current BSR priority */
+	int64_t current_bsr_first_ts;	/* current BSR elected time */
+	int64_t current_bsr_last_ts;	/* Last BSM received from E-BSR */
+	uint16_t bsm_frag_tag;		/* Last received frag tag from E-BSR */
+	uint8_t hashMasklen;		/* Mask in hash calc RFC 7761 4.7.2 */
+	struct pim_instance *pim;	/* Back pointer to pim instance */
+	struct list *bsm_list;		/* list of bsm frag for forwarding */
+	struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
+	struct thread *bs_timer;	/* Boot strap timer */
+	struct thread *sz_timer;	/* scope zone timer - TODO confirm,
+					 * unused in current global scope code
+					 */
+};
+
+/* BSM packet - this is stored as list in bsm_list inside scope
+ * This is used for forwarding to new neighbors or restarting mcast routers
+ */
+struct bsm_info {
+	uint32_t size;		/* size of the packet */
+	unsigned char *bsm;	/* Actual packet (heap-allocated copy) */
+};
+
+/* This is the group node of the bsrp table in scope.
+ * this node maintains the list of rp for the group.
+ * Both RP lists are kept sorted; the head is the elected RP.
+ */
+struct bsgrp_node {
+	struct prefix group;		/* Group range */
+	struct bsm_scope *scope;	/* Back ptr to scope */
+	struct list *bsrp_list;		/* list of RPs adv by BSR */
+	struct list *partial_bsrp_list; /* maintained until all RPs received */
+	int pend_rp_cnt;		/* Total RP - Received RP */
+	uint16_t frag_tag;		/* frag tag to identify the fragment */
+};
+
+/* This is the list node of bsrp_list and partial bsrp list in
+ * bsgrp_node. Hold info of each RP received for the group
+ */
+struct bsm_rpinfo {
+	uint32_t hash;			/* Hash Value as per RFC 7761 4.7.2 */
+	uint32_t elapse_time;		/* upd at expiry of elected RP node */
+	uint16_t rp_prio;		/* RP priority */
+	uint16_t rp_holdtime;		/* RP holdtime - g2rp timer value */
+	struct in_addr rp_address;	/* RP Address */
+	struct bsgrp_node *bsgrp_node;	/* Back ptr to bsgrp_node */
+	struct thread *g2rp_timer;	/* Run only for elected RP node */
+};
+
+/* Structures to extract Bootstrap Message header and Grp to RP Mappings
+ * =====================================================================
+ * BSM Format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum | PIM HDR
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Fragment Tag | Hash Mask Len | BSR Priority | BS HDR(1)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | BSR Address (Encoded-Unicast format) | BS HDR(2)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 1 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count 1 | Frag RP Cnt 1 | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 2 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address n (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count n | Frag RP Cnt n | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+/* On-wire BSM header; multi-byte fields are in network byte order
+ * (frag_tag is ntohs()'d by the receive path).
+ */
+struct bsm_hdr {
+	uint16_t frag_tag;
+	uint8_t hm_len;
+	uint8_t bsr_prio;
+	struct pim_encoded_ipv4_unicast bsr_addr;
+} __attribute__((packed));
+
+/* On-wire group tlv: group range followed by how many RPs exist in
+ * total and how many follow in this fragment.
+ */
+struct bsmmsg_grpinfo {
+	struct pim_encoded_group_ipv4 group;
+	uint8_t rp_count;
+	uint8_t frag_rp_count;
+	uint16_t reserved;
+} __attribute__((packed));
+
+/* On-wire RP tlv; rp_holdtime is network byte order on the wire. */
+struct bsmmsg_rpinfo {
+	struct pim_encoded_ipv4_unicast rpaddr;
+	uint16_t rp_holdtime;
+	uint8_t rp_pri;
+	uint8_t reserved;
+} __attribute__((packed));
+
+/* API */
+void pim_bsm_proc_init(struct pim_instance *pim);
+void pim_bsm_proc_free(struct pim_instance *pim);
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp);
+int pim_bsm_process(struct interface *ifp,
+ struct ip *ip_hdr,
+ uint8_t *buf,
+ uint32_t buf_size,
+ bool no_fwd);
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+ struct prefix *grp);
+#endif
#include "pim_bfd.h"
#include "pim_vxlan.h"
#include "bfd.h"
+#include "pim_bsm.h"
#ifndef VTYSH_EXTRACT_PL
#include "pimd/pim_cmd_clippy.c"
json = json_object_new_object();
else {
vty_out(vty, "\n");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
"Interface", " HELLO", " JOIN",
" PRUNE", " REGISTER", "REGISTER-STOP",
- " ASSERT");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
+ " ASSERT", " BSM");
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
" Rx/Tx", " Rx/Tx", " Rx/Tx",
- " Rx/Tx", " Rx/Tx", " Rx/Tx");
+ " Rx/Tx", " Rx/Tx", " Rx/Tx",
+ " Rx/Tx");
vty_out(vty,
"---------------------------------------------------------------------------------------------------------------\n");
}
json_object_int_add(json_row, "assertRx",
pim_ifp->pim_ifstat_assert_recv);
json_object_int_add(json_row, "assertTx",
- pim_ifp->pim_ifstat_assert_send);
-
+ pim_ifp->pim_ifstat_assert_send);
+ json_object_int_add(json_row, "bsmRx",
+ pim_ifp->pim_ifstat_bsm_rx);
+ json_object_int_add(json_row, "bsmTx",
+ pim_ifp->pim_ifstat_bsm_tx);
json_object_object_add(json, ifp->name, json_row);
} else {
vty_out(vty,
- "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+ "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
ifp->name, pim_ifp->pim_ifstat_hello_recv,
pim_ifp->pim_ifstat_hello_sent,
pim_ifp->pim_ifstat_join_recv,
pim_ifp->pim_ifstat_reg_stop_recv,
pim_ifp->pim_ifstat_reg_stop_send,
pim_ifp->pim_ifstat_assert_recv,
- pim_ifp->pim_ifstat_assert_send);
+ pim_ifp->pim_ifstat_assert_send,
+ pim_ifp->pim_ifstat_bsm_rx,
+ pim_ifp->pim_ifstat_bsm_tx);
}
}
if (uj) {
json = json_object_new_object();
else {
vty_out(vty, "\n");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
"Interface", " HELLO", " JOIN", " PRUNE",
- " REGISTER", " REGISTER-STOP", " ASSERT");
- vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s\n", "",
+ " REGISTER", " REGISTER-STOP", " ASSERT",
+ " BSM");
+ vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
" Rx/Tx", " Rx/Tx", " Rx/Tx", " Rx/Tx",
- " Rx/Tx", " Rx/Tx");
+ " Rx/Tx", " Rx/Tx", " Rx/Tx");
vty_out(vty,
- "---------------------------------------------------------------------------------------------------------------------\n");
+ "-------------------------------------------------------------------------------------------------------------------------------\n");
}
FOR_ALL_INTERFACES (pim->vrf, ifp) {
pim_ifp->pim_ifstat_assert_recv);
json_object_int_add(json_row, "assertTx",
pim_ifp->pim_ifstat_assert_send);
+ json_object_int_add(json_row, "bsmRx",
+ pim_ifp->pim_ifstat_bsm_rx);
+ json_object_int_add(json_row, "bsmTx",
+ pim_ifp->pim_ifstat_bsm_tx);
json_object_object_add(json, ifp->name, json_row);
} else {
vty_out(vty,
- "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+ "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
ifp->name, pim_ifp->pim_ifstat_hello_recv,
pim_ifp->pim_ifstat_hello_sent,
pim_ifp->pim_ifstat_join_recv,
pim_ifp->pim_ifstat_reg_stop_recv,
pim_ifp->pim_ifstat_reg_stop_send,
pim_ifp->pim_ifstat_assert_recv,
- pim_ifp->pim_ifstat_assert_send);
+ pim_ifp->pim_ifstat_assert_send,
+ pim_ifp->pim_ifstat_bsm_rx,
+ pim_ifp->pim_ifstat_bsm_tx);
}
}
if (uj) {
hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd);
}
+/* Display the cached BSM fragment database ("show ip pim bsm-database").
+ *
+ * Each bsm_list element holds one raw fragment (PIM header + bootstrap
+ * payload) exactly as received, so the wire structures (bsm_hdr,
+ * bsmmsg_grpinfo, bsmmsg_rpinfo) are overlaid on the buffer for parsing.
+ *
+ * Fixes vs. previous revision: the blank-line separators are emitted in
+ * text mode only (they used to corrupt JSON output), and the scratch
+ * prefix is zero-initialized so prefix2str() never reads an
+ * uninitialized family when the encoded group is not IPv4.
+ *
+ * NOTE(review): in JSON mode the BSR header fields are re-added to the
+ * same top-level object on every fragment, so only the last fragment's
+ * values survive — confirm whether per-fragment objects were intended.
+ */
+static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+	struct listnode *bsmnode;
+	int count = 0;
+	int fragment = 1;
+	struct bsm_info *bsm;
+	json_object *json = NULL;
+	json_object *json_group = NULL;
+	json_object *json_row = NULL;
+
+	count = pim->global_scope.bsm_list->count;
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_int_add(json, "Number of the fragments", count);
+	} else {
+		vty_out(vty, "Scope Zone: Global\n");
+		vty_out(vty, "Number of the fragments: %d\n", count);
+		vty_out(vty, "\n");
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim->global_scope.bsm_list, bsmnode, bsm)) {
+		char grp_str[INET_ADDRSTRLEN];
+		char rp_str[INET_ADDRSTRLEN];
+		char bsr_str[INET_ADDRSTRLEN];
+		struct bsmmsg_grpinfo *group;
+		struct bsmmsg_rpinfo *rpaddr;
+		struct prefix grp;
+		struct bsm_hdr *hdr;
+		uint32_t offset = 0;
+		uint8_t *buf;
+		uint32_t len = 0;
+		uint32_t frag_rp_cnt = 0;
+
+		buf = bsm->bsm;
+		len = bsm->size;
+
+		/* skip pim header */
+		buf += PIM_MSG_HEADER_LEN;
+		len -= PIM_MSG_HEADER_LEN;
+
+		hdr = (struct bsm_hdr *)buf;
+
+		/* BSM starts with bsr header */
+		buf += sizeof(struct bsm_hdr);
+		len -= sizeof(struct bsm_hdr);
+
+		pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str,
+			       sizeof(bsr_str));
+
+		if (uj) {
+			json_object_string_add(json, "BSR address", bsr_str);
+			json_object_int_add(json, "BSR priority",
+					    hdr->bsr_prio);
+			json_object_int_add(json, "Hashmask Length",
+					    hdr->hm_len);
+			json_object_int_add(json, "Fragment Tag",
+					    ntohs(hdr->frag_tag));
+		} else {
+			vty_out(vty, "BSM Fragment : %d\n", fragment);
+			vty_out(vty, "------------------\n");
+			vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
+				"BSR-Priority", "Hashmask-len", "Fragment-Tag");
+			vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str,
+				hdr->bsr_prio, hdr->hm_len,
+				ntohs(hdr->frag_tag));
+			/* separator belongs to text output only */
+			vty_out(vty, "\n");
+		}
+
+		while (offset < len) {
+			group = (struct bsmmsg_grpinfo *)buf;
+
+			/* zero-init so prefix2str() never sees an
+			 * uninitialized family/address for a non-IPv4
+			 * encoded group
+			 */
+			memset(&grp, 0, sizeof(grp));
+			if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
+				grp.family = AF_INET;
+
+			grp.prefixlen = group->group.mask;
+			grp.u.prefix4.s_addr = group->group.addr.s_addr;
+
+			prefix2str(&grp, grp_str, sizeof(grp_str));
+
+			buf += sizeof(struct bsmmsg_grpinfo);
+			offset += sizeof(struct bsmmsg_grpinfo);
+
+			if (uj) {
+				json_object_object_get_ex(json, grp_str,
+							  &json_group);
+				if (!json_group) {
+					json_group = json_object_new_object();
+					json_object_int_add(json_group,
+							    "Rp Count",
+							    group->rp_count);
+					json_object_int_add(
+						json_group, "Fragment Rp count",
+						group->frag_rp_count);
+					json_object_object_add(json, grp_str,
+							       json_group);
+				}
+			} else {
+				vty_out(vty, "Group : %s\n", grp_str);
+				vty_out(vty, "-------------------\n");
+				vty_out(vty, "Rp Count:%d\n", group->rp_count);
+				vty_out(vty, "Fragment Rp Count : %d\n",
+					group->frag_rp_count);
+			}
+
+			frag_rp_cnt = group->frag_rp_count;
+
+			/* this fragment carries no RPs for the group */
+			if (!frag_rp_cnt)
+				continue;
+
+			if (!uj)
+				vty_out(vty,
+					"RpAddress HoldTime Priority\n");
+
+			while (frag_rp_cnt--) {
+				rpaddr = (struct bsmmsg_rpinfo *)buf;
+
+				buf += sizeof(struct bsmmsg_rpinfo);
+				offset += sizeof(struct bsmmsg_rpinfo);
+
+				pim_inet4_dump("<Rp addr?>",
+					       rpaddr->rpaddr.addr, rp_str,
+					       sizeof(rp_str));
+
+				if (uj) {
+					json_row = json_object_new_object();
+					json_object_string_add(
+						json_row, "Rp Address", rp_str);
+					json_object_int_add(
+						json_row, "Rp HoldTime",
+						ntohs(rpaddr->rp_holdtime));
+					json_object_int_add(json_row,
+							    "Rp Priority",
+							    rpaddr->rp_pri);
+					json_object_object_add(
+						json_group, rp_str, json_row);
+				} else {
+					vty_out(vty, "%-15s %-12d %d\n", rp_str,
+						ntohs(rpaddr->rp_holdtime),
+						rpaddr->rp_pri);
+				}
+			}
+			/* blank line between groups, text mode only so the
+			 * JSON document stays valid
+			 */
+			if (!uj)
+				vty_out(vty, "\n");
+		}
+
+		fragment++;
+	}
+
+	if (uj) {
+		vty_out(vty, "%s\n", json_object_to_json_string_ext(
+					     json, JSON_C_TO_STRING_PRETTY));
+		json_object_free(json);
+	}
+}
+
+/*Display the group-rp mappings */
+static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
+ struct vty *vty, bool uj)
+{
+ struct bsgrp_node *bsgrp;
+ struct listnode *rpnode;
+ struct bsm_rpinfo *bsm_rp;
+ struct route_node *rn;
+ char bsr_str[INET_ADDRSTRLEN];
+ json_object *json = NULL;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ /* Show "0.0.0.0" until a BSR has been learned. */
+ if (pim->global_scope.current_bsr.s_addr == INADDR_ANY)
+ strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+
+ else
+ pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str,
+ sizeof(bsr_str));
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_add(json, "BSR Address", bsr_str);
+ } else {
+ vty_out(vty, "BSR Address %s\n", bsr_str);
+ }
+
+ /* Walk every group node in the global-scope group-to-RP table. */
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = (struct bsgrp_node *)rn->info;
+
+ if (!bsgrp)
+ continue;
+
+ char grp_str[INET_ADDRSTRLEN];
+
+ prefix2str(&bsgrp->group, grp_str, sizeof(grp_str));
+
+ if (uj) {
+ /* one JSON object per group prefix; reuse if present */
+ json_object_object_get_ex(json, grp_str, &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+ } else {
+ vty_out(vty, "Group Address %s\n", grp_str);
+ vty_out(vty, "--------------------------\n");
+ vty_out(vty, "%-15s %-15s %-15s %-15s\n", "Rp Address",
+ "priority", "Holdtime", "Hash");
+
+ vty_out(vty, "(ACTIVE)\n");
+ }
+
+ /* Active list: RPs currently installed for this group. */
+ if (bsgrp->bsrp_list) {
+ for (ALL_LIST_ELEMENTS_RO(bsgrp->bsrp_list, rpnode,
+ bsm_rp)) {
+ char rp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Rp Address?>",
+ bsm_rp->rp_address, rp_str,
+ sizeof(rp_str));
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(
+ json_row, "Rp Address", rp_str);
+ json_object_int_add(
+ json_row, "Rp HoldTime",
+ bsm_rp->rp_holdtime);
+ json_object_int_add(json_row,
+ "Rp Priority",
+ bsm_rp->rp_prio);
+ json_object_int_add(json_row,
+ "Hash Val",
+ bsm_rp->hash);
+ json_object_object_add(
+ json_group, rp_str, json_row);
+
+ } else {
+ vty_out(vty,
+ "%-15s %-15u %-15u %-15u\n",
+ rp_str, bsm_rp->rp_prio,
+ bsm_rp->rp_holdtime,
+ bsm_rp->hash);
+ }
+ }
+ if (!bsgrp->bsrp_list->count && !uj)
+ vty_out(vty, "Active List is empty.\n");
+ }
+
+ if (uj) {
+ json_object_int_add(json_group, "Pending RP count",
+ bsgrp->pend_rp_cnt);
+ } else {
+ vty_out(vty, "(PENDING)\n");
+ vty_out(vty, "Pending RP count :%d\n",
+ bsgrp->pend_rp_cnt);
+ if (bsgrp->pend_rp_cnt)
+ vty_out(vty, "%-15s %-15s %-15s %-15s\n",
+ "Rp Address", "priority", "Holdtime",
+ "Hash");
+ }
+
+ /* Partial list: RPs collected from an in-progress
+ * multi-fragment BSM, not yet promoted to active.
+ */
+ if (bsgrp->partial_bsrp_list) {
+ for (ALL_LIST_ELEMENTS_RO(bsgrp->partial_bsrp_list,
+ rpnode, bsm_rp)) {
+ char rp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address,
+ rp_str, sizeof(rp_str));
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(
+ json_row, "Rp Address", rp_str);
+ json_object_int_add(
+ json_row, "Rp HoldTime",
+ bsm_rp->rp_holdtime);
+ json_object_int_add(json_row,
+ "Rp Priority",
+ bsm_rp->rp_prio);
+ json_object_int_add(json_row,
+ "Hash Val",
+ bsm_rp->hash);
+ json_object_object_add(
+ json_group, rp_str, json_row);
+ } else {
+ vty_out(vty,
+ "%-15s %-15u %-15u %-15u\n",
+ rp_str, bsm_rp->rp_prio,
+ bsm_rp->rp_holdtime,
+ bsm_rp->hash);
+ }
+ }
+ if (!bsgrp->partial_bsrp_list->count && !uj)
+ vty_out(vty, "Partial List is empty\n");
+ }
+
+ if (!uj)
+ vty_out(vty, "\n");
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+/* pim statistics - just adding only bsm related now.
+ * We can continue to add all pim related stats here.
+ *
+ * Fixes vs. previous revision:
+ *  - pim->bsm_rcvd/bsm_sent/bsm_dropped are uint64_t, so they are
+ *    printed with PRIu64 (via <inttypes.h>, pulled in by zebra.h);
+ *    plain %ld is the wrong specifier on 32-bit platforms.
+ *  - "Forwared" misspelling corrected to "Forwarded" in the user-visible
+ *    text and JSON key (new command in this change, so no consumers yet).
+ *  - blank-line separators emitted in text mode only, so JSON output is
+ *    not corrupted by stray newlines.
+ */
+static void pim_show_statistics(struct pim_instance *pim, struct vty *vty,
+				const char *ifname, bool uj)
+{
+	json_object *json = NULL;
+	struct interface *ifp;
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_int_add(json, "Number of Received BSMs",
+				    pim->bsm_rcvd);
+		json_object_int_add(json, "Number of Forwarded BSMs",
+				    pim->bsm_sent);
+		json_object_int_add(json, "Number of Dropped BSMs",
+				    pim->bsm_dropped);
+	} else {
+		vty_out(vty, "BSM Statistics :\n");
+		vty_out(vty, "----------------\n");
+		vty_out(vty, "Number of Received BSMs : %" PRIu64 "\n",
+			pim->bsm_rcvd);
+		vty_out(vty, "Number of Forwarded BSMs : %" PRIu64 "\n",
+			pim->bsm_sent);
+		vty_out(vty, "Number of Dropped BSMs : %" PRIu64 "\n",
+			pim->bsm_dropped);
+		vty_out(vty, "\n");
+	}
+
+	/* scan interfaces */
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+
+		/* optional filter: show a single interface only */
+		if (ifname && strcmp(ifname, ifp->name))
+			continue;
+
+		if (!pim_ifp)
+			continue;
+
+		if (!uj) {
+			vty_out(vty, "Interface : %s\n", ifp->name);
+			vty_out(vty, "-------------------\n");
+			vty_out(vty,
+				"Number of BSMs dropped due to config miss : %u\n",
+				pim_ifp->pim_ifstat_bsm_cfg_miss);
+			vty_out(vty, "Number of unicast BSMs dropped : %u\n",
+				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+			vty_out(vty,
+				"Number of BSMs dropped due to invalid scope zone : %u\n",
+				pim_ifp->pim_ifstat_bsm_invalid_sz);
+			vty_out(vty, "\n");
+		} else {
+			json_object *json_row = json_object_new_object();
+
+			json_object_string_add(json_row, "If Name", ifp->name);
+			json_object_int_add(
+				json_row,
+				"Number of BSMs dropped due to config miss",
+				pim_ifp->pim_ifstat_bsm_cfg_miss);
+			json_object_int_add(
+				json_row, "Number of unicast BSMs dropped",
+				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+			json_object_int_add(json_row,
+					    "Number of BSMs dropped due to invalid scope zone",
+					    pim_ifp->pim_ifstat_bsm_invalid_sz);
+			json_object_object_add(json, ifp->name, json_row);
+		}
+	}
+
+	if (uj) {
+		vty_out(vty, "%s\n", json_object_to_json_string_ext(
+					     json, JSON_C_TO_STRING_PRETTY));
+		json_object_free(json);
+	}
+}
+
+/* Reset the counters reported by "show ip pim statistics": the
+ * per-instance BSM rx/tx/drop totals and the per-interface drop-reason
+ * counters.
+ * NOTE(review): pim_ifstat_bsm_rx/tx are NOT cleared here (they are
+ * cleared by the interface-traffic clear path) — confirm this split is
+ * intentional.
+ */
+static void clear_pim_statistics(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ pim->bsm_rcvd = 0;
+ pim->bsm_sent = 0;
+ pim->bsm_dropped = 0;
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+ }
+}
+
static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct interface *ifp;
} /* scan interfaces */
}
+/* "show ip pim bsr": current preferred BSR address, priority, last BSM
+ * fragment tag, election state, and uptime/last-seen timers.
+ *
+ * Fix vs. previous revision: the "PIMv2 Bootstrap information" banner
+ * was printed before the JSON check, corrupting JSON output; it is now
+ * emitted in text mode only.
+ */
+static void pim_show_bsr(struct pim_instance *pim,
+			 struct vty *vty,
+			 bool uj)
+{
+	char uptime[10];
+	char last_bsm_seen[10];
+	time_t now;
+	char bsr_state[20];
+	char bsr_str[PREFIX_STRLEN];
+	json_object *json = NULL;
+
+	/* banner belongs to the text form only */
+	if (!uj)
+		vty_out(vty, "PIMv2 Bootstrap information\n");
+
+	if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) {
+		strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+		/* NOTE(review): these pass absolute timestamps, unlike the
+		 * (now - ts) deltas below — presumably both fields are zero
+		 * while no BSR is known; confirm.
+		 */
+		pim_time_uptime(uptime, sizeof(uptime),
+				pim->global_scope.current_bsr_first_ts);
+		pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+				pim->global_scope.current_bsr_last_ts);
+	} else {
+		pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr,
+			       bsr_str, sizeof(bsr_str));
+		now = pim_time_monotonic_sec();
+		pim_time_uptime(uptime, sizeof(uptime),
+				(now - pim->global_scope.current_bsr_first_ts));
+		pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+				now - pim->global_scope.current_bsr_last_ts);
+	}
+
+	/* map the election state to a printable name */
+	switch (pim->global_scope.state) {
+	case NO_INFO:
+		strncpy(bsr_state, "NO_INFO", sizeof(bsr_state));
+		break;
+	case ACCEPT_ANY:
+		strncpy(bsr_state, "ACCEPT_ANY", sizeof(bsr_state));
+		break;
+	case ACCEPT_PREFERRED:
+		strncpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state));
+		break;
+	default:
+		strncpy(bsr_state, "", sizeof(bsr_state));
+	}
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_string_add(json, "bsr", bsr_str);
+		json_object_int_add(json, "priority",
+				    pim->global_scope.current_bsr_prio);
+		json_object_int_add(json, "fragment_tag",
+				    pim->global_scope.bsm_frag_tag);
+		json_object_string_add(json, "state", bsr_state);
+		json_object_string_add(json, "upTime", uptime);
+		json_object_string_add(json, "last_bsm_seen", last_bsm_seen);
+		vty_out(vty, "%s\n", json_object_to_json_string_ext(
+					     json, JSON_C_TO_STRING_PRETTY));
+		json_object_free(json);
+	} else {
+		vty_out(vty, "Current preferred BSR address: %s\n", bsr_str);
+		vty_out(vty,
+			"Priority Fragment-Tag State UpTime\n");
+		vty_out(vty, " %-12d %-12d %-13s %7s\n",
+			pim->global_scope.current_bsr_prio,
+			pim->global_scope.bsm_frag_tag,
+			bsr_state,
+			uptime);
+		vty_out(vty, "Last BSM seen: %s\n", last_bsm_seen);
+	}
+}
+
static void clear_igmp_interfaces(struct pim_instance *pim)
{
struct interface *ifp;
return CMD_SUCCESS;
}
+/* CLI "clear ip pim statistics [vrf NAME]": reset the BSM statistics of
+ * the named (or default) VRF via clear_pim_statistics().
+ */
+DEFUN (clear_ip_pim_statistics,
+ clear_ip_pim_statistics_cmd,
+ "clear ip pim statistics [vrf NAME]",
+ CLEAR_STR
+ IP_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM statistics\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ clear_pim_statistics(vrf->info);
+ return CMD_SUCCESS;
+}
+
static void mroute_add_all(struct pim_instance *pim)
{
struct listnode *node;
pim_ifp->pim_ifstat_reg_stop_send = 0;
pim_ifp->pim_ifstat_assert_recv = 0;
pim_ifp->pim_ifstat_assert_send = 0;
+ pim_ifp->pim_ifstat_bsm_rx = 0;
+ pim_ifp->pim_ifstat_bsm_tx = 0;
}
return CMD_SUCCESS;
return CMD_SUCCESS;
}
+/* CLI "show ip pim bsm-database": dump the cached BSM fragments of the
+ * named (or default) VRF via pim_show_bsm_db().
+ */
+DEFUN (show_ip_pim_bsm_db,
+ show_ip_pim_bsm_db_cmd,
+ "show ip pim bsm-database [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached bsm packets information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_bsm_db(vrf->info, vty, uj);
+ return CMD_SUCCESS;
+}
+
+/* CLI "show ip pim bsrp-info": dump BSR-learned group-to-RP mappings of
+ * the named (or default) VRF via pim_show_group_rp_mappings_info().
+ */
+DEFUN (show_ip_pim_bsrp,
+ show_ip_pim_bsrp_cmd,
+ "show ip pim bsrp-info [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached group-rp mappings information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_group_rp_mappings_info(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* CLI "show ip pim statistics [interface WORD]": show BSM statistics,
+ * optionally restricted to one interface.
+ */
+DEFUN (show_ip_pim_statistics,
+ show_ip_pim_statistics_cmd,
+ "show ip pim [vrf NAME] statistics [interface WORD] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM statistics\n"
+ "interface\n"
+ "PIM interface\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ /* "interface WORD" present -> filter to that interface name */
+ if (argv_find(argv, argc, "WORD", &idx))
+ pim_show_statistics(vrf->info, vty, argv[idx]->arg, uj);
+ else
+ pim_show_statistics(vrf->info, vty, NULL, uj);
+
+ return CMD_SUCCESS;
+}
+
static void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty)
{
struct interface *ifp;
{
int result;
- result = pim_rp_new(pim, rp, group, plist);
+ result = pim_rp_new_config(pim, rp, group, plist);
+
+ if (result == PIM_GROUP_BAD_ADDR_MASK_COMBO) {
+ vty_out(vty, "%% Inconsistent address and mask: %s\n",
+ group);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
if (result == PIM_GROUP_BAD_ADDRESS) {
vty_out(vty, "%% Bad group address specified: %s\n", group);
const char *rp, const char *group,
const char *plist)
{
- int result = pim_rp_del(pim, rp, group, plist);
+ int result = pim_rp_del_config(pim, rp, group, plist);
if (result == PIM_GROUP_BAD_ADDRESS) {
vty_out(vty, "%% Bad group address specified: %s\n", group);
return CMD_SUCCESS;
}
+/* CLI "show ip pim bsr [vrf NAME] [json]": current bootstrap router info.
+ * Added the optional "vrf NAME" token for consistency with the other PIM
+ * show commands (the body already called pim_cmd_lookup_vrf(), which
+ * falls back to the default VRF when the token is absent, so existing
+ * invocations behave exactly as before).
+ */
+DEFUN (show_ip_pim_bsr,
+ show_ip_pim_bsr_cmd,
+ "show ip pim bsr [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "boot-strap router information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_bsr(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (ip_ssmpingd,
ip_ssmpingd_cmd,
"ip ssmpingd [A.B.C.D]",
PIM_DO_DEBUG_PIM_TRACE;
PIM_DO_DEBUG_MSDP_EVENTS;
PIM_DO_DEBUG_MSDP_PACKETS;
+ PIM_DO_DEBUG_BSM;
return CMD_SUCCESS;
}
PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ PIM_DONT_DEBUG_BSM;
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
+/* CLI "debug pim bsm": enable bootstrap message debug logging. */
+DEFUN (debug_bsm,
+ debug_bsm_cmd,
+ "debug pim bsm",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DO_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
+/* CLI "no debug pim bsm": disable bootstrap message debug logging. */
+DEFUN (no_debug_bsm,
+ no_debug_bsm_cmd,
+ "no debug pim bsm",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DONT_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
+
DEFUN_NOSH (show_debugging_pim,
show_debugging_pim_cmd,
"show debugging [pim]",
return CMD_SUCCESS;
}
+/* Interface config "ip pim bsm": enable BSM processing on the interface.
+ * Implicitly enables PIM (sparse mode) first if it is not yet configured.
+ */
+DEFUN (ip_pim_bsm,
+ ip_pim_bsm_cmd,
+ "ip pim bsm",
+ IP_STR
+ PIM_STR
+ "Enables BSM support on the interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (!pim_cmd_interface_add(ifp)) {
+ vty_out(vty, "Could not enable PIM SM on interface\n");
+ return CMD_WARNING;
+ }
+ }
+
+ /* re-read ifp->info: pim_cmd_interface_add() may have created it */
+ pim_ifp = ifp->info;
+ pim_ifp->bsm_enable = true;
+
+ return CMD_SUCCESS;
+}
+
+/* Interface config "no ip pim bsm": stop processing BSMs on the
+ * interface (PIM itself must already be enabled here).
+ */
+DEFUN (no_ip_pim_bsm,
+ no_ip_pim_bsm_cmd,
+ "no ip pim bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Disables BSM support\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ vty_out(vty, "Pim not enabled on this interface\n");
+ return CMD_WARNING;
+ }
+
+ pim_ifp->bsm_enable = false;
+
+ return CMD_SUCCESS;
+}
+
+/* Interface config "ip pim unicast-bsm": accept/send unicast BSMs.
+ * Implicitly enables PIM (sparse mode) first if it is not yet configured.
+ */
+DEFUN (ip_pim_ucast_bsm,
+ ip_pim_ucast_bsm_cmd,
+ "ip pim unicast-bsm",
+ IP_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (!pim_cmd_interface_add(ifp)) {
+ vty_out(vty, "Could not enable PIM SM on interface\n");
+ return CMD_WARNING;
+ }
+ }
+
+ /* re-read ifp->info: pim_cmd_interface_add() may have created it */
+ pim_ifp = ifp->info;
+ pim_ifp->ucast_bsm_accept = true;
+
+ return CMD_SUCCESS;
+}
+
+/* Interface config "no ip pim unicast-bsm": block send/receive of
+ * unicast BSMs (PIM itself must already be enabled here).
+ */
+DEFUN (no_ip_pim_ucast_bsm,
+ no_ip_pim_ucast_bsm_cmd,
+ "no ip pim unicast-bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Block send/receive unicast BSM on this interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ vty_out(vty, "Pim not enabled on this interface\n");
+ return CMD_WARNING;
+ }
+
+ pim_ifp->ucast_bsm_accept = false;
+
+ return CMD_SUCCESS;
+}
+
#if HAVE_BFDD > 0
DEFUN_HIDDEN(
#else
install_element(VIEW_NODE, &show_ip_pim_upstream_rpf_cmd);
install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
install_element(VIEW_NODE, &show_ip_multicast_cmd);
install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_mroute_cmd);
install_element(VIEW_NODE, &show_debugging_pim_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
install_element(ENABLE_NODE, &clear_ip_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
install_element(ENABLE_NODE, &debug_igmp_cmd);
install_element(ENABLE_NODE, &no_debug_igmp_cmd);
install_element(ENABLE_NODE, &no_debug_msdp_packets_cmd);
install_element(ENABLE_NODE, &debug_mtrace_cmd);
install_element(ENABLE_NODE, &no_debug_mtrace_cmd);
+ install_element(ENABLE_NODE, &debug_bsm_cmd);
+ install_element(ENABLE_NODE, &no_debug_bsm_cmd);
install_element(CONFIG_NODE, &debug_igmp_cmd);
install_element(CONFIG_NODE, &no_debug_igmp_cmd);
install_element(CONFIG_NODE, &no_debug_msdp_packets_cmd);
install_element(CONFIG_NODE, &debug_mtrace_cmd);
install_element(CONFIG_NODE, &no_debug_mtrace_cmd);
+ install_element(CONFIG_NODE, &debug_bsm_cmd);
+ install_element(CONFIG_NODE, &no_debug_bsm_cmd);
install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);
install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);
+ /* Install BSM command */
+ install_element(INTERFACE_NODE, &ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &ip_pim_ucast_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_ucast_bsm_cmd);
/* Install BFD command */
install_element(INTERFACE_NODE, &ip_pim_bfd_cmd);
install_element(INTERFACE_NODE, &ip_pim_bfd_param_cmd);
#define DEBUG_MSDP_INTERNAL_STR "MSDP protocol internal\n"
#define DEBUG_MSDP_PACKETS_STR "MSDP protocol packets\n"
#define DEBUG_MTRACE_STR "Mtrace protocol activity\n"
+#define DEBUG_PIM_BSM_STR "BSR message processing activity\n"
+
void pim_cmd_init(void);
#include "pim_iface.h"
#include "pim_neighbor.h"
#include "pim_upstream.h"
+#include "pim_bsm.h"
static void on_trace(const char *label, struct interface *ifp,
struct in_addr src)
}
FREE_ADDR_LIST_THEN_RETURN(-8);
}
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug("%s: forwarding bsm to new nbr failed",
+ __PRETTY_FUNCTION__);
+ }
/* actual addr list has been saved under neighbor */
return 0;
}
FREE_ADDR_LIST_THEN_RETURN(-9);
}
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug("%s: forwarding bsm to new nbr failed",
+ __PRETTY_FUNCTION__);
+ }
/* actual addr list is saved under neighbor */
return 0;
pim_ifp->igmp_specific_query_max_response_time_dsec =
IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;
+ /* BSM config on interface: TRUE by default */
+ pim_ifp->bsm_enable = true;
+ pim_ifp->ucast_bsm_accept = true;
+
/*
RFC 3376: 8.3. Query Response Interval
The number of seconds represented by the [Query Response Interval]
bool activeactive;
int64_t pim_ifstat_start; /* start timestamp for stats */
+ uint64_t pim_ifstat_bsm_rx;
+ uint64_t pim_ifstat_bsm_tx;
uint32_t pim_ifstat_hello_sent;
uint32_t pim_ifstat_hello_sendfail;
uint32_t pim_ifstat_hello_recv;
uint32_t pim_ifstat_reg_stop_send;
uint32_t pim_ifstat_assert_recv;
uint32_t pim_ifstat_assert_send;
+ uint32_t pim_ifstat_bsm_cfg_miss;
+ uint32_t pim_ifstat_ucast_bsm_cfg_miss;
+ uint32_t pim_ifstat_bsm_invalid_sz;
struct bfd_info *bfd_info;
+ bool bsm_enable; /* bsm processing enable */
+ bool ucast_bsm_accept; /* ucast bsm processing */
};
/*
#include "pim_static.h"
#include "pim_ssmpingd.h"
#include "pim_vty.h"
+#include "pim_bsm.h"
static void pim_instance_terminate(struct pim_instance *pim)
{
pim_rp_free(pim);
+ pim_bsm_proc_free(pim);
+
/* Traverse and cleanup rpf_hash */
if (pim->rpf_hash) {
hash_clean(pim->rpf_hash, (void *)pim_rp_list_hash_clean);
pim_rp_init(pim);
+ pim_bsm_proc_init(pim);
+
pim_oil_init(pim);
pim_upstream_init(pim);
#include "pim_str.h"
#include "pim_msdp.h"
#include "pim_assert.h"
+#include "pim_bsm.h"
#include "pim_vxlan_instance.h"
#if defined(HAVE_LINUX_MROUTE_H)
bool ecmp_enable;
bool ecmp_rebalance_enable;
+ /* Bsm related */
+ struct bsm_scope global_scope;
+ uint64_t bsm_rcvd;
+ uint64_t bsm_sent;
+ uint64_t bsm_dropped;
/* If we need to rescan all our upstreams */
struct thread *rpf_cache_refresher;
group_size = pim_msg_get_jp_group_size(group->sources);
if (group_size > packet_left) {
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
if (packet_left < sizeof(struct pim_jp_groups)
|| msg->num_groups == 255) {
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
if (!new_packet) {
// msg->num_groups = htons (msg->num_groups);
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
packet_size,
#include "pim_oil.h"
void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type)
+ uint8_t pim_msg_type, bool no_fwd)
{
struct pim_msg_header *header = (struct pim_msg_header *)pim_msg;
*/
header->ver = PIM_PROTO_VERSION;
header->type = pim_msg_type;
+ header->Nbit = no_fwd;
header->reserved = 0;
#include <netinet/in.h>
#include "pim_jp_agg.h"
+
+#define PIM_HDR_LEN sizeof(struct pim_msg_header)
/*
Number Description
---------- ------------------
/*
* Network Order pim_msg_hdr
+ * =========================
+ * PIM Header definition as per RFC 5059. N bit introduced to indicate
+ * do-not-forward option in PIM Boot strap Message.
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct pim_msg_header {
uint8_t type : 4;
uint8_t ver : 4;
- uint8_t reserved;
+ uint8_t Nbit : 1; /* No Fwd Bit */
+ uint8_t reserved : 7;
uint16_t checksum;
} __attribute__((packed));
struct pim_encoded_group_ipv4 {
uint8_t ne;
uint8_t family;
- uint8_t reserved;
+ uint8_t bidir : 1; /* Bidir bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t sz : 1; /* scope zone bit */
uint8_t mask;
struct in_addr addr;
} __attribute__((packed));
} __attribute__((packed));
void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type);
+ uint8_t pim_msg_type, bool no_fwd);
uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);
uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr);
*/
int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
struct pim_upstream *up, struct rp_info *rp,
+ bool bsr_track_needed,
struct pim_nexthop_cache *out_pnc)
{
struct pim_nexthop_cache *pnc = NULL;
if (up != NULL)
hash_get(pnc->upstream_hash, up, hash_alloc_intern);
+ if (bsr_track_needed)
+ pnc->bsr_tracking = true;
+
if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
if (out_pnc)
memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
}
void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp)
+ struct pim_upstream *up, struct rp_info *rp,
+ bool del_bsr_tracking)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
if (up)
hash_release(pnc->upstream_hash, up);
+ if (del_bsr_tracking)
+ pnc->bsr_tracking = false;
+
if (PIM_DEBUG_PIM_NHT) {
char buf[PREFIX_STRLEN];
prefix2str(addr, buf, sizeof buf);
}
if (pnc->rp_list->count == 0
- && pnc->upstream_hash->count == 0) {
+ && pnc->upstream_hash->count == 0
+ && pnc->bsr_tracking == false) {
pim_sendmsg_zebra_rnh(pim, zclient, pnc,
ZEBRA_NEXTHOP_UNREGISTER);
}
}
+/* Given a source address and a neighbor address, check if the neighbor is one
+ * of the next hops to reach the source.  The lookup is done synchronously
+ * against the zebra route database, not the local NHT cache.
+ */
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+		       struct in_addr ip_src)
+{
+	struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
+	int i;
+	ifindex_t first_ifindex;
+	struct interface *ifp;
+	struct pim_neighbor *nbr;
+	int num_ifindex;
+
+	/* 255.255.255.255 marks an unset/invalid source address */
+	if (addr.s_addr == INADDR_NONE)
+		return false;
+
+	memset(nexthop_tab, 0,
+	       sizeof(struct pim_zlookup_nexthop) * MULTIPATH_NUM);
+	num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, MULTIPATH_NUM,
+					     addr, PIM_NEXTHOP_LOOKUP_MAX);
+	if (num_ifindex < 1) {
+		char addr_str[INET_ADDRSTRLEN];
+
+		pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str));
+		zlog_warn(
+			"%s %s: could not find nexthop ifindex for address %s",
+			__FILE__, __PRETTY_FUNCTION__, addr_str);
+		return false;
+	}
+
+	for (i = 0; i < num_ifindex; i++) {
+		first_ifindex = nexthop_tab[i].ifindex;
+
+		ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+		if (!ifp) {
+			if (PIM_DEBUG_ZEBRA) {
+				char addr_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<addr?>", addr, addr_str,
+					       sizeof(addr_str));
+				zlog_debug(
+					"%s %s: could not find interface for ifindex %d (address %s)",
+					__FILE__, __PRETTY_FUNCTION__,
+					first_ifindex, addr_str);
+			}
+			continue;
+		}
+
+		/* ifp->info == NULL means PIM is not enabled on it */
+		if (!ifp->info) {
+			if (PIM_DEBUG_ZEBRA) {
+				char addr_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<addr?>", addr, addr_str,
+					       sizeof(addr_str));
+				zlog_debug(
+					"%s: multicast not enabled on input interface %s (ifindex=%d, RPF for source %s)",
+					__PRETTY_FUNCTION__, ifp->name,
+					first_ifindex, addr_str);
+			}
+			continue;
+		}
+
+		/* For a non-connected source the nexthop must be a live PIM
+		 * neighbor on the interface (loopbacks are exempt).
+		 */
+		if (!pim_if_connected_to_source(ifp, addr)) {
+			nbr = pim_neighbor_find(
+				ifp, nexthop_tab[i].nexthop_addr.u.prefix4);
+			if (PIM_DEBUG_PIM_TRACE_DETAIL)
+				zlog_debug("ifp name: %s, pim nbr: %p",
+					   ifp->name, nbr);
+			if (!nbr && !if_is_loopback(ifp))
+				continue;
+		}
+
+		if (nexthop_tab[i].nexthop_addr.u.prefix4.s_addr
+		    == ip_src.s_addr)
+			return true;
+	}
+
+	return false;
+}
+
+/* Given a source address and a neighbor address, check if the neighbor is one
+ * of the next hops to reach the source.  The lookup is done against the PIM
+ * nexthop cache (NHT), so only addresses already being tracked are visible.
+ */
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+				 struct in_addr ip_src)
+{
+	struct pim_rpf rpf;
+	ifindex_t first_ifindex;
+	struct interface *ifp;
+	struct pim_neighbor *nbr;
+	struct nexthop *nh_node;
+	struct pim_nexthop_cache *pnc;
+
+	memset(&rpf, 0, sizeof(struct pim_rpf));
+	rpf.rpf_addr.family = AF_INET;
+	rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
+	rpf.rpf_addr.u.prefix4 = addr;
+
+	pnc = pim_nexthop_cache_find(pim, &rpf);
+	if (!pnc || !pnc->nexthop_num)
+		return false;
+
+	for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+		first_ifindex = nh_node->ifindex;
+		ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+		if (!ifp) {
+			if (PIM_DEBUG_PIM_NHT) {
+				char addr_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<addr?>", addr, addr_str,
+					       sizeof(addr_str));
+				zlog_debug(
+					"%s %s: could not find interface for ifindex %d (address %s(%s))",
+					__FILE__, __PRETTY_FUNCTION__,
+					first_ifindex, addr_str,
+					pim->vrf->name);
+			}
+			continue;
+		}
+		/* ifp->info == NULL means PIM is not enabled on it */
+		if (!ifp->info) {
+			if (PIM_DEBUG_PIM_NHT) {
+				char addr_str[INET_ADDRSTRLEN];
+
+				pim_inet4_dump("<addr?>", addr, addr_str,
+					       sizeof(addr_str));
+				zlog_debug(
+					"%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %s)",
+					__PRETTY_FUNCTION__, ifp->name,
+					pim->vrf->name, first_ifindex,
+					addr_str);
+			}
+			continue;
+		}
+
+		/* For a non-connected source the nexthop must be a live PIM
+		 * neighbor on the interface (loopbacks are exempt).
+		 */
+		if (!pim_if_connected_to_source(ifp, addr)) {
+			nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4);
+			if (!nbr && !if_is_loopback(ifp)) {
+				if (PIM_DEBUG_PIM_NHT)
+					zlog_debug(
+						"%s: pim nbr not found on input interface %s(%s)",
+						__PRETTY_FUNCTION__, ifp->name,
+						pim->vrf->name);
+				continue;
+			}
+		}
+
+		if (nh_node->gate.ipv4.s_addr == ip_src.s_addr)
+			return true;
+	}
+
+	return false;
+}
+
void pim_rp_nexthop_del(struct rp_info *rp_info)
{
rp_info->rp.source_nexthop.interface = NULL;
struct list *rp_list;
struct hash *upstream_hash;
+ /* Ideally this has to be list of scope zone. But for now we can just
+ * have as a bool variable to say bsr_tracking.
+ * Later this variable can be changed as a list of scope zones for
+ * tracking same bsr for multiple scope zones.
+ */
+ bool bsr_tracking;
};
int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
struct pim_upstream *up, struct rp_info *rp,
+ bool bsr_track_needed,
struct pim_nexthop_cache *out_pnc);
void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp);
+ struct pim_upstream *up, struct rp_info *rp,
+ bool del_bsr_tracking);
struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
struct pim_rpf *rpf);
uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
struct prefix *src, struct prefix *grp);
void pim_rp_nexthop_del(struct rp_info *rp_info);
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src);
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src);
+
#endif
#include "pim_msg.h"
#include "pim_register.h"
#include "pim_errors.h"
+#include "pim_bsm.h"
static int on_pim_hello_send(struct thread *t);
static int pim_hello_send(struct interface *ifp, uint16_t holdtime);
uint16_t checksum; /* computed checksum */
struct pim_neighbor *neigh;
struct pim_msg_header *header;
+ bool no_fwd;
if (len < sizeof(*ip_hdr)) {
if (PIM_DEBUG_PIM_PACKETS)
/* for computing checksum */
header->checksum = 0;
+ no_fwd = header->Nbit;
if (header->type == PIM_MSG_TYPE_REGISTER) {
/* First 8 byte header checksum */
pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
+	case PIM_MSG_TYPE_BOOTSTRAP:
+		/* no_fwd was captured from the PIM header's N bit above;
+		 * unreachable break after return removed.
+		 */
+		return pim_bsm_process(ifp, ip_hdr, pim_msg, pim_msg_len,
+				       no_fwd);
+
default:
if (PIM_DEBUG_PIM_PACKETS) {
zlog_debug(
zassert(pim_msg_size >= PIM_PIM_MIN_LEN);
zassert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE);
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO);
+ pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
b1length += length;
pim_msg_build_header(buffer, b1length + PIM_MSG_REGISTER_STOP_LEN,
- PIM_MSG_TYPE_REG_STOP);
+ PIM_MSG_TYPE_REG_STOP, false);
pinfo = (struct pim_interface *)ifp->info;
if (!pinfo) {
memcpy(b1, (const unsigned char *)buf, buf_size);
pim_msg_build_header(buffer, buf_size + PIM_MSG_REGISTER_LEN,
- PIM_MSG_TYPE_REGISTER);
+ PIM_MSG_TYPE_REGISTER, false);
++pinfo->pim_ifstat_reg_send;
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_zebra.h"
+#include "pim_bsm.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
__PRETTY_FUNCTION__, up->sg_str, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
}
/* Update the upstream address */
pim_zebra_update_all_interfaces(pim);
}
-int pim_rp_new(struct pim_instance *pim, const char *rp,
-	       const char *group_range, const char *plist)
+/* CLI-facing wrapper: parse and validate the RP address and group-range
+ * strings, then hand off to pim_rp_new() with RP_SRC_STATIC.
+ * Returns PIM_SUCCESS or a PIM_* error code.
+ */
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+		      const char *group_range, const char *plist)
 {
	int result = 0;
+	struct prefix group;
+	struct in_addr rp_addr;
+
+	/* No group given: default to the entire IPv4 multicast range */
+	if (group_range == NULL)
+		result = str2prefix("224.0.0.0/4", &group);
+	else {
+		result = str2prefix(group_range, &group);
+		if (result) {
+			struct prefix temp;
+
+			prefix_copy(&temp, &group);
+			apply_mask(&temp);
+			/* Reject a group with host bits set, e.g. 225.1.1.1/8 */
+			if (!prefix_same(&group, &temp))
+				return PIM_GROUP_BAD_ADDR_MASK_COMBO;
+		}
+	}
+
+	if (!result)
+		return PIM_GROUP_BAD_ADDRESS;
+
+	result = inet_pton(AF_INET, rp, &rp_addr);
+
+	if (result <= 0)
+		return PIM_RP_BAD_ADDRESS;
+
+	result = pim_rp_new(pim, rp_addr, group, plist, RP_SRC_STATIC);
+	return result;
+}
+
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag)
+{
+ int result = 0;
+ char rp[INET_ADDRSTRLEN];
struct rp_info *rp_info;
struct rp_info *rp_all;
struct prefix group_all;
struct rp_info *tmp_rp_info;
char buffer[BUFSIZ];
struct prefix nht_p;
- struct prefix temp;
struct route_node *rn;
struct pim_upstream *up;
struct listnode *upnode;
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- if (group_range == NULL)
- result = str2prefix("224.0.0.0/4", &rp_info->group);
- else {
- result = str2prefix(group_range, &rp_info->group);
- if (result) {
- prefix_copy(&temp, &rp_info->group);
- apply_mask(&temp);
- if (!prefix_same(&rp_info->group, &temp)) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDR_MASK_COMBO;
- }
- }
- }
-
- if (!result) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDRESS;
- }
-
rp_info->rp.rpf_addr.family = AF_INET;
rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
- result = inet_pton(rp_info->rp.rpf_addr.family, rp,
- &rp_info->rp.rpf_addr.u.prefix4);
+ rp_info->rp.rpf_addr.u.prefix4 = rp_addr;
+ prefix_copy(&rp_info->group, &group);
+ rp_info->rp_src = rp_src_flag;
- if (result <= 0) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_RP_BAD_ADDRESS;
- }
+ inet_ntop(AF_INET, &rp_info->rp.rpf_addr.u.prefix4, rp, sizeof(rp));
if (plist) {
/*
if (rp_info->rp.rpf_addr.u.prefix4.s_addr
== tmp_rp_info->rp.rpf_addr.u.prefix4.s_addr) {
if (tmp_rp_info->plist)
- pim_rp_del(pim, rp, NULL,
- tmp_rp_info->plist);
+ pim_rp_del_config(pim, rp, NULL,
+ tmp_rp_info->plist);
else
- pim_rp_del(
+ pim_rp_del_config(
pim, rp,
prefix2str(&tmp_rp_info->group,
buffer, BUFSIZ),
&& rp_info->rp.rpf_addr.u.prefix4.s_addr
== tmp_rp_info->rp.rpf_addr.u.prefix4
.s_addr) {
- pim_rp_del(pim, rp, NULL, tmp_rp_info->plist);
+ pim_rp_del_config(pim, rp, NULL,
+ tmp_rp_info->plist);
}
}
if (prefix_same(&rp_all->group, &rp_info->group)
&& pim_rpf_addr_is_inaddr_none(&rp_all->rp)) {
rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
+ rp_all->rp_src = rp_src_flag;
XFREE(MTYPE_PIM_RP, rp_info);
/* Register addr with Zebra NHT */
grp.family = AF_INET;
grp.prefixlen = IPV4_MAX_BITLEN;
grp.u.prefix4 = up->sg.grp;
- trp_info = pim_rp_find_match_group(pim,
- &grp);
+ trp_info = pim_rp_find_match_group(
+ pim, &grp);
if (trp_info == rp_all)
pim_upstream_update(pim, up);
}
pim_rp_check_interfaces(pim, rp_all);
pim_rp_refresh_group_to_rp_mapping(pim);
-
pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
- NULL);
+ false, NULL);
+
if (!pim_ecmp_nexthop_lookup(pim,
&rp_all->rp.source_nexthop,
&nht_p, &rp_all->group, 1))
return PIM_RP_NO_PATH;
-
return PIM_SUCCESS;
}
/*
* Return if the group is already configured for this RP
*/
- if (pim_rp_find_exact(pim, rp_info->rp.rpf_addr.u.prefix4,
- &rp_info->group)) {
+ tmp_rp_info = pim_rp_find_exact(
+ pim, rp_info->rp.rpf_addr.u.prefix4, &rp_info->group);
+ if (tmp_rp_info) {
+ if ((tmp_rp_info->rp_src != rp_src_flag)
+ && (rp_src_flag == RP_SRC_STATIC))
+ tmp_rp_info->rp_src = rp_src_flag;
XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_SUCCESS;
+ return result;
}
/*
*/
if (prefix_same(&rp_info->group,
&tmp_rp_info->group)) {
+ if ((rp_src_flag == RP_SRC_STATIC)
+ && (tmp_rp_info->rp_src
+ == RP_SRC_STATIC)) {
+ XFREE(MTYPE_PIM_RP, rp_info);
+ return PIM_GROUP_OVERLAP;
+ }
+
+ result = pim_rp_change(
+ pim,
+ rp_info->rp.rpf_addr.u.prefix4,
+ tmp_rp_info->group,
+ rp_src_flag);
XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_OVERLAP;
+ return result;
}
}
}
zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
__PRETTY_FUNCTION__, buf, buf1);
}
-
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
&rp_info->group, 1))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
}
-int pim_rp_del(struct pim_instance *pim, const char *rp,
- const char *group_range, const char *plist)
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+ const char *group_range, const char *plist)
{
struct prefix group;
struct in_addr rp_addr;
- struct prefix g_all;
- struct rp_info *rp_info;
- struct rp_info *rp_all;
int result;
- struct prefix nht_p;
- struct route_node *rn;
- bool was_plist = false;
- struct rp_info *trp_info;
- struct pim_upstream *up;
- struct listnode *upnode;
if (group_range == NULL)
result = str2prefix("224.0.0.0/4", &group);
if (result <= 0)
return PIM_RP_BAD_ADDRESS;
+ result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
+ return result;
+}
+
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag)
+{
+ struct prefix g_all;
+ struct rp_info *rp_info;
+ struct rp_info *rp_all;
+ struct prefix nht_p;
+ struct route_node *rn;
+ bool was_plist = false;
+ struct rp_info *trp_info;
+ struct pim_upstream *up;
+ struct listnode *upnode;
+ struct bsgrp_node *bsgrp = NULL;
+ struct bsm_rpinfo *bsrp = NULL;
+ char grp_str[PREFIX2STR_BUFFER];
+ char rp_str[INET_ADDRSTRLEN];
+
+ if (!inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str)))
+ sprintf(rp_str, "<rp?>");
+ prefix2str(&group, grp_str, sizeof(grp_str));
+
if (plist)
rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
else
was_plist = true;
}
+ if (PIM_DEBUG_TRACE)
+ zlog_debug("%s: Delete RP %s for the group %s",
+ __PRETTY_FUNCTION__, rp_str, grp_str);
+
+	/* While static RP is getting deleted, we need to check if dynamic RP
+	 * present for the same group in BSM RP table, then install the dynamic
+	 * RP for the group node into the main rp table
+	 */
+	if (rp_src_flag == RP_SRC_STATIC) {
+		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);
+
+		if (bsgrp) {
+			bsrp = listnode_head(bsgrp->bsrp_list);
+			if (bsrp) {
+				if (PIM_DEBUG_TRACE) {
+					char bsrp_str[INET_ADDRSTRLEN];
+
+					/* Convert the RP address field, not
+					 * the bsm_rpinfo pointer itself.
+					 */
+					if (!inet_ntop(AF_INET,
+						       &bsrp->rp_address,
+						       bsrp_str,
+						       sizeof(bsrp_str)))
+						snprintf(bsrp_str,
+							 sizeof(bsrp_str),
+							 "<bsrp?>");
+
+					zlog_debug("%s: BSM RP %s found for the group %s",
+						   __PRETTY_FUNCTION__,
+						   bsrp_str, grp_str);
+				}
+				return pim_rp_change(pim, bsrp->rp_address,
+						     group, RP_SRC_BSR);
+			}
+		} else {
+			if (PIM_DEBUG_TRACE)
+				zlog_debug(
+					"%s: BSM RP not found for the group %s",
+					__PRETTY_FUNCTION__, grp_str);
+		}
+	}
+
/* Deregister addr with Zebra NHT */
nht_p.family = AF_INET;
nht_p.prefixlen = IPV4_MAX_BITLEN;
zlog_debug("%s: Deregister RP addr %s with Zebra ",
__PRETTY_FUNCTION__, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
if (!str2prefix("224.0.0.0/4", &g_all))
return PIM_RP_BAD_ADDRESS;
/* Find the upstream (*, G) whose upstream address is
* same as the deleted RP
*/
- if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
- (up->sg.src.s_addr == INADDR_ANY)) {
+ if ((up->upstream_addr.s_addr
+ == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+ && (up->sg.src.s_addr == INADDR_ANY)) {
struct prefix grp;
grp.family = AF_INET;
grp.prefixlen = IPV4_MAX_BITLEN;
/* Find the upstream (*, G) whose upstream address is same as
* the deleted RP
*/
- if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
- (up->sg.src.s_addr == INADDR_ANY)) {
+ if ((up->upstream_addr.s_addr
+ == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+ && (up->sg.src.s_addr == INADDR_ANY)) {
struct prefix grp;
grp.family = AF_INET;
/* RP not found for the group grp */
if (pim_rpf_addr_is_inaddr_none(&trp_info->rp)) {
pim_upstream_rpf_clear(pim, up);
- pim_rp_set_upstream_addr(pim,
- &up->upstream_addr,
- up->sg.src, up->sg.grp);
+ pim_rp_set_upstream_addr(
+ pim, &up->upstream_addr, up->sg.src,
+ up->sg.grp);
}
/* RP found for the group grp */
return PIM_SUCCESS;
}
+/* Replace the RP for 'group' with new_rp_addr (used when a BSR-learnt or
+ * static mapping supersedes the current one).  Moves zebra NHT tracking from
+ * the old RP address to the new one and re-evaluates matching (*,G) upstream
+ * state.  Returns PIM_SUCCESS or a PIM_* error code.
+ */
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+		  struct prefix group, enum rp_source rp_src_flag)
+{
+	struct prefix nht_p;
+	struct route_node *rn;
+	int result = 0;
+	struct rp_info *rp_info = NULL;
+	struct pim_upstream *up;
+	struct listnode *upnode;
+
+	rn = route_node_lookup(pim->rp_table, &group);
+	if (!rn) {
+		/* No existing mapping for this group: create one */
+		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+		return result;
+	}
+
+	rp_info = rn->info;
+
+	if (!rp_info) {
+		/* Drop the lookup reference before creating a fresh entry */
+		route_unlock_node(rn);
+		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+		return result;
+	}
+
+	if (rp_info->rp.rpf_addr.u.prefix4.s_addr == new_rp_addr.s_addr) {
+		/* Same RP address: only the source (static/BSR) may differ.
+		 * NOTE(review): when address AND source already match, control
+		 * falls through and redoes the full deregister/register cycle
+		 * below — confirm that refresh is intended.
+		 */
+		if (rp_info->rp_src != rp_src_flag) {
+			rp_info->rp_src = rp_src_flag;
+			route_unlock_node(rn);
+			return PIM_SUCCESS;
+		}
+	}
+
+	/* Deregister old RP addr with Zebra NHT */
+	if (rp_info->rp.rpf_addr.u.prefix4.s_addr != INADDR_ANY) {
+		nht_p.family = AF_INET;
+		nht_p.prefixlen = IPV4_MAX_BITLEN;
+		nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+		if (PIM_DEBUG_PIM_NHT_RP) {
+			char buf[PREFIX2STR_BUFFER];
+
+			prefix2str(&nht_p, buf, sizeof(buf));
+			zlog_debug("%s: Deregister RP addr %s with Zebra ",
+				   __PRETTY_FUNCTION__, buf);
+		}
+		pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
+	}
+
+	pim_rp_nexthop_del(rp_info);
+	/* Remove and re-add so the list stays sorted by the new address */
+	listnode_delete(pim->rp_list, rp_info);
+	/* Update the new RP address*/
+	rp_info->rp.rpf_addr.u.prefix4 = new_rp_addr;
+	rp_info->rp_src = rp_src_flag;
+	rp_info->i_am_rp = 0;
+
+	listnode_add_sort(pim->rp_list, rp_info);
+
+	/* Re-resolve every (*,G) upstream whose group maps to this rp_info */
+	for (ALL_LIST_ELEMENTS_RO(pim->upstream_list, upnode, up)) {
+		if (up->sg.src.s_addr == INADDR_ANY) {
+			struct prefix grp;
+			struct rp_info *trp_info;
+
+			grp.family = AF_INET;
+			grp.prefixlen = IPV4_MAX_BITLEN;
+			grp.u.prefix4 = up->sg.grp;
+			trp_info = pim_rp_find_match_group(pim, &grp);
+
+			if (trp_info == rp_info)
+				pim_upstream_update(pim, up);
+		}
+	}
+
+	/* Register new RP addr with Zebra NHT */
+	nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+	if (PIM_DEBUG_PIM_NHT_RP) {
+		char buf[PREFIX2STR_BUFFER];
+		char buf1[PREFIX2STR_BUFFER];
+
+		prefix2str(&nht_p, buf, sizeof(buf));
+		prefix2str(&rp_info->group, buf1, sizeof(buf1));
+		zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
+			   __PRETTY_FUNCTION__, buf, buf1);
+	}
+
+	pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
+	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+				     &rp_info->group, 1)) {
+		route_unlock_node(rn);
+		return PIM_RP_NO_PATH;
+	}
+
+	pim_rp_check_interfaces(pim, rp_info);
+
+	route_unlock_node(rn);
+
+	pim_rp_refresh_group_to_rp_mapping(pim);
+
+	return result;
+}
+
void pim_rp_setup(struct pim_instance *pim)
{
struct listnode *node;
nht_p.prefixlen = IPV4_MAX_BITLEN;
nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+ NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
&nht_p, &rp_info->group, 1))
if (PIM_DEBUG_PIM_NHT_RP)
"%s: NHT Register RP addr %s grp %s with Zebra",
__PRETTY_FUNCTION__, buf, buf1);
}
-
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+ NULL);
pim_rpf_set_refresh_time(pim);
(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
&nht_p, &rp_info->group, 1);
if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
continue;
+ if (rp_info->rp_src == RP_SRC_BSR)
+ continue;
+
if (rp_info->plist)
vty_out(vty, "%sip pim rp %s prefix-list %s\n", spaces,
inet_ntop(AF_INET,
struct rp_info *rp_info;
struct rp_info *prev_rp_info = NULL;
struct listnode *node;
+ char source[7];
json_object *json = NULL;
json_object *json_rp_rows = NULL;
json = json_object_new_object();
else
vty_out(vty,
- "RP address group/prefix-list OIF I am RP\n");
-
+ "RP address group/prefix-list OIF I am RP Source\n");
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (!pim_rpf_addr_is_inaddr_none(&rp_info->rp)) {
char buf[48];
+			/* Bounded copy: "Static" exactly fills source[7],
+			 * so use snprintf instead of strcpy.
+			 */
+			if (rp_info->rp_src == RP_SRC_STATIC)
+				snprintf(source, sizeof(source), "Static");
+			else if (rp_info->rp_src == RP_SRC_BSR)
+				snprintf(source, sizeof(source), "BSR");
+			else
+				snprintf(source, sizeof(source), "None");
if (uj) {
/*
* If we have moved on to a new RP then add the
json_row, "group",
prefix2str(&rp_info->group, buf,
48));
+ json_object_string_add(json_row, "source",
+ source);
json_object_array_add(json_rp_rows, json_row);
} else {
if (rp_info->i_am_rp)
vty_out(vty, "yes\n");
else
- vty_out(vty, "no\n");
- }
+ vty_out(vty, "no");
+ vty_out(vty, "%14s\n", source);
+ }
prev_rp_info = rp_info;
}
}
nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
- &pnc))
+ false, &pnc))
continue;
for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
#include "pim_iface.h"
#include "pim_rpf.h"
+enum rp_source {
+ RP_SRC_NONE = 0,
+ RP_SRC_STATIC,
+ RP_SRC_BSR
+};
+
struct rp_info {
struct prefix group;
struct pim_rpf rp;
+ enum rp_source rp_src;
int i_am_rp;
char *plist;
};
void pim_rp_list_hash_clean(void *data);
-int pim_rp_new(struct pim_instance *pim, const char *rp, const char *group,
- const char *plist);
-int pim_rp_del(struct pim_instance *pim, const char *rp, const char *group,
- const char *plist);
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+ const char *group, const char *plist);
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag);
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+ const char *group, const char *plist);
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag);
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+ struct prefix group, enum rp_source rp_src_flag);
void pim_rp_prefix_list_update(struct pim_instance *pim,
struct prefix_list *plist);
if ((up->sg.src.s_addr == INADDR_ANY && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
neigh_needed = FALSE;
- pim_find_or_track_nexthop(pim, &nht_p, up, NULL, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, up, NULL, false, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
neigh_needed))
return PIM_RPF_FAILURE;
zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
__PRETTY_FUNCTION__, up->sg_str, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
}
XFREE(MTYPE_PIM_UPSTREAM, up);
#include "pim_msdp.h"
#include "pim_ssm.h"
#include "pim_bfd.h"
+#include "pim_bsm.h"
#include "pim_vxlan.h"
int pim_debug_config_write(struct vty *vty)
++writes;
}
+ if (PIM_DEBUG_BSM) {
+ vty_out(vty, "debug pim bsm\n");
+ ++writes;
+ }
+
if (PIM_DEBUG_VXLAN) {
vty_out(vty, "debug pim vxlan\n");
++writes;
writes +=
pim_static_write_mroute(pim, vty, ifp);
+ pim_bsm_write_config(vty, ifp);
+ ++writes;
pim_bfd_write_config(vty, ifp);
+ ++writes;
}
vty_endframe(vty, "!\n");
++writes;
nht_p.prefixlen = IPV4_MAX_BITLEN;
nht_p.u.prefix4 = up->upstream_addr;
pim_delete_tracked_nexthop(vxlan_sg->pim,
- &nht_p, up, NULL);
+ &nht_p, up, NULL, false);
}
pim_upstream_ref(up, flags, __PRETTY_FUNCTION__);
vxlan_sg->up = up;
#define PIM_MASK_PIM_NHT_RP (1 << 24)
#define PIM_MASK_MTRACE (1 << 25)
#define PIM_MASK_VXLAN (1 << 26)
+#define PIM_MASK_BSM_PROC (1 << 27)
/* Remember 32 bits!!! */
/* PIM error codes */
#define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
#define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
#define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
+#define PIM_DEBUG_BSM (router->debugs & PIM_MASK_BSM_PROC)
#define PIM_DEBUG_EVENTS \
(router->debugs \
& (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS \
- | PIM_MASK_MSDP_EVENTS))
+ | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
#define PIM_DEBUG_PACKETS \
(router->debugs \
& (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS \
#define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
#define PIM_DO_DEBUG_MROUTE (router->debugs |= PIM_MASK_MROUTE)
#define PIM_DO_DEBUG_MROUTE_DETAIL (router->debugs |= PIM_MASK_MROUTE_DETAIL)
+#define PIM_DO_DEBUG_BSM (router->debugs |= PIM_MASK_BSM_PROC)
#define PIM_DO_DEBUG_PIM_HELLO (router->debugs |= PIM_MASK_PIM_HELLO)
#define PIM_DO_DEBUG_PIM_J_P (router->debugs |= PIM_MASK_PIM_J_P)
#define PIM_DO_DEBUG_PIM_REG (router->debugs |= PIM_MASK_PIM_REG)
#define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
#define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
#define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
+#define PIM_DONT_DEBUG_BSM (router->debugs &= ~PIM_MASK_BSM_PROC)
void pim_router_init(void);
void pim_router_terminate(void);
pimd/pim_assert.c \
pimd/pim_bfd.c \
pimd/pim_br.c \
+ pimd/pim_bsm.c \
pimd/pim_cmd.c \
pimd/pim_errors.c \
pimd/pim_hello.c \
pimd/pim_assert.h \
pimd/pim_bfd.h \
pimd/pim_br.h \
+ pimd/pim_bsm.h \
pimd/pim_cmd.h \
pimd/pim_errors.h \
pimd/pim_hello.h \
frr.ripngd-debug
frr.ldp-debug
frr.zebra-debug
+ frr.pimd-debug
+ frr.nhrpd-debug
+ frr.babeld-debug
+ frr.eigrpd-debug
+ frr.pbrd-debug
+ frr.staticd-debug
+ frr.bfdd-debug
+ frr.fabricd-debug
vtysh can be accessed as frr.vtysh (Make sure you have /snap/bin in your
path). If access as `vtysh` instead of `frr.vtysh` is needed, you can enable it
Starts staticd daemon in foreground
- `frr.bfdd-debug`:
Starts bfdd daemon in foreground
+- `frr.fabricd-debug`:
+ Starts fabricd daemon in foreground
MPLS (LDP)
----------
install -D -m 0755 pbrd-service $(DESTDIR)/bin/
install -D -m 0755 staticd-service $(DESTDIR)/bin/
install -D -m 0755 bfdd-service $(DESTDIR)/bin/
+ install -D -m 0755 fabricd-service $(DESTDIR)/bin/
install -D -m 0755 set-options $(DESTDIR)/bin/
install -D -m 0755 show_version $(DESTDIR)/bin/
if ! [ -e $SNAP_DATA/rpki.conf ]; then
echo "-M rpki" > $SNAP_DATA/rpki.conf
fi
-EXTRA_OPTIONS="`cat $SNAP_DATA/rpki.conf`"
+EXTRA_OPTIONS="`$SNAP/bin/cat $SNAP_DATA/rpki.conf`"
exec $SNAP/sbin/bgpd \
-f $SNAP_DATA/bgpd.conf \
--pid_file $SNAP_DATA/bgpd.pid \
--- /dev/null
+#!/bin/sh
+# Launch fabricd in the foreground using snap-local config/state paths.
+
+set -e -x
+
+# Seed the default config on first run; quote expansions so paths with
+# spaces cannot word-split (shellcheck SC2086).
+if ! [ -e "$SNAP_DATA/fabricd.conf" ]; then
+    cp "$SNAP/etc/frr/fabricd.conf.default" "$SNAP_DATA/fabricd.conf"
+fi
+exec "$SNAP/sbin/fabricd" \
+    -f "$SNAP_DATA/fabricd.conf" \
+    --pid_file "$SNAP_DATA/fabricd.pid" \
+    --socket "$SNAP_DATA/zsock" \
+    --vty_socket "$SNAP_DATA"
+
description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
FRRouting (FRR) is free software which manages TCP/IP based routing
protocols. It supports BGP4, BGP4+, OSPFv2, OSPFv3, IS-IS, RIPv1, RIPv2,
- RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing) and BFD as well as
- the IPv6 versions of these.
+ RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing), BFD and OpenFabric
+ as well as the IPv6 versions of these.
FRRouting (frr) is a fork of Quagga.
confinement: strict
grade: devel
- network
- network-bind
- network-control
+ fabricd:
+ command: bin/fabricd-service
+ daemon: simple
+ plugs:
+ - network
+ - network-bind
+ - network-control
set:
command: bin/set-options
zebra-debug:
- network-bind
- network-control
bgpd-debug:
- command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules `cat $SNAP_DATA/rpki.conf 2> /dev/null`
+ command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules
plugs:
- network
- network-bind
- network
- network-bind
- network-control
+ fabricd-debug:
+ command: sbin/fabricd -f $SNAP_DATA/fabricd.conf --pid_file $SNAP_DATA/fabricd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+ plugs:
+ - network
+ - network-bind
+ - network-control
parts:
rtrlib:
stage-packages:
- libssh-4
prime:
- - lib/x86_64-linux-gnu/librtr.so*
+ - lib/librtr.so*
- usr/lib/x86_64-linux-gnu/libssh.so*
source: https://github.com/rtrlib/rtrlib.git
source-type: git
- source-tag: v0.5.0
+ source-tag: v0.6.3
plugin: cmake
configflags:
- -DCMAKE_BUILD_TYPE=Release
+ libyang:
+ build-packages:
+ - cmake
+ - make
+ - gcc
+ - libpcre3-dev
+ stage-packages:
+ - libpcre3
+ source: https://github.com/CESNET/libyang.git
+ source-type: git
+ source-tag: v0.16-r3
+ plugin: cmake
+ configflags:
+ - -DCMAKE_INSTALL_PREFIX:PATH=/usr
+ - -DENABLE_LYD_PRIV=ON
+ - -DENABLE_CACHE=OFF
+ - -DCMAKE_BUILD_TYPE:String="Release"
frr:
- after: [rtrlib]
+ after: [rtrlib,libyang]
build-packages:
- gcc
- autoconf
- iproute2
- logrotate
- libcap2
- - libc6
- libtinfo5
- libreadline6
- libjson-c2
- libc-ares2
- libatm1
- libprotobuf-c1
+ - libdb5.3
plugin: autotools
source: ../frr-@PACKAGE_VERSION@.tar.gz
configflags:
eigrpd.conf.default: etc/frr/eigrpd.conf.default
pbrd.conf.default: etc/frr/pbrd.conf.default
bfdd.conf.default: etc/frr/bfdd.conf.default
+ fabricd.conf.default: etc/frr/fabricd.conf.default
vtysh.conf.default: etc/frr/vtysh.conf.default
+ staticd.conf.default: etc/frr/staticd.conf.default
frr-scripts:
plugin: make
source: scripts
README.snap_build.md: doc/README.snap_build.md
extra_version_info.txt: doc/extra_version_info.txt
+passthrough:
+ layout:
+ /usr/lib/x86_64-linux-gnu/libyang:
+ bind: $SNAP/usr/lib/x86_64-linux-gnu/libyang
+
--- /dev/null
+router bgp 65000
+ neighbor 192.168.255.2 remote-as 65001
+ address-family ipv4 unicast
+ redistribute connected
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.254/32
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
--- /dev/null
+router bgp 65001
+ bgp default show-hostname
+ neighbor 192.168.255.1 remote-as 65000
+ address-family ipv4 unicast
+ redistribute connected
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.253/32
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_bgp_show_ip_bgp_fqdn.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2019 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_show_ip_bgp_fqdn.py:
+Test if FQND is visible in `show [ip] bgp` output if
+`bgp default show-hostname` is toggled.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+class TemplateTopo(Topo):
+    "Two routers (r1, r2) joined by a single switch (s1)."
+
+    def build(self, *_args, **_opts):
+        tgen = get_topogen(self)
+
+        for idx in (1, 2):
+            tgen.add_router('r{}'.format(idx))
+
+        sw = tgen.add_switch('s1')
+        for rname in ('r1', 'r2'):
+            sw.add_link(tgen.gears[rname])
+
+def setup_module(mod):
+    "Start the topology and load zebra/bgpd configs on each router."
+    tgen = Topogen(TemplateTopo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    # enumerate() index was unused; iteritems() matches the py2 topotest
+    # framework baseline.
+    for rname, router in router_list.iteritems():
+        router.load_config(
+            TopoRouter.RD_ZEBRA,
+            os.path.join(CWD, '{}/zebra.conf'.format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP,
+            os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+        )
+
+    tgen.start_router()
+
+def teardown_module(mod):
+    "Tear down the topology after all tests in this module finish."
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+def test_bgp_show_ip_bgp_fqdn():
+    # Renamed: the old name test_bgp_maximum_prefix_invalid was copy-pasted
+    # from an unrelated test; this file checks FQDN nexthop display.
+    "Verify the nexthop fqdn appears only where show-hostname is enabled."
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_converge(router, neighbor):
+        # NOTE(review): loops forever if the session never establishes;
+        # relies on the topotest harness timeout to abort the run.
+        cmd = "show ip bgp neighbor {0} json".format(neighbor)
+        while True:
+            output = json.loads(tgen.gears[router].vtysh_cmd(cmd))
+            if output[neighbor]['bgpState'] == 'Established':
+                time.sleep(3)
+                return True
+
+    def _bgp_show_nexthop(router, prefix):
+        # Return 'fqdn' if any nexthop of the prefix carries an fqdn key.
+        cmd = "show ip bgp json"
+        output = json.loads(tgen.gears[router].vtysh_cmd(cmd))
+        for nh in output['routes'][prefix][0]['nexthops']:
+            if 'fqdn' in nh:
+                return 'fqdn'
+        return 'ip'
+
+    # r2 has 'bgp default show-hostname', so the route learned from r1
+    # shows an fqdn nexthop; r1 does not enable it and shows a plain ip.
+    if _bgp_converge('r2', '192.168.255.1'):
+        assert _bgp_show_nexthop('r2', '172.16.255.254/32') == 'fqdn'
+
+    if _bgp_converge('r1', '192.168.255.2'):
+        assert _bgp_show_nexthop('r1', '172.16.255.253/32') == 'ip'
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
break;
}
for (ALL_NEXTHOPS(re->ng, rtnh))
- if (nexthop_same_no_recurse(rtnh, nh)) {
+ /*
+ * No guarantee all kernel send nh with labels
+ * on delete.
+ */
+ if (nexthop_same_no_labels(rtnh, nh)) {
same = re;
break;
}