2 * pim_bsm.c: PIM BSM handling routines
4 * Copyright (C) 2018-19 Vmware, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
24 #include "pim_iface.h"
25 #include "pim_instance.h"
27 #include "pim_hello.h"
33 /* Functions forward declaration */
34 static void pim_bs_timer_start(struct bsm_scope
*scope
, int bs_timeout
);
35 static void pim_g2rp_timer_start(struct bsm_rpinfo
*bsrp
, int hold_time
);
36 static inline void pim_g2rp_timer_restart(struct bsm_rpinfo
*bsrp
,
/* Private memory types for BSM bookkeeping (freed with matching XFREE). */
DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info")
DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_NODE, "PIM BSR advertised RP info")
DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_INFO, "PIM BSM Info")
DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet")

/* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */
#define MAX_IP_HDR_LEN 24
48 /* pim_bsm_write_config - Write the interface pim bsm configuration.*/
49 void pim_bsm_write_config(struct vty
*vty
, struct interface
*ifp
)
51 struct pim_interface
*pim_ifp
= ifp
->info
;
54 if (!pim_ifp
->bsm_enable
)
55 vty_out(vty
, " no ip pim bsm\n");
56 if (!pim_ifp
->ucast_bsm_accept
)
57 vty_out(vty
, " no ip pim unicast-bsm\n");
61 static void pim_free_bsgrp_data(struct bsgrp_node
*bsgrp_node
)
63 if (bsgrp_node
->bsrp_list
)
64 list_delete(&bsgrp_node
->bsrp_list
);
65 if (bsgrp_node
->partial_bsrp_list
)
66 list_delete(&bsgrp_node
->partial_bsrp_list
);
67 XFREE(MTYPE_PIM_BSGRP_NODE
, bsgrp_node
);
70 static void pim_free_bsgrp_node(struct route_table
*rt
, struct prefix
*grp
)
72 struct route_node
*rn
;
74 rn
= route_node_lookup(rt
, grp
);
77 route_unlock_node(rn
);
78 route_unlock_node(rn
);
82 static void pim_bsm_node_free(struct bsm_info
*bsm
)
85 XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM
, bsm
->bsm
);
86 XFREE(MTYPE_PIM_BSM_INFO
, bsm
);
89 static int pim_g2rp_list_compare(struct bsm_rpinfo
*node1
,
90 struct bsm_rpinfo
*node2
)
93 * Step-1 : Loweset Rp priority will have higher precedance.
94 * Step-2 : If priority same then higher hash val will have
96 * Step-3 : If Hash val is same then highest rp address will
99 if (node1
->rp_prio
< node2
->rp_prio
)
101 if (node1
->rp_prio
> node2
->rp_prio
)
103 if (node1
->hash
< node2
->hash
)
105 if (node1
->hash
> node2
->hash
)
107 if (node1
->rp_address
.s_addr
< node2
->rp_address
.s_addr
)
109 if (node1
->rp_address
.s_addr
> node2
->rp_address
.s_addr
)
114 static void pim_free_bsrp_node(struct bsm_rpinfo
*bsrp_info
)
116 if (bsrp_info
->g2rp_timer
)
117 THREAD_OFF(bsrp_info
->g2rp_timer
);
118 XFREE(MTYPE_PIM_BSRP_NODE
, bsrp_info
);
121 static struct list
*pim_alloc_bsrp_list(void)
123 struct list
*new_list
= NULL
;
125 new_list
= list_new();
130 new_list
->cmp
= (int (*)(void *, void *))pim_g2rp_list_compare
;
131 new_list
->del
= (void (*)(void *))pim_free_bsrp_node
;
136 static struct bsgrp_node
*pim_bsm_new_bsgrp_node(struct route_table
*rt
,
139 struct route_node
*rn
;
140 struct bsgrp_node
*bsgrp
;
142 rn
= route_node_get(rt
, grp
);
144 zlog_warn("%s: route node creation failed",
145 __PRETTY_FUNCTION__
);
148 bsgrp
= XCALLOC(MTYPE_PIM_BSGRP_NODE
, sizeof(struct bsgrp_node
));
152 zlog_debug("%s: bsgrp alloc failed",
153 __PRETTY_FUNCTION__
);
154 route_unlock_node(rn
);
159 bsgrp
->bsrp_list
= pim_alloc_bsrp_list();
160 bsgrp
->partial_bsrp_list
= pim_alloc_bsrp_list();
162 if ((!bsgrp
->bsrp_list
) || (!bsgrp
->partial_bsrp_list
)) {
163 route_unlock_node(rn
);
164 pim_free_bsgrp_data(bsgrp
);
168 prefix_copy(&bsgrp
->group
, grp
);
172 static int pim_on_bs_timer(struct thread
*t
)
174 struct route_node
*rn
;
175 struct bsm_scope
*scope
;
176 struct bsgrp_node
*bsgrp_node
;
177 struct bsm_rpinfo
*bsrp
;
179 char buf
[PREFIX2STR_BUFFER
];
180 bool is_bsr_tracking
= true;
182 scope
= THREAD_ARG(t
);
183 THREAD_OFF(scope
->bs_timer
);
186 zlog_debug("%s: Bootstrap Timer expired for scope: %d",
187 __PRETTY_FUNCTION__
, scope
->sz_id
);
189 /* Remove next hop tracking for the bsr */
190 nht_p
.family
= AF_INET
;
191 nht_p
.prefixlen
= IPV4_MAX_BITLEN
;
192 nht_p
.u
.prefix4
= scope
->current_bsr
;
194 prefix2str(&nht_p
, buf
, sizeof(buf
));
195 zlog_debug("%s: Deregister BSR addr %s with Zebra NHT",
196 __PRETTY_FUNCTION__
, buf
);
198 pim_delete_tracked_nexthop(scope
->pim
, &nht_p
, NULL
, NULL
,
201 /* Reset scope zone data */
202 scope
->accept_nofwd_bsm
= false;
203 scope
->state
= ACCEPT_ANY
;
204 scope
->current_bsr
.s_addr
= INADDR_ANY
;
205 scope
->current_bsr_prio
= 0;
206 scope
->current_bsr_first_ts
= 0;
207 scope
->current_bsr_last_ts
= 0;
208 scope
->bsm_frag_tag
= 0;
209 list_delete_all_node(scope
->bsm_list
);
211 for (rn
= route_top(scope
->bsrp_table
); rn
; rn
= route_next(rn
)) {
213 bsgrp_node
= (struct bsgrp_node
*)rn
->info
;
216 zlog_debug("%s: bsgrp_node is null",
217 __PRETTY_FUNCTION__
);
220 /* Give grace time for rp to continue for another hold time */
221 if ((bsgrp_node
->bsrp_list
) && (bsgrp_node
->bsrp_list
->count
)) {
222 bsrp
= listnode_head(bsgrp_node
->bsrp_list
);
223 pim_g2rp_timer_restart(bsrp
, bsrp
->rp_holdtime
);
225 /* clear pending list */
226 if ((bsgrp_node
->partial_bsrp_list
)
227 && (bsgrp_node
->partial_bsrp_list
->count
)) {
228 list_delete_all_node(bsgrp_node
->partial_bsrp_list
);
229 bsgrp_node
->pend_rp_cnt
= 0;
235 static void pim_bs_timer_stop(struct bsm_scope
*scope
)
238 zlog_debug("%s : BS timer being stopped of sz: %d",
239 __PRETTY_FUNCTION__
, scope
->sz_id
);
240 THREAD_OFF(scope
->bs_timer
);
243 static void pim_bs_timer_start(struct bsm_scope
*scope
, int bs_timeout
)
247 zlog_debug("%s : Invalid scope(NULL).",
248 __PRETTY_FUNCTION__
);
251 THREAD_OFF(scope
->bs_timer
);
253 zlog_debug("%s : starting bs timer for scope %d with timeout %d secs",
254 __PRETTY_FUNCTION__
, scope
->sz_id
, bs_timeout
);
255 thread_add_timer(router
->master
, pim_on_bs_timer
, scope
, bs_timeout
,
/* Restart is just start: pim_bs_timer_start cancels any running timer. */
static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
{
	pim_bs_timer_start(scope, bs_timeout);
}
264 void pim_bsm_proc_init(struct pim_instance
*pim
)
266 memset(&pim
->global_scope
, 0, sizeof(struct bsm_scope
));
268 pim
->global_scope
.sz_id
= PIM_GBL_SZ_ID
;
269 pim
->global_scope
.bsrp_table
= route_table_init();
270 pim
->global_scope
.accept_nofwd_bsm
= true;
271 pim
->global_scope
.state
= NO_INFO
;
272 pim
->global_scope
.pim
= pim
;
273 pim
->global_scope
.bsm_list
= list_new();
274 pim
->global_scope
.bsm_list
->del
= (void (*)(void *))pim_bsm_node_free
;
275 pim_bs_timer_start(&pim
->global_scope
, PIM_BS_TIME
);
278 void pim_bsm_proc_free(struct pim_instance
*pim
)
280 struct route_node
*rn
;
281 struct bsgrp_node
*bsgrp
;
283 pim_bs_timer_stop(&pim
->global_scope
);
285 if (pim
->global_scope
.bsm_list
)
286 list_delete(&pim
->global_scope
.bsm_list
);
288 for (rn
= route_top(pim
->global_scope
.bsrp_table
); rn
;
289 rn
= route_next(rn
)) {
293 pim_free_bsgrp_data(bsgrp
);
296 if (pim
->global_scope
.bsrp_table
)
297 route_table_finish(pim
->global_scope
.bsrp_table
);
300 static bool is_hold_time_elapsed(void *data
)
302 struct bsm_rpinfo
*bsrp
;
306 if (bsrp
->elapse_time
< bsrp
->rp_holdtime
)
312 static int pim_on_g2rp_timer(struct thread
*t
)
314 struct bsm_rpinfo
*bsrp
;
315 struct bsm_rpinfo
*bsrp_node
;
316 struct bsgrp_node
*bsgrp_node
;
317 struct listnode
*bsrp_ln
;
318 struct pim_instance
*pim
;
319 struct rp_info
*rp_info
;
320 struct route_node
*rn
;
322 struct in_addr bsrp_addr
;
324 bsrp
= THREAD_ARG(t
);
325 THREAD_OFF(bsrp
->g2rp_timer
);
326 bsgrp_node
= bsrp
->bsgrp_node
;
328 /* elapse time is the hold time of expired node */
329 elapse
= bsrp
->rp_holdtime
;
330 bsrp_addr
= bsrp
->rp_address
;
332 /* update elapse for all bsrp nodes */
333 for (ALL_LIST_ELEMENTS_RO(bsgrp_node
->bsrp_list
, bsrp_ln
, bsrp_node
))
334 bsrp_node
->elapse_time
+= elapse
;
336 /* remove the expired nodes from the list */
337 list_filter_out_nodes(bsgrp_node
->bsrp_list
, is_hold_time_elapsed
);
339 /* Get the next elected rp node */
340 bsrp
= listnode_head(bsgrp_node
->bsrp_list
);
341 pim
= bsgrp_node
->scope
->pim
;
342 rn
= route_node_lookup(pim
->rp_table
, &bsgrp_node
->group
);
345 zlog_warn("%s: Route node doesn't exist", __PRETTY_FUNCTION__
);
349 rp_info
= (struct rp_info
*)rn
->info
;
352 route_unlock_node(rn
);
356 if (rp_info
->rp_src
!= RP_SRC_STATIC
) {
357 /* If new rp available, change it else delete the existing */
359 bsrp_addr
= bsrp
->rp_address
;
360 pim_g2rp_timer_start(
361 bsrp
, (bsrp
->rp_holdtime
- bsrp
->elapse_time
));
362 pim_rp_change(pim
, bsrp_addr
, bsgrp_node
->group
,
365 pim_rp_del(pim
, bsrp_addr
, bsgrp_node
->group
, NULL
,
370 if ((!bsgrp_node
->bsrp_list
->count
)
371 && (!bsgrp_node
->partial_bsrp_list
->count
)) {
372 pim_free_bsgrp_node(pim
->global_scope
.bsrp_table
,
374 pim_free_bsgrp_data(bsgrp_node
);
380 static void pim_g2rp_timer_start(struct bsm_rpinfo
*bsrp
, int hold_time
)
384 zlog_debug("%s : Invalid brsp(NULL).",
385 __PRETTY_FUNCTION__
);
388 THREAD_OFF(bsrp
->g2rp_timer
);
393 "%s : starting g2rp timer for grp: %s - rp: %s with timeout %d secs(Actual Hold time : %d secs)",
395 prefix2str(&bsrp
->bsgrp_node
->group
, buf
, 48),
396 inet_ntoa(bsrp
->rp_address
), hold_time
,
400 thread_add_timer(router
->master
, pim_on_g2rp_timer
, bsrp
, hold_time
,
/* Restart delegates to start, which cancels any running timer. */
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
					  int hold_time)
{
	pim_g2rp_timer_start(bsrp, hold_time);
}
410 static void pim_g2rp_timer_stop(struct bsm_rpinfo
*bsrp
)
418 zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
420 prefix2str(&bsrp
->bsgrp_node
->group
, buf
, 48),
421 inet_ntoa(bsrp
->rp_address
));
424 THREAD_OFF(bsrp
->g2rp_timer
);
427 static bool is_hold_time_zero(void *data
)
429 struct bsm_rpinfo
*bsrp
;
433 if (bsrp
->rp_holdtime
)
439 static void pim_instate_pend_list(struct bsgrp_node
*bsgrp_node
)
441 struct bsm_rpinfo
*active
;
442 struct bsm_rpinfo
*pend
;
444 struct rp_info
*rp_info
;
445 struct route_node
*rn
;
446 struct pim_instance
*pim
;
447 struct rp_info
*rp_all
;
448 struct prefix group_all
;
449 bool had_rp_node
= true;
451 pim
= bsgrp_node
->scope
->pim
;
452 active
= listnode_head(bsgrp_node
->bsrp_list
);
454 /* Remove nodes with hold time 0 & check if list still has a head */
455 list_filter_out_nodes(bsgrp_node
->partial_bsrp_list
, is_hold_time_zero
);
456 pend
= listnode_head(bsgrp_node
->partial_bsrp_list
);
458 if (!str2prefix("224.0.0.0/4", &group_all
))
461 rp_all
= pim_rp_find_match_group(pim
, &group_all
);
462 rn
= route_node_lookup(pim
->rp_table
, &bsgrp_node
->group
);
465 pim_g2rp_timer_start(pend
, pend
->rp_holdtime
);
467 /* if rp node doesn't exist or exist but not configured(rp_all),
468 * install the rp from head(if exists) of partial list. List is
469 * is sorted such that head is the elected RP for the group.
471 if (!rn
|| (prefix_same(&rp_all
->group
, &bsgrp_node
->group
)
472 && pim_rpf_addr_is_inaddr_none(&rp_all
->rp
))) {
474 zlog_debug("%s: Route node doesn't exist",
475 __PRETTY_FUNCTION__
);
477 pim_rp_new(pim
, pend
->rp_address
, bsgrp_node
->group
,
481 rp_info
= (struct rp_info
*)rn
->info
;
483 route_unlock_node(rn
);
485 pim_rp_new(pim
, pend
->rp_address
,
486 bsgrp_node
->group
, NULL
, RP_SRC_BSR
);
491 /* We didn't have rp node and pending list is empty(unlikely), cleanup*/
492 if ((!had_rp_node
) && (!pend
)) {
493 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
495 pim_free_bsgrp_data(bsgrp_node
);
499 if ((had_rp_node
) && (rp_info
->rp_src
!= RP_SRC_STATIC
)) {
500 /* This means we searched and got rp node, needs unlock */
501 route_unlock_node(rn
);
503 if (active
&& pend
) {
504 if ((active
->rp_address
.s_addr
505 != pend
->rp_address
.s_addr
))
506 pim_rp_change(pim
, pend
->rp_address
,
507 bsgrp_node
->group
, RP_SRC_BSR
);
510 /* Possible when the first BSM has group with 0 rp count */
511 if ((!active
) && (!pend
)) {
514 "%s: Both bsrp and partial list are empty",
515 __PRETTY_FUNCTION__
);
517 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
519 pim_free_bsgrp_data(bsgrp_node
);
523 /* Possible when a group with 0 rp count received in BSM */
524 if ((active
) && (!pend
)) {
525 pim_rp_del(pim
, active
->rp_address
, bsgrp_node
->group
,
527 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
530 zlog_debug("%s:Pend List is null,del grp node",
531 __PRETTY_FUNCTION__
);
533 pim_free_bsgrp_data(bsgrp_node
);
538 if ((had_rp_node
) && (rp_info
->rp_src
== RP_SRC_STATIC
)) {
539 /* We need to unlock rn this case */
540 route_unlock_node(rn
);
541 /* there is a chance that static rp exist and bsrp cleaned
542 * so clean bsgrp node if pending list empty
547 "%s: Partial list is empty, static rp exists",
548 __PRETTY_FUNCTION__
);
549 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
551 pim_free_bsgrp_data(bsgrp_node
);
556 /* swap the list & delete all nodes in partial list (old bsrp_list)
558 * active is head of bsrp list
559 * pend is head of partial list
561 * active is head of partial list
562 * pend is head of bsrp list
563 * So check appriate head after swap and clean the new partial list
565 temp
= bsgrp_node
->bsrp_list
;
566 bsgrp_node
->bsrp_list
= bsgrp_node
->partial_bsrp_list
;
567 bsgrp_node
->partial_bsrp_list
= temp
;
570 pim_g2rp_timer_stop(active
);
571 list_delete_all_node(bsgrp_node
->partial_bsrp_list
);
575 static bool pim_bsr_rpf_check(struct pim_instance
*pim
, struct in_addr bsr
,
576 struct in_addr ip_src_addr
)
578 struct pim_nexthop nexthop
;
581 memset(&nexthop
, 0, sizeof(nexthop
));
583 /* New BSR recived */
584 if (bsr
.s_addr
!= pim
->global_scope
.current_bsr
.s_addr
) {
585 result
= pim_nexthop_match(pim
, bsr
, ip_src_addr
);
587 /* Nexthop lookup pass for the new BSR address */
592 char bsr_str
[INET_ADDRSTRLEN
];
594 pim_inet4_dump("<bsr?>", bsr
, bsr_str
, sizeof(bsr_str
));
595 zlog_debug("%s : No route to BSR address %s",
596 __PRETTY_FUNCTION__
, bsr_str
);
601 return pim_nexthop_match_nht_cache(pim
, bsr
, ip_src_addr
);
604 static bool is_preferred_bsr(struct pim_instance
*pim
, struct in_addr bsr
,
607 if (bsr
.s_addr
== pim
->global_scope
.current_bsr
.s_addr
)
610 if (bsr_prio
> pim
->global_scope
.current_bsr_prio
)
613 else if (bsr_prio
== pim
->global_scope
.current_bsr_prio
) {
614 if (bsr
.s_addr
>= pim
->global_scope
.current_bsr
.s_addr
)
622 static void pim_bsm_update(struct pim_instance
*pim
, struct in_addr bsr
,
625 struct pim_nexthop_cache pnc
;
627 if (bsr
.s_addr
!= pim
->global_scope
.current_bsr
.s_addr
) {
629 char buf
[PREFIX2STR_BUFFER
];
630 bool is_bsr_tracking
= true;
632 /* De-register old BSR and register new BSR with Zebra NHT */
633 nht_p
.family
= AF_INET
;
634 nht_p
.prefixlen
= IPV4_MAX_BITLEN
;
636 if (pim
->global_scope
.current_bsr
.s_addr
!= INADDR_ANY
) {
637 nht_p
.u
.prefix4
= pim
->global_scope
.current_bsr
;
639 prefix2str(&nht_p
, buf
, sizeof(buf
));
641 "%s: Deregister BSR addr %s with Zebra NHT",
642 __PRETTY_FUNCTION__
, buf
);
644 pim_delete_tracked_nexthop(pim
, &nht_p
, NULL
, NULL
,
648 nht_p
.u
.prefix4
= bsr
;
650 prefix2str(&nht_p
, buf
, sizeof(buf
));
652 "%s: NHT Register BSR addr %s with Zebra NHT",
653 __PRETTY_FUNCTION__
, buf
);
656 memset(&pnc
, 0, sizeof(struct pim_nexthop_cache
));
657 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, NULL
,
658 is_bsr_tracking
, &pnc
);
659 pim
->global_scope
.current_bsr
= bsr
;
660 pim
->global_scope
.current_bsr_first_ts
=
661 pim_time_monotonic_sec();
662 pim
->global_scope
.state
= ACCEPT_PREFERRED
;
664 pim
->global_scope
.current_bsr_prio
= bsr_prio
;
665 pim
->global_scope
.current_bsr_last_ts
= pim_time_monotonic_sec();
668 static bool pim_bsm_send_intf(uint8_t *buf
, int len
, struct interface
*ifp
,
669 struct in_addr dst_addr
)
671 struct pim_interface
*pim_ifp
;
677 zlog_debug("%s: Pim interface not available for %s",
678 __PRETTY_FUNCTION__
, ifp
->name
);
682 if (pim_ifp
->pim_sock_fd
== -1) {
684 zlog_debug("%s: Pim sock not available for %s",
685 __PRETTY_FUNCTION__
, ifp
->name
);
689 pim_msg_send(pim_ifp
->pim_sock_fd
, pim_ifp
->primary_address
, dst_addr
,
690 buf
, len
, ifp
->name
);
691 pim_ifp
->pim_ifstat_bsm_tx
++;
692 pim_ifp
->pim
->bsm_sent
++;
696 static bool pim_bsm_frag_send(uint8_t *buf
, uint32_t len
, struct interface
*ifp
,
697 uint32_t pim_mtu
, struct in_addr dst_addr
,
700 struct bsmmsg_grpinfo
*grpinfo
, *curgrp
;
701 uint8_t *firstgrp_ptr
;
704 uint32_t parsed_len
= 0;
705 uint32_t this_pkt_rem
;
706 uint32_t copy_byte_count
;
707 uint32_t this_pkt_len
;
708 uint8_t total_rp_cnt
;
712 bool pak_pending
= false;
714 /* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
715 if (pim_mtu
< (PIM_MIN_BSM_LEN
)) {
717 "%s: mtu(pim mtu: %d) size less than minimum bootsrap len",
718 __PRETTY_FUNCTION__
, pim_mtu
);
721 "%s: mtu (pim mtu:%d) less than minimum bootsrap len",
722 __PRETTY_FUNCTION__
, pim_mtu
);
726 pak_start
= XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM
, pim_mtu
);
730 zlog_debug("%s: malloc failed", __PRETTY_FUNCTION__
);
736 /* Fill PIM header later before sending packet to calc checksum */
737 pkt
+= PIM_MSG_HEADER_LEN
;
738 buf
+= PIM_MSG_HEADER_LEN
;
740 /* copy bsm header to new packet at offset of pim hdr */
741 memcpy(pkt
, buf
, PIM_BSM_HDR_LEN
);
742 pkt
+= PIM_BSM_HDR_LEN
;
743 buf
+= PIM_BSM_HDR_LEN
;
744 parsed_len
+= (PIM_MSG_HEADER_LEN
+ PIM_BSM_HDR_LEN
);
746 /* Store the position of first grp ptr, which can be reused for
747 * next packet to start filling group. old bsm header and pim hdr
748 * remains. So need not be filled again for next packet onwards.
752 /* we received mtu excluding IP hdr len as param
753 * now this_pkt_rem is mtu excluding
754 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
756 this_pkt_rem
= pim_mtu
- (PIM_BSM_HDR_LEN
+ PIM_MSG_HEADER_LEN
);
758 /* For each group till the packet length parsed */
759 while (parsed_len
< len
) {
760 /* pkt ---> fragment's current pointer
761 * buf ---> input buffer's current pointer
762 * mtu ---> size of the pim packet - PIM header
763 * curgrp ---> current group on the fragment
764 * grpinfo ---> current group on the input buffer
765 * this_pkt_rem ---> bytes remaing on the current fragment
766 * rp_fit_cnt ---> num of rp for current grp that
768 * total_rp_cnt ---> total rp present for the group in the buf
769 * frag_rp_cnt ---> no of rp for the group to be fit in
771 * this_rp_cnt ---> how many rp have we parsed
773 grpinfo
= (struct bsmmsg_grpinfo
*)buf
;
774 memcpy(pkt
, buf
, PIM_BSM_GRP_LEN
);
775 curgrp
= (struct bsmmsg_grpinfo
*)pkt
;
776 parsed_len
+= PIM_BSM_GRP_LEN
;
777 pkt
+= PIM_BSM_GRP_LEN
;
778 buf
+= PIM_BSM_GRP_LEN
;
779 this_pkt_rem
-= PIM_BSM_GRP_LEN
;
781 /* initialize rp count and total_rp_cnt before the rp loop */
783 total_rp_cnt
= grpinfo
->frag_rp_count
;
785 /* Loop till all RPs for the group parsed */
786 while (this_rp_cnt
< total_rp_cnt
) {
787 /* All RP from a group processed here.
788 * group is pointed by grpinfo.
789 * At this point make sure buf pointing to a RP
792 rp_fit_cnt
= this_pkt_rem
/ PIM_BSM_RP_LEN
;
794 /* calculate how many rp am i going to copy in
797 if (rp_fit_cnt
> (total_rp_cnt
- this_rp_cnt
))
798 frag_rp_cnt
= total_rp_cnt
- this_rp_cnt
;
800 frag_rp_cnt
= rp_fit_cnt
;
802 /* populate the frag rp count for the current grp */
803 curgrp
->frag_rp_count
= frag_rp_cnt
;
804 copy_byte_count
= frag_rp_cnt
* PIM_BSM_RP_LEN
;
806 /* copy all the rp that we are fitting in this
809 memcpy(pkt
, buf
, copy_byte_count
);
810 this_rp_cnt
+= frag_rp_cnt
;
811 buf
+= copy_byte_count
;
812 pkt
+= copy_byte_count
;
813 parsed_len
+= copy_byte_count
;
814 this_pkt_rem
-= copy_byte_count
;
816 /* Either we couldn't fit all rp for the group or the
819 if ((this_rp_cnt
< total_rp_cnt
)
821 < (PIM_BSM_GRP_LEN
+ PIM_BSM_RP_LEN
))) {
822 /* No space to fit in more rp, send this pkt */
823 this_pkt_len
= pim_mtu
- this_pkt_rem
;
824 pim_msg_build_header(pak_start
, this_pkt_len
,
825 PIM_MSG_TYPE_BOOTSTRAP
,
827 pim_bsm_send_intf(pak_start
, this_pkt_len
, ifp
,
830 /* Construct next fragment. Reuse old packet */
832 this_pkt_rem
= pim_mtu
- (PIM_BSM_HDR_LEN
833 + PIM_MSG_HEADER_LEN
);
835 /* If pkt can't accomodate next group + atleast
836 * one rp, we must break out of this inner loop
837 * and process next RP
839 if (total_rp_cnt
== this_rp_cnt
)
842 /* If some more RPs for the same group pending,
845 memcpy(pkt
, (uint8_t *)grpinfo
,
847 curgrp
= (struct bsmmsg_grpinfo
*)pkt
;
848 pkt
+= PIM_BSM_GRP_LEN
;
849 this_pkt_rem
-= PIM_BSM_GRP_LEN
;
852 /* We filled something but not yet sent out */
855 } /* while RP count */
856 } /*while parsed len */
858 /* Send if we have any unsent packet */
860 this_pkt_len
= pim_mtu
- this_pkt_rem
;
861 pim_msg_build_header(pak_start
, this_pkt_len
,
862 PIM_MSG_TYPE_BOOTSTRAP
, no_fwd
);
863 pim_bsm_send_intf(pak_start
, (pim_mtu
- this_pkt_rem
), ifp
,
866 XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM
, pak_start
);
870 static void pim_bsm_fwd_whole_sz(struct pim_instance
*pim
, uint8_t *buf
,
871 uint32_t len
, int sz
)
873 struct interface
*ifp
;
874 struct pim_interface
*pim_ifp
;
875 struct in_addr dst_addr
;
880 /* For now only global scope zone is supported, so send on all
881 * pim interfaces in the vrf
883 dst_addr
= qpim_all_pim_routers_addr
;
884 FOR_ALL_INTERFACES (pim
->vrf
, ifp
) {
886 if ((!pim_ifp
) || (!pim_ifp
->bsm_enable
))
888 pim_hello_require(ifp
);
889 pim_mtu
= ifp
->mtu
- MAX_IP_HDR_LEN
;
891 ret
= pim_bsm_frag_send(buf
, len
, ifp
, pim_mtu
,
894 zlog_debug("%s: pim_bsm_frag_send returned %s",
896 ret
? "TRUE" : "FALSE");
898 pim_msg_build_header(buf
, len
, PIM_MSG_TYPE_BOOTSTRAP
,
900 if (!pim_bsm_send_intf(buf
, len
, ifp
, dst_addr
)) {
903 "%s: pim_bsm_send_intf returned FALSE",
904 __PRETTY_FUNCTION__
);
910 bool pim_bsm_new_nbr_fwd(struct pim_neighbor
*neigh
, struct interface
*ifp
)
912 struct in_addr dst_addr
;
913 struct pim_interface
*pim_ifp
;
914 struct bsm_scope
*scope
;
915 struct listnode
*bsm_ln
;
916 struct bsm_info
*bsminfo
;
917 char neigh_src_str
[INET_ADDRSTRLEN
];
923 pim_inet4_dump("<src?>", neigh
->source_addr
, neigh_src_str
,
924 sizeof(neigh_src_str
));
925 zlog_debug("%s: New neighbor %s seen on %s",
926 __PRETTY_FUNCTION__
, neigh_src_str
, ifp
->name
);
931 /* DR only forwards BSM packet */
932 if (pim_ifp
->pim_dr_addr
.s_addr
== pim_ifp
->primary_address
.s_addr
) {
935 "%s: It is not DR, so don't forward BSM packet",
936 __PRETTY_FUNCTION__
);
939 if (!pim_ifp
->bsm_enable
) {
941 zlog_debug("%s: BSM proc not enabled on %s",
942 __PRETTY_FUNCTION__
, ifp
->name
);
946 scope
= &pim_ifp
->pim
->global_scope
;
948 if (!scope
->bsm_list
->count
) {
950 zlog_debug("%s: BSM list for the scope is empty",
951 __PRETTY_FUNCTION__
);
955 if (!pim_ifp
->ucast_bsm_accept
) {
956 dst_addr
= qpim_all_pim_routers_addr
;
958 zlog_debug("%s: Sending BSM mcast to %s",
959 __PRETTY_FUNCTION__
, neigh_src_str
);
961 dst_addr
= neigh
->source_addr
;
963 zlog_debug("%s: Sending BSM ucast to %s",
964 __PRETTY_FUNCTION__
, neigh_src_str
);
966 pim_mtu
= ifp
->mtu
- MAX_IP_HDR_LEN
;
967 pim_hello_require(ifp
);
969 for (ALL_LIST_ELEMENTS_RO(scope
->bsm_list
, bsm_ln
, bsminfo
)) {
970 if (pim_mtu
< bsminfo
->size
) {
971 ret
= pim_bsm_frag_send(bsminfo
->bsm
, bsminfo
->size
,
972 ifp
, pim_mtu
, dst_addr
, no_fwd
);
976 "%s: pim_bsm_frag_send failed",
977 __PRETTY_FUNCTION__
);
980 /* Pim header needs to be constructed */
981 pim_msg_build_header(bsminfo
->bsm
, bsminfo
->size
,
982 PIM_MSG_TYPE_BOOTSTRAP
, no_fwd
);
983 ret
= pim_bsm_send_intf(bsminfo
->bsm
, bsminfo
->size
,
988 "%s: pim_bsm_frag_send failed",
989 __PRETTY_FUNCTION__
);
996 struct bsgrp_node
*pim_bsm_get_bsgrp_node(struct bsm_scope
*scope
,
999 struct route_node
*rn
;
1000 struct bsgrp_node
*bsgrp
;
1002 rn
= route_node_lookup(scope
->bsrp_table
, grp
);
1005 zlog_debug("%s: Route node doesn't exist for the group",
1006 __PRETTY_FUNCTION__
);
1010 route_unlock_node(rn
);
1015 static uint32_t hash_calc_on_grp_rp(struct prefix group
, struct in_addr rp
,
1016 uint8_t hashmasklen
)
1022 uint32_t mask
= 0xffffffff;
1024 /* mask to be made zero if hashmasklen is 0 because mask << 32
1025 * may not give 0. hashmasklen can be 0 to 32.
1027 if (hashmasklen
== 0)
1030 /* in_addr stores ip in big endian, hence network byte order
1031 * convert to uint32 before processing hash
1033 grpaddr
= ntohl(group
.u
.prefix4
.s_addr
);
1034 /* Avoid shifting by 32 bit on a 32 bit register */
1036 grpaddr
= grpaddr
& ((mask
<< (32 - hashmasklen
)));
1038 grpaddr
= grpaddr
& mask
;
1039 rp_add
= ntohl(rp
.s_addr
);
1040 temp
= 1103515245 * ((1103515245 * grpaddr
+ 12345) ^ rp_add
) + 12345;
1041 hash
= temp
& (0x7fffffff);
1045 static bool pim_install_bsm_grp_rp(struct pim_instance
*pim
,
1046 struct bsgrp_node
*grpnode
,
1047 struct bsmmsg_rpinfo
*rp
)
1049 struct bsm_rpinfo
*bsm_rpinfo
;
1050 uint8_t hashMask_len
= pim
->global_scope
.hashMasklen
;
1052 /*memory allocation for bsm_rpinfo */
1053 bsm_rpinfo
= XCALLOC(MTYPE_PIM_BSRP_NODE
, sizeof(*bsm_rpinfo
));
1057 zlog_debug("%s, Memory allocation failed.\r\n",
1058 __PRETTY_FUNCTION__
);
1062 bsm_rpinfo
->rp_prio
= rp
->rp_pri
;
1063 bsm_rpinfo
->rp_holdtime
= rp
->rp_holdtime
;
1064 memcpy(&bsm_rpinfo
->rp_address
, &rp
->rpaddr
.addr
,
1065 sizeof(struct in_addr
));
1066 bsm_rpinfo
->elapse_time
= 0;
1068 /* Back pointer to the group node. */
1069 bsm_rpinfo
->bsgrp_node
= grpnode
;
1071 /* update hash for this rp node */
1072 bsm_rpinfo
->hash
= hash_calc_on_grp_rp(grpnode
->group
, rp
->rpaddr
.addr
,
1074 if (listnode_add_sort_nodup(grpnode
->partial_bsrp_list
, bsm_rpinfo
)) {
1077 "%s, bs_rpinfo node added to the partial bs_rplist.\r\n",
1078 __PRETTY_FUNCTION__
);
1083 zlog_debug("%s: list node not added\n", __PRETTY_FUNCTION__
);
1085 XFREE(MTYPE_PIM_BSRP_NODE
, bsm_rpinfo
);
1089 static void pim_update_pending_rp_cnt(struct bsm_scope
*sz
,
1090 struct bsgrp_node
*bsgrp
,
1091 uint16_t bsm_frag_tag
,
1092 uint32_t total_rp_count
)
1094 if (bsgrp
->pend_rp_cnt
) {
1095 /* received bsm is different packet ,
1096 * it is not same fragment.
1098 if (bsm_frag_tag
!= bsgrp
->frag_tag
) {
1101 "%s,Received a new BSM ,so clear the pending bs_rpinfo list.\r\n",
1102 __PRETTY_FUNCTION__
);
1103 list_delete_all_node(bsgrp
->partial_bsrp_list
);
1104 bsgrp
->pend_rp_cnt
= total_rp_count
;
1107 bsgrp
->pend_rp_cnt
= total_rp_count
;
1109 bsgrp
->frag_tag
= bsm_frag_tag
;
1112 /* Parsing BSR packet and adding to partial list of corresponding bsgrp node */
1113 static bool pim_bsm_parse_install_g2rp(struct bsm_scope
*scope
, uint8_t *buf
,
1114 int buflen
, uint16_t bsm_frag_tag
)
1116 struct bsmmsg_grpinfo grpinfo
;
1117 struct bsmmsg_rpinfo rpinfo
;
1118 struct prefix group
;
1119 struct bsgrp_node
*bsgrp
= NULL
;
1120 int frag_rp_cnt
= 0;
1124 while (buflen
> offset
) {
1125 /* Extract Group tlv from BSM */
1126 memcpy(&grpinfo
, buf
, sizeof(struct bsmmsg_grpinfo
));
1128 if (PIM_DEBUG_BSM
) {
1129 char grp_str
[INET_ADDRSTRLEN
];
1131 pim_inet4_dump("<Group?>", grpinfo
.group
.addr
, grp_str
,
1134 "%s, Group %s Rpcount:%d Fragment-Rp-count:%d\r\n",
1135 __PRETTY_FUNCTION__
, grp_str
, grpinfo
.rp_count
,
1136 grpinfo
.frag_rp_count
);
1139 buf
+= sizeof(struct bsmmsg_grpinfo
);
1140 offset
+= sizeof(struct bsmmsg_grpinfo
);
1142 if (grpinfo
.rp_count
== 0) {
1143 if (PIM_DEBUG_BSM
) {
1144 char grp_str
[INET_ADDRSTRLEN
];
1146 pim_inet4_dump("<Group?>", grpinfo
.group
.addr
,
1147 grp_str
, sizeof(grp_str
));
1149 "%s, Rp count is zero for group: %s\r\n",
1150 __PRETTY_FUNCTION__
, grp_str
);
1155 group
.family
= AF_INET
;
1156 group
.prefixlen
= grpinfo
.group
.mask
;
1157 group
.u
.prefix4
.s_addr
= grpinfo
.group
.addr
.s_addr
;
1159 /* Get the Group node for the BSM rp table */
1160 bsgrp
= pim_bsm_get_bsgrp_node(scope
, &group
);
1165 "%s, Create new BSM Group node.\r\n",
1166 __PRETTY_FUNCTION__
);
1168 /* create a new node to be added to the tree. */
1169 bsgrp
= pim_bsm_new_bsgrp_node(scope
->bsrp_table
,
1174 "%s, Failed to get the BSM group node.\r\n",
1175 __PRETTY_FUNCTION__
);
1179 bsgrp
->scope
= scope
;
1182 pim_update_pending_rp_cnt(scope
, bsgrp
, bsm_frag_tag
,
1184 frag_rp_cnt
= grpinfo
.frag_rp_count
;
1187 while (frag_rp_cnt
--) {
1188 /* Extract RP address tlv from BSM */
1189 memcpy(&rpinfo
, buf
, sizeof(struct bsmmsg_rpinfo
));
1190 rpinfo
.rp_holdtime
= ntohs(rpinfo
.rp_holdtime
);
1191 buf
+= sizeof(struct bsmmsg_rpinfo
);
1192 offset
+= sizeof(struct bsmmsg_rpinfo
);
1194 if (PIM_DEBUG_BSM
) {
1195 char rp_str
[INET_ADDRSTRLEN
];
1197 pim_inet4_dump("<Rpaddr?>", rpinfo
.rpaddr
.addr
,
1198 rp_str
, sizeof(rp_str
));
1200 "%s, Rp address - %s; pri:%d hold:%d\r\n",
1201 __PRETTY_FUNCTION__
, rp_str
,
1202 rpinfo
.rp_pri
, rpinfo
.rp_holdtime
);
1205 /* Call Install api to update grp-rp mappings */
1206 if (pim_install_bsm_grp_rp(scope
->pim
, bsgrp
, &rpinfo
))
1210 bsgrp
->pend_rp_cnt
-= ins_count
;
1212 if (!bsgrp
->pend_rp_cnt
) {
1215 "%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
1216 __PRETTY_FUNCTION__
);
1217 /* replace the bsrp_list with pending list */
1218 pim_instate_pend_list(bsgrp
);
1224 int pim_bsm_process(struct interface
*ifp
, struct ip
*ip_hdr
, uint8_t *buf
,
1225 uint32_t buf_size
, bool no_fwd
)
1227 struct bsm_hdr
*bshdr
;
1228 int sz
= PIM_GBL_SZ_ID
;
1229 struct bsmmsg_grpinfo
*msg_grp
;
1230 struct pim_interface
*pim_ifp
= NULL
;
1231 struct bsm_info
*bsminfo
;
1232 struct pim_instance
*pim
;
1233 char bsr_str
[INET_ADDRSTRLEN
];
1235 bool empty_bsm
= FALSE
;
1237 /* BSM Packet acceptance validation */
1238 pim_ifp
= ifp
->info
;
1241 zlog_debug("%s: multicast not enabled on interface %s",
1242 __PRETTY_FUNCTION__
, ifp
->name
);
1246 pim_ifp
->pim_ifstat_bsm_rx
++;
1250 /* Drop if bsm processing is disabled on interface */
1251 if (!pim_ifp
->bsm_enable
) {
1252 zlog_warn("%s: BSM not enabled on interface %s",
1253 __PRETTY_FUNCTION__
, ifp
->name
);
1254 pim_ifp
->pim_ifstat_bsm_cfg_miss
++;
1259 bshdr
= (struct bsm_hdr
*)(buf
+ PIM_MSG_HEADER_LEN
);
1260 pim_inet4_dump("<bsr?>", bshdr
->bsr_addr
.addr
, bsr_str
,
1262 pim
->global_scope
.hashMasklen
= bshdr
->hm_len
;
1263 frag_tag
= ntohs(bshdr
->frag_tag
);
1265 /* Identify empty BSM */
1266 if ((buf_size
- PIM_BSM_HDR_LEN
- PIM_MSG_HEADER_LEN
) < PIM_BSM_GRP_LEN
)
1270 msg_grp
= (struct bsmmsg_grpinfo
*)(buf
+ PIM_MSG_HEADER_LEN
1272 /* Currently we don't support scope zoned BSM */
1273 if (msg_grp
->group
.sz
) {
1276 "%s : Administratively scoped range BSM received",
1277 __PRETTY_FUNCTION__
);
1278 pim_ifp
->pim_ifstat_bsm_invalid_sz
++;
1284 /* Drop if bsr is not preferred bsr */
1285 if (!is_preferred_bsr(pim
, bshdr
->bsr_addr
.addr
, bshdr
->bsr_prio
)) {
1287 zlog_debug("%s : Received a non-preferred BSM",
1288 __PRETTY_FUNCTION__
);
1294 /* only accept no-forward BSM if quick refresh on startup */
1295 if ((pim
->global_scope
.accept_nofwd_bsm
)
1296 || (frag_tag
== pim
->global_scope
.bsm_frag_tag
)) {
1297 pim
->global_scope
.accept_nofwd_bsm
= false;
1301 "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
1302 __PRETTY_FUNCTION__
, bsr_str
);
1304 pim_ifp
->pim_ifstat_ucast_bsm_cfg_miss
++;
1309 /* Mulicast BSM received */
1310 if (ip_hdr
->ip_dst
.s_addr
== qpim_all_pim_routers_addr
.s_addr
) {
1312 if (!pim_bsr_rpf_check(pim
, bshdr
->bsr_addr
.addr
,
1316 "%s : RPF check fail for BSR address %s",
1317 __PRETTY_FUNCTION__
, bsr_str
);
1322 } else if (if_lookup_exact_address(&ip_hdr
->ip_dst
, AF_INET
,
1324 /* Unicast BSM received - if ucast bsm not enabled on
1325 * the interface, drop it
1327 if (!pim_ifp
->ucast_bsm_accept
) {
1330 "%s : Unicast BSM not enabled on interface %s",
1331 __PRETTY_FUNCTION__
, ifp
->name
);
1332 pim_ifp
->pim_ifstat_ucast_bsm_cfg_miss
++;
1339 zlog_debug("%s : Invalid destination address",
1340 __PRETTY_FUNCTION__
);
1347 zlog_debug("%s : Empty Pref BSM received",
1348 __PRETTY_FUNCTION__
);
1350 /* Parse Update bsm rp table and install/uninstall rp if required */
1351 if (!pim_bsm_parse_install_g2rp(
1352 &pim_ifp
->pim
->global_scope
,
1353 (buf
+ PIM_BSM_HDR_LEN
+ PIM_MSG_HEADER_LEN
),
1354 (buf_size
- PIM_BSM_HDR_LEN
- PIM_MSG_HEADER_LEN
),
1356 if (PIM_DEBUG_BSM
) {
1357 zlog_debug("%s, Parsing BSM failed.\r\n",
1358 __PRETTY_FUNCTION__
);
1363 /* Restart the bootstrap timer */
1364 pim_bs_timer_restart(&pim_ifp
->pim
->global_scope
,
1365 PIM_BSR_DEFAULT_TIMEOUT
);
1367 /* If new BSM received, clear the old bsm database */
1368 if (pim_ifp
->pim
->global_scope
.bsm_frag_tag
!= frag_tag
) {
1369 if (PIM_DEBUG_BSM
) {
1370 zlog_debug("%s: Current frag tag: %d Frag teg rcvd: %d",
1371 __PRETTY_FUNCTION__
,
1372 pim_ifp
->pim
->global_scope
.bsm_frag_tag
,
1375 list_delete_all_node(pim_ifp
->pim
->global_scope
.bsm_list
);
1376 pim_ifp
->pim
->global_scope
.bsm_frag_tag
= frag_tag
;
1379 /* update the scope information from bsm */
1380 pim_bsm_update(pim
, bshdr
->bsr_addr
.addr
, bshdr
->bsr_prio
);
1383 pim_bsm_fwd_whole_sz(pim_ifp
->pim
, buf
, buf_size
, sz
);
1384 bsminfo
= XCALLOC(MTYPE_PIM_BSM_INFO
, sizeof(struct bsm_info
));
1386 zlog_warn("%s: bsminfo alloc failed",
1387 __PRETTY_FUNCTION__
);
1391 bsminfo
->bsm
= XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM
, buf_size
);
1392 if (!bsminfo
->bsm
) {
1393 zlog_warn("%s: bsm alloc failed", __PRETTY_FUNCTION__
);
1394 XFREE(MTYPE_PIM_BSM_INFO
, bsminfo
);
1398 bsminfo
->size
= buf_size
;
1399 memcpy(bsminfo
->bsm
, buf
, buf_size
);
1400 listnode_add(pim_ifp
->pim
->global_scope
.bsm_list
, bsminfo
);