2 * pim_bsm.c: PIM BSM handling routines
4 * Copyright (C) 2018-19 Vmware, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
29 #include "pim_iface.h"
30 #include "pim_instance.h"
32 #include "pim_hello.h"
/* Functions forward declaration */
static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
/* NOTE(review): second parameter reconstructed — the declaration was
 * truncated in this view; confirm it matches the definition below.
 */
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
					  int hold_time);
45 DEFINE_MTYPE_STATIC(PIMD
, PIM_BSGRP_NODE
, "PIM BSR advertised grp info")
46 DEFINE_MTYPE_STATIC(PIMD
, PIM_BSRP_NODE
, "PIM BSR advertised RP info")
47 DEFINE_MTYPE_STATIC(PIMD
, PIM_BSM_INFO
, "PIM BSM Info")
48 DEFINE_MTYPE_STATIC(PIMD
, PIM_BSM_PKT_VAR_MEM
, "PIM BSM Packet")
50 /* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */
51 #define MAX_IP_HDR_LEN 24
53 /* pim_bsm_write_config - Write the interface pim bsm configuration.*/
54 void pim_bsm_write_config(struct vty
*vty
, struct interface
*ifp
)
56 struct pim_interface
*pim_ifp
= ifp
->info
;
59 if (!pim_ifp
->bsm_enable
)
60 vty_out(vty
, " no ip pim bsm\n");
61 if (!pim_ifp
->ucast_bsm_accept
)
62 vty_out(vty
, " no ip pim unicast-bsm\n");
66 static void pim_free_bsgrp_data(struct bsgrp_node
*bsgrp_node
)
68 if (bsgrp_node
->bsrp_list
)
69 list_delete(&bsgrp_node
->bsrp_list
);
70 if (bsgrp_node
->partial_bsrp_list
)
71 list_delete(&bsgrp_node
->partial_bsrp_list
);
72 XFREE(MTYPE_PIM_BSGRP_NODE
, bsgrp_node
);
75 static void pim_free_bsgrp_node(struct route_table
*rt
, struct prefix
*grp
)
77 struct route_node
*rn
;
79 rn
= route_node_lookup(rt
, grp
);
82 route_unlock_node(rn
);
83 route_unlock_node(rn
);
87 static void pim_bsm_node_free(struct bsm_info
*bsm
)
90 XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM
, bsm
->bsm
);
91 XFREE(MTYPE_PIM_BSM_INFO
, bsm
);
94 static int pim_g2rp_list_compare(struct bsm_rpinfo
*node1
,
95 struct bsm_rpinfo
*node2
)
98 * Step-1 : Loweset Rp priority will have higher precedance.
99 * Step-2 : If priority same then higher hash val will have
101 * Step-3 : If Hash val is same then highest rp address will
104 if (node1
->rp_prio
< node2
->rp_prio
)
106 if (node1
->rp_prio
> node2
->rp_prio
)
108 if (node1
->hash
< node2
->hash
)
110 if (node1
->hash
> node2
->hash
)
112 if (node1
->rp_address
.s_addr
< node2
->rp_address
.s_addr
)
114 if (node1
->rp_address
.s_addr
> node2
->rp_address
.s_addr
)
119 static void pim_free_bsrp_node(struct bsm_rpinfo
*bsrp_info
)
121 if (bsrp_info
->g2rp_timer
)
122 THREAD_OFF(bsrp_info
->g2rp_timer
);
123 XFREE(MTYPE_PIM_BSRP_NODE
, bsrp_info
);
126 static struct list
*pim_alloc_bsrp_list(void)
128 struct list
*new_list
= NULL
;
130 new_list
= list_new();
135 new_list
->cmp
= (int (*)(void *, void *))pim_g2rp_list_compare
;
136 new_list
->del
= (void (*)(void *))pim_free_bsrp_node
;
141 static struct bsgrp_node
*pim_bsm_new_bsgrp_node(struct route_table
*rt
,
144 struct route_node
*rn
;
145 struct bsgrp_node
*bsgrp
;
147 rn
= route_node_get(rt
, grp
);
149 zlog_warn("%s: route node creation failed",
150 __PRETTY_FUNCTION__
);
153 bsgrp
= XCALLOC(MTYPE_PIM_BSGRP_NODE
, sizeof(struct bsgrp_node
));
157 zlog_debug("%s: bsgrp alloc failed",
158 __PRETTY_FUNCTION__
);
159 route_unlock_node(rn
);
164 bsgrp
->bsrp_list
= pim_alloc_bsrp_list();
165 bsgrp
->partial_bsrp_list
= pim_alloc_bsrp_list();
167 if ((!bsgrp
->bsrp_list
) || (!bsgrp
->partial_bsrp_list
)) {
168 route_unlock_node(rn
);
169 pim_free_bsgrp_data(bsgrp
);
173 prefix_copy(&bsgrp
->group
, grp
);
177 static int pim_on_bs_timer(struct thread
*t
)
179 struct route_node
*rn
;
180 struct bsm_scope
*scope
;
181 struct bsgrp_node
*bsgrp_node
;
182 struct bsm_rpinfo
*bsrp
;
184 char buf
[PREFIX2STR_BUFFER
];
185 bool is_bsr_tracking
= true;
187 scope
= THREAD_ARG(t
);
188 THREAD_OFF(scope
->bs_timer
);
191 zlog_debug("%s: Bootstrap Timer expired for scope: %d",
192 __PRETTY_FUNCTION__
, scope
->sz_id
);
194 /* Remove next hop tracking for the bsr */
195 nht_p
.family
= AF_INET
;
196 nht_p
.prefixlen
= IPV4_MAX_BITLEN
;
197 nht_p
.u
.prefix4
= scope
->current_bsr
;
199 prefix2str(&nht_p
, buf
, sizeof(buf
));
200 zlog_debug("%s: Deregister BSR addr %s with Zebra NHT",
201 __PRETTY_FUNCTION__
, buf
);
203 pim_delete_tracked_nexthop(scope
->pim
, &nht_p
, NULL
, NULL
,
206 /* Reset scope zone data */
207 scope
->accept_nofwd_bsm
= false;
208 scope
->state
= ACCEPT_ANY
;
209 scope
->current_bsr
.s_addr
= INADDR_ANY
;
210 scope
->current_bsr_prio
= 0;
211 scope
->current_bsr_first_ts
= 0;
212 scope
->current_bsr_last_ts
= 0;
213 scope
->bsm_frag_tag
= 0;
214 list_delete_all_node(scope
->bsm_list
);
216 for (rn
= route_top(scope
->bsrp_table
); rn
; rn
= route_next(rn
)) {
218 bsgrp_node
= (struct bsgrp_node
*)rn
->info
;
221 zlog_debug("%s: bsgrp_node is null",
222 __PRETTY_FUNCTION__
);
225 /* Give grace time for rp to continue for another hold time */
226 if ((bsgrp_node
->bsrp_list
) && (bsgrp_node
->bsrp_list
->count
)) {
227 bsrp
= listnode_head(bsgrp_node
->bsrp_list
);
228 pim_g2rp_timer_restart(bsrp
, bsrp
->rp_holdtime
);
230 /* clear pending list */
231 if ((bsgrp_node
->partial_bsrp_list
)
232 && (bsgrp_node
->partial_bsrp_list
->count
)) {
233 list_delete_all_node(bsgrp_node
->partial_bsrp_list
);
234 bsgrp_node
->pend_rp_cnt
= 0;
240 static void pim_bs_timer_stop(struct bsm_scope
*scope
)
243 zlog_debug("%s : BS timer being stopped of sz: %d",
244 __PRETTY_FUNCTION__
, scope
->sz_id
);
245 THREAD_OFF(scope
->bs_timer
);
248 static void pim_bs_timer_start(struct bsm_scope
*scope
, int bs_timeout
)
252 zlog_debug("%s : Invalid scope(NULL).",
253 __PRETTY_FUNCTION__
);
256 THREAD_OFF(scope
->bs_timer
);
258 zlog_debug("%s : starting bs timer for scope %d with timeout %d secs",
259 __PRETTY_FUNCTION__
, scope
->sz_id
, bs_timeout
);
260 thread_add_timer(router
->master
, pim_on_bs_timer
, scope
, bs_timeout
,
/* Restart is just a start: pim_bs_timer_start already cancels any
 * running timer before rearming.
 */
static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
{
	pim_bs_timer_start(scope, bs_timeout);
}
/* Initialize the per-instance global BSM scope: fresh group/RP route
 * table, empty BSM cache with its destructor, accept-no-forward allowed
 * at startup, and the bootstrap timer armed.
 */
void pim_bsm_proc_init(struct pim_instance *pim)
{
	memset(&pim->global_scope, 0, sizeof(struct bsm_scope));

	pim->global_scope.sz_id = PIM_GBL_SZ_ID;
	pim->global_scope.bsrp_table = route_table_init();
	pim->global_scope.accept_nofwd_bsm = true;
	pim->global_scope.state = NO_INFO;
	pim->global_scope.pim = pim;
	pim->global_scope.bsm_list = list_new();
	pim->global_scope.bsm_list->del = (void (*)(void *))pim_bsm_node_free;
	pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
}
283 void pim_bsm_proc_free(struct pim_instance
*pim
)
285 struct route_node
*rn
;
286 struct bsgrp_node
*bsgrp
;
288 pim_bs_timer_stop(&pim
->global_scope
);
290 if (pim
->global_scope
.bsm_list
)
291 list_delete(&pim
->global_scope
.bsm_list
);
293 for (rn
= route_top(pim
->global_scope
.bsrp_table
); rn
;
294 rn
= route_next(rn
)) {
298 pim_free_bsgrp_data(bsgrp
);
301 if (pim
->global_scope
.bsrp_table
)
302 route_table_finish(pim
->global_scope
.bsrp_table
);
305 static bool is_hold_time_elapsed(void *data
)
307 struct bsm_rpinfo
*bsrp
;
311 if (bsrp
->elapse_time
< bsrp
->rp_holdtime
)
317 static int pim_on_g2rp_timer(struct thread
*t
)
319 struct bsm_rpinfo
*bsrp
;
320 struct bsm_rpinfo
*bsrp_node
;
321 struct bsgrp_node
*bsgrp_node
;
322 struct listnode
*bsrp_ln
;
323 struct pim_instance
*pim
;
324 struct rp_info
*rp_info
;
325 struct route_node
*rn
;
327 struct in_addr bsrp_addr
;
329 bsrp
= THREAD_ARG(t
);
330 THREAD_OFF(bsrp
->g2rp_timer
);
331 bsgrp_node
= bsrp
->bsgrp_node
;
333 /* elapse time is the hold time of expired node */
334 elapse
= bsrp
->rp_holdtime
;
335 bsrp_addr
= bsrp
->rp_address
;
337 /* update elapse for all bsrp nodes */
338 for (ALL_LIST_ELEMENTS_RO(bsgrp_node
->bsrp_list
, bsrp_ln
, bsrp_node
))
339 bsrp_node
->elapse_time
+= elapse
;
341 /* remove the expired nodes from the list */
342 list_filter_out_nodes(bsgrp_node
->bsrp_list
, is_hold_time_elapsed
);
344 /* Get the next elected rp node */
345 bsrp
= listnode_head(bsgrp_node
->bsrp_list
);
346 pim
= bsgrp_node
->scope
->pim
;
347 rn
= route_node_lookup(pim
->rp_table
, &bsgrp_node
->group
);
350 zlog_warn("%s: Route node doesn't exist", __PRETTY_FUNCTION__
);
354 rp_info
= (struct rp_info
*)rn
->info
;
357 route_unlock_node(rn
);
361 if (rp_info
->rp_src
!= RP_SRC_STATIC
) {
362 /* If new rp available, change it else delete the existing */
364 bsrp_addr
= bsrp
->rp_address
;
365 pim_g2rp_timer_start(
366 bsrp
, (bsrp
->rp_holdtime
- bsrp
->elapse_time
));
367 pim_rp_change(pim
, bsrp_addr
, bsgrp_node
->group
,
370 pim_rp_del(pim
, bsrp_addr
, bsgrp_node
->group
, NULL
,
375 if ((!bsgrp_node
->bsrp_list
->count
)
376 && (!bsgrp_node
->partial_bsrp_list
->count
)) {
377 pim_free_bsgrp_node(pim
->global_scope
.bsrp_table
,
379 pim_free_bsgrp_data(bsgrp_node
);
/* Arm (or rearm) the per-RP expiry timer for hold_time seconds; any
 * running instance is cancelled first.
 */
static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
{
	if (!bsrp) {
		if (PIM_DEBUG_BSM)
			zlog_debug("%s : Invalid brsp(NULL).",
				   __PRETTY_FUNCTION__);
		return;
	}
	THREAD_OFF(bsrp->g2rp_timer);
	if (PIM_DEBUG_BSM) {
		char buf[48];

		zlog_debug(
			"%s : starting g2rp timer for grp: %s - rp: %s with timeout %d secs(Actual Hold time : %d secs)",
			__PRETTY_FUNCTION__,
			prefix2str(&bsrp->bsgrp_node->group, buf, 48),
			inet_ntoa(bsrp->rp_address), hold_time,
			bsrp->rp_holdtime);
	}

	thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
			 &bsrp->g2rp_timer);
}
/* Restart is just a start: pim_g2rp_timer_start already cancels any
 * running timer before rearming.
 */
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
					  int hold_time)
{
	pim_g2rp_timer_start(bsrp, hold_time);
}
/* Cancel an RP entry's expiry timer, logging which group/RP pair it
 * belonged to. Tolerates a NULL entry.
 */
static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
{
	if (!bsrp)
		return;

	if (PIM_DEBUG_BSM) {
		char buf[48];

		zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
			   __PRETTY_FUNCTION__,
			   prefix2str(&bsrp->bsgrp_node->group, buf, 48),
			   inet_ntoa(bsrp->rp_address));
	}

	THREAD_OFF(bsrp->g2rp_timer);
}
432 static bool is_hold_time_zero(void *data
)
434 struct bsm_rpinfo
*bsrp
;
438 if (bsrp
->rp_holdtime
)
444 static void pim_instate_pend_list(struct bsgrp_node
*bsgrp_node
)
446 struct bsm_rpinfo
*active
;
447 struct bsm_rpinfo
*pend
;
449 struct rp_info
*rp_info
;
450 struct route_node
*rn
;
451 struct pim_instance
*pim
;
452 struct rp_info
*rp_all
;
453 struct prefix group_all
;
454 bool had_rp_node
= true;
456 pim
= bsgrp_node
->scope
->pim
;
457 active
= listnode_head(bsgrp_node
->bsrp_list
);
459 /* Remove nodes with hold time 0 & check if list still has a head */
460 list_filter_out_nodes(bsgrp_node
->partial_bsrp_list
, is_hold_time_zero
);
461 pend
= listnode_head(bsgrp_node
->partial_bsrp_list
);
463 if (!str2prefix("224.0.0.0/4", &group_all
))
466 rp_all
= pim_rp_find_match_group(pim
, &group_all
);
467 rn
= route_node_lookup(pim
->rp_table
, &bsgrp_node
->group
);
470 pim_g2rp_timer_start(pend
, pend
->rp_holdtime
);
472 /* if rp node doesn't exist or exist but not configured(rp_all),
473 * install the rp from head(if exists) of partial list. List is
474 * is sorted such that head is the elected RP for the group.
476 if (!rn
|| (prefix_same(&rp_all
->group
, &bsgrp_node
->group
)
477 && pim_rpf_addr_is_inaddr_none(&rp_all
->rp
))) {
479 zlog_debug("%s: Route node doesn't exist",
480 __PRETTY_FUNCTION__
);
482 pim_rp_new(pim
, pend
->rp_address
, bsgrp_node
->group
,
486 rp_info
= (struct rp_info
*)rn
->info
;
488 route_unlock_node(rn
);
490 pim_rp_new(pim
, pend
->rp_address
,
491 bsgrp_node
->group
, NULL
, RP_SRC_BSR
);
496 /* We didn't have rp node and pending list is empty(unlikely), cleanup*/
497 if ((!had_rp_node
) && (!pend
)) {
498 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
500 pim_free_bsgrp_data(bsgrp_node
);
504 if ((had_rp_node
) && (rp_info
->rp_src
!= RP_SRC_STATIC
)) {
505 /* This means we searched and got rp node, needs unlock */
506 route_unlock_node(rn
);
508 if (active
&& pend
) {
509 if ((active
->rp_address
.s_addr
510 != pend
->rp_address
.s_addr
))
511 pim_rp_change(pim
, pend
->rp_address
,
512 bsgrp_node
->group
, RP_SRC_BSR
);
515 /* Possible when the first BSM has group with 0 rp count */
516 if ((!active
) && (!pend
)) {
519 "%s: Both bsrp and partial list are empty",
520 __PRETTY_FUNCTION__
);
522 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
524 pim_free_bsgrp_data(bsgrp_node
);
528 /* Possible when a group with 0 rp count received in BSM */
529 if ((active
) && (!pend
)) {
530 pim_rp_del(pim
, active
->rp_address
, bsgrp_node
->group
,
532 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
535 zlog_debug("%s:Pend List is null,del grp node",
536 __PRETTY_FUNCTION__
);
538 pim_free_bsgrp_data(bsgrp_node
);
543 if ((had_rp_node
) && (rp_info
->rp_src
== RP_SRC_STATIC
)) {
544 /* We need to unlock rn this case */
545 route_unlock_node(rn
);
546 /* there is a chance that static rp exist and bsrp cleaned
547 * so clean bsgrp node if pending list empty
552 "%s: Partial list is empty, static rp exists",
553 __PRETTY_FUNCTION__
);
554 pim_free_bsgrp_node(bsgrp_node
->scope
->bsrp_table
,
556 pim_free_bsgrp_data(bsgrp_node
);
561 /* swap the list & delete all nodes in partial list (old bsrp_list)
563 * active is head of bsrp list
564 * pend is head of partial list
566 * active is head of partial list
567 * pend is head of bsrp list
568 * So check appriate head after swap and clean the new partial list
570 temp
= bsgrp_node
->bsrp_list
;
571 bsgrp_node
->bsrp_list
= bsgrp_node
->partial_bsrp_list
;
572 bsgrp_node
->partial_bsrp_list
= temp
;
575 pim_g2rp_timer_stop(active
);
576 list_delete_all_node(bsgrp_node
->partial_bsrp_list
);
/* RPF validation for a received multicast BSM: the packet's source must
 * match our unicast next hop toward the BSR. A brand-new BSR address is
 * checked by direct nexthop lookup; the already-tracked current BSR is
 * checked against the NHT cache.
 */
static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
			      struct in_addr ip_src_addr)
{
	struct pim_nexthop nexthop;
	int result;

	memset(&nexthop, 0, sizeof(nexthop));

	/* New BSR recived */
	if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
		result = pim_nexthop_match(pim, bsr, ip_src_addr);

		/* Nexthop lookup pass for the new BSR address */
		if (result)
			return true;

		if (PIM_DEBUG_BSM) {
			char bsr_str[INET_ADDRSTRLEN];

			pim_inet4_dump("<bsr?>", bsr, bsr_str, sizeof(bsr_str));
			zlog_debug("%s : No route to BSR address %s",
				   __PRETTY_FUNCTION__, bsr_str);
		}
		return false;
	}

	return pim_nexthop_match_nht_cache(pim, bsr, ip_src_addr);
}
609 static bool is_preferred_bsr(struct pim_instance
*pim
, struct in_addr bsr
,
612 if (bsr
.s_addr
== pim
->global_scope
.current_bsr
.s_addr
)
615 if (bsr_prio
> pim
->global_scope
.current_bsr_prio
)
618 else if (bsr_prio
== pim
->global_scope
.current_bsr_prio
) {
619 if (bsr
.s_addr
>= pim
->global_scope
.current_bsr
.s_addr
)
/* Accept 'bsr' as the current bootstrap router for the global scope.
 *
 * On a BSR change: deregister NHT for the old BSR (if one was set),
 * register NHT for the new one, record the address and first-seen
 * timestamp, and move the scope to ACCEPT_PREFERRED. Priority and the
 * last-seen timestamp are refreshed on every call.
 */
static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
			   uint32_t bsr_prio)
{
	struct pim_nexthop_cache pnc;

	if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
		struct prefix nht_p;
		char buf[PREFIX2STR_BUFFER];
		bool is_bsr_tracking = true;

		/* De-register old BSR and register new BSR with Zebra NHT */
		nht_p.family = AF_INET;
		nht_p.prefixlen = IPV4_MAX_BITLEN;

		if (pim->global_scope.current_bsr.s_addr != INADDR_ANY) {
			nht_p.u.prefix4 = pim->global_scope.current_bsr;
			if (PIM_DEBUG_BSM) {
				prefix2str(&nht_p, buf, sizeof(buf));
				zlog_debug(
					"%s: Deregister BSR addr %s with Zebra NHT",
					__PRETTY_FUNCTION__, buf);
			}
			pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL,
						   is_bsr_tracking);
		}

		nht_p.u.prefix4 = bsr;
		if (PIM_DEBUG_BSM) {
			prefix2str(&nht_p, buf, sizeof(buf));
			zlog_debug(
				"%s: NHT Register BSR addr %s with Zebra NHT",
				__PRETTY_FUNCTION__, buf);
		}

		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		pim_find_or_track_nexthop(pim, &nht_p, NULL, NULL,
					  is_bsr_tracking, &pnc);
		pim->global_scope.current_bsr = bsr;
		pim->global_scope.current_bsr_first_ts =
			pim_time_monotonic_sec();
		pim->global_scope.state = ACCEPT_PREFERRED;
	}
	pim->global_scope.current_bsr_prio = bsr_prio;
	pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
}
673 static bool pim_bsm_send_intf(uint8_t *buf
, int len
, struct interface
*ifp
,
674 struct in_addr dst_addr
)
676 struct pim_interface
*pim_ifp
;
682 zlog_debug("%s: Pim interface not available for %s",
683 __PRETTY_FUNCTION__
, ifp
->name
);
687 if (pim_ifp
->pim_sock_fd
== -1) {
689 zlog_debug("%s: Pim sock not available for %s",
690 __PRETTY_FUNCTION__
, ifp
->name
);
694 pim_msg_send(pim_ifp
->pim_sock_fd
, pim_ifp
->primary_address
, dst_addr
,
695 buf
, len
, ifp
->name
);
696 pim_ifp
->pim_ifstat_bsm_tx
++;
697 pim_ifp
->pim
->bsm_sent
++;
/* Fragment a BSM that exceeds the interface PIM MTU and send the pieces.
 *
 * Each fragment reuses the original PIM header + BSM header; only the
 * group/RP TLVs are split. A group whose RP set does not fit in one
 * fragment has its group header repeated on the next fragment with a
 * per-fragment RP count. Returns false only when the MTU is below the
 * minimum bootstrap length or allocation fails.
 *
 * NOTE(review): several hidden declarations/initializations
 * (pkt/pak_start, this_rp_cnt = 0, frag boundary handling) were
 * reconstructed — verify against upstream.
 */
static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
			      uint32_t pim_mtu, struct in_addr dst_addr,
			      bool no_fwd)
{
	struct bsmmsg_grpinfo *grpinfo, *curgrp;
	uint8_t *firstgrp_ptr;
	uint8_t *pkt;
	uint8_t *pak_start;
	uint32_t parsed_len = 0;
	uint32_t this_pkt_rem;
	uint32_t copy_byte_count;
	uint32_t this_pkt_len;
	uint8_t total_rp_cnt;
	uint8_t this_rp_cnt;
	uint8_t frag_rp_cnt;
	uint8_t rp_fit_cnt;
	bool pak_pending = false;

	/* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
	if (pim_mtu < (PIM_MIN_BSM_LEN)) {
		zlog_warn(
			"%s: mtu(pim mtu: %d) size less than minimum bootsrap len",
			__PRETTY_FUNCTION__, pim_mtu);
		if (PIM_DEBUG_BSM)
			zlog_debug(
				"%s: mtu (pim mtu:%d) less than minimum bootsrap len",
				__PRETTY_FUNCTION__, pim_mtu);
		return false;
	}

	pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);

	if (!pak_start) {
		if (PIM_DEBUG_BSM)
			zlog_debug("%s: malloc failed", __PRETTY_FUNCTION__);
		return false;
	}

	pkt = pak_start;

	/* Fill PIM header later before sending packet to calc checksum */
	pkt += PIM_MSG_HEADER_LEN;
	buf += PIM_MSG_HEADER_LEN;

	/* copy bsm header to new packet at offset of pim hdr */
	memcpy(pkt, buf, PIM_BSM_HDR_LEN);
	pkt += PIM_BSM_HDR_LEN;
	buf += PIM_BSM_HDR_LEN;
	parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);

	/* Store the position of first grp ptr, which can be reused for
	 * next packet to start filling group. old bsm header and pim hdr
	 * remains. So need not be filled again for next packet onwards.
	 */
	firstgrp_ptr = pkt;

	/* we received mtu excluding IP hdr len as param
	 * now this_pkt_rem is mtu excluding
	 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
	 */
	this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);

	/* For each group till the packet length parsed */
	while (parsed_len < len) {
		/* pkt ---> fragment's current pointer
		 * buf ---> input buffer's current pointer
		 * mtu ---> size of the pim packet - PIM header
		 * curgrp ---> current group on the fragment
		 * grpinfo ---> current group on the input buffer
		 * this_pkt_rem ---> bytes remaing on the current fragment
		 * rp_fit_cnt ---> num of rp for current grp that
		 *                 can be fit on this fragment
		 * total_rp_cnt ---> total rp present for the group in the buf
		 * frag_rp_cnt ---> no of rp for the group to be fit in
		 *                  the frag
		 * this_rp_cnt ---> how many rp have we parsed
		 */
		grpinfo = (struct bsmmsg_grpinfo *)buf;
		memcpy(pkt, buf, PIM_BSM_GRP_LEN);
		curgrp = (struct bsmmsg_grpinfo *)pkt;
		parsed_len += PIM_BSM_GRP_LEN;
		pkt += PIM_BSM_GRP_LEN;
		buf += PIM_BSM_GRP_LEN;
		this_pkt_rem -= PIM_BSM_GRP_LEN;

		/* initialize rp count and total_rp_cnt before the rp loop */
		this_rp_cnt = 0;
		total_rp_cnt = grpinfo->frag_rp_count;

		/* Loop till all RPs for the group parsed */
		while (this_rp_cnt < total_rp_cnt) {
			/* All RP from a group processed here.
			 * group is pointed by grpinfo.
			 * At this point make sure buf pointing to a RP
			 * within a group
			 */
			rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;

			/* calculate how many rp am i going to copy in
			 * this frag
			 */
			if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
				frag_rp_cnt = total_rp_cnt - this_rp_cnt;
			else
				frag_rp_cnt = rp_fit_cnt;

			/* populate the frag rp count for the current grp */
			curgrp->frag_rp_count = frag_rp_cnt;
			copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;

			/* copy all the rp that we are fitting in this
			 * frag for the grp
			 */
			memcpy(pkt, buf, copy_byte_count);
			this_rp_cnt += frag_rp_cnt;
			buf += copy_byte_count;
			pkt += copy_byte_count;
			parsed_len += copy_byte_count;
			this_pkt_rem -= copy_byte_count;

			/* Either we couldn't fit all rp for the group or the
			 * mtu reached
			 */
			if ((this_rp_cnt < total_rp_cnt)
			    || (this_pkt_rem
				< (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
				/* No space to fit in more rp, send this pkt */
				this_pkt_len = pim_mtu - this_pkt_rem;
				pim_msg_build_header(pak_start, this_pkt_len,
						     PIM_MSG_TYPE_BOOTSTRAP,
						     no_fwd);
				pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
						  dst_addr);

				/* Construct next fragment. Reuse old packet */
				pkt = firstgrp_ptr;
				this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
							  + PIM_MSG_HEADER_LEN);

				/* If pkt can't accomodate next group + atleast
				 * one rp, we must break out of this inner loop
				 * and process next RP
				 */
				if (total_rp_cnt == this_rp_cnt)
					break;

				/* If some more RPs for the same group pending,
				 * fill grp hdr
				 */
				memcpy(pkt, (uint8_t *)grpinfo,
				       PIM_BSM_GRP_LEN);
				curgrp = (struct bsmmsg_grpinfo *)pkt;
				pkt += PIM_BSM_GRP_LEN;
				this_pkt_rem -= PIM_BSM_GRP_LEN;
				pak_pending = false;
			} else {
				/* We filled something but not yet sent out */
				pak_pending = true;
			}
		} /* while RP count */
	}	  /*while parsed len */

	/* Send if we have any unsent packet */
	if (pak_pending) {
		this_pkt_len = pim_mtu - this_pkt_rem;
		pim_msg_build_header(pak_start, this_pkt_len,
				     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
		pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
				  dst_addr);
	}
	XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
	return true;
}
/* Forward a received BSM out of every BSM-enabled PIM interface in the
 * VRF (global scope only for now). Packets larger than the interface's
 * PIM MTU go through the fragmentation path; otherwise the buffer is
 * sent whole after rebuilding the PIM header.
 *
 * NOTE(review): local declarations and the MTU branch condition were
 * partially hidden and reconstructed — verify against upstream.
 */
static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
				 uint32_t len, int sz)
{
	struct interface *ifp;
	struct pim_interface *pim_ifp;
	struct in_addr dst_addr;
	uint32_t pim_mtu;
	bool no_fwd = false;
	bool ret = false;

	/* For now only global scope zone is supported, so send on all
	 * pim interfaces in the vrf
	 */
	dst_addr = qpim_all_pim_routers_addr;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if ((!pim_ifp) || (!pim_ifp->bsm_enable))
			continue;

		pim_hello_require(ifp);
		pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
		if (pim_mtu < len) {
			ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
						dst_addr, no_fwd);
			if (PIM_DEBUG_BSM)
				zlog_debug("%s: pim_bsm_frag_send returned %s",
					   __PRETTY_FUNCTION__,
					   ret ? "TRUE" : "FALSE");
		} else {
			pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
					     no_fwd);
			if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
				if (PIM_DEBUG_BSM)
					zlog_debug(
						"%s: pim_bsm_send_intf returned FALSE",
						__PRETTY_FUNCTION__);
			}
		}
	}
}
/* Forward the cached BSM database to a newly seen neighbor.
 *
 * Sends multicast unless the interface accepts unicast BSM, in which
 * case each cached BSM is unicast to the neighbor. Oversized packets go
 * through fragmentation. Returns true once all cached BSMs have been
 * processed, false when preconditions fail.
 *
 * NOTE(review): the visible DR check compares with '==' while the log
 * text says "It is not DR" — the comparison looks inverted relative to
 * the message; preserved as-is, flag for upstream verification. Hidden
 * lines (returns, debug guards) were reconstructed.
 */
bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
{
	struct in_addr dst_addr;
	struct pim_interface *pim_ifp;
	struct bsm_scope *scope;
	struct listnode *bsm_ln;
	struct bsm_info *bsminfo;
	char neigh_src_str[INET_ADDRSTRLEN];
	uint32_t pim_mtu;
	bool no_fwd = true;
	bool ret = false;

	if (PIM_DEBUG_BSM) {
		pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
			       sizeof(neigh_src_str));
		zlog_debug("%s: New neighbor %s seen on %s",
			   __PRETTY_FUNCTION__, neigh_src_str, ifp->name);
	}

	pim_ifp = ifp->info;

	/* DR only forwards BSM packet */
	if (pim_ifp->pim_dr_addr.s_addr == pim_ifp->primary_address.s_addr) {
		if (PIM_DEBUG_BSM)
			zlog_debug(
				"%s: It is not DR, so don't forward BSM packet",
				__PRETTY_FUNCTION__);
		return ret;
	}

	if (!pim_ifp->bsm_enable) {
		if (PIM_DEBUG_BSM)
			zlog_debug("%s: BSM proc not enabled on %s",
				   __PRETTY_FUNCTION__, ifp->name);
		return ret;
	}

	scope = &pim_ifp->pim->global_scope;

	if (!scope->bsm_list->count) {
		if (PIM_DEBUG_BSM)
			zlog_debug("%s: BSM list for the scope is empty",
				   __PRETTY_FUNCTION__);
		return ret;
	}

	if (!pim_ifp->ucast_bsm_accept) {
		dst_addr = qpim_all_pim_routers_addr;
		if (PIM_DEBUG_BSM)
			zlog_debug("%s: Sending BSM mcast to %s",
				   __PRETTY_FUNCTION__, neigh_src_str);
	} else {
		dst_addr = neigh->source_addr;
		if (PIM_DEBUG_BSM)
			zlog_debug("%s: Sending BSM ucast to %s",
				   __PRETTY_FUNCTION__, neigh_src_str);
	}
	pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
	pim_hello_require(ifp);

	for (ALL_LIST_ELEMENTS_RO(scope->bsm_list, bsm_ln, bsminfo)) {
		if (pim_mtu < bsminfo->size) {
			ret = pim_bsm_frag_send(bsminfo->bsm, bsminfo->size,
						ifp, pim_mtu, dst_addr, no_fwd);
			if (!ret) {
				if (PIM_DEBUG_BSM)
					zlog_debug(
						"%s: pim_bsm_frag_send failed",
						__PRETTY_FUNCTION__);
			}
		} else {
			/* Pim header needs to be constructed */
			pim_msg_build_header(bsminfo->bsm, bsminfo->size,
					     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
			ret = pim_bsm_send_intf(bsminfo->bsm, bsminfo->size,
						ifp, dst_addr);
			if (!ret) {
				if (PIM_DEBUG_BSM)
					zlog_debug(
						"%s: pim_bsm_frag_send failed",
						__PRETTY_FUNCTION__);
			}
		}
	}
	return true;
}
1001 struct bsgrp_node
*pim_bsm_get_bsgrp_node(struct bsm_scope
*scope
,
1004 struct route_node
*rn
;
1005 struct bsgrp_node
*bsgrp
;
1007 rn
= route_node_lookup(scope
->bsrp_table
, grp
);
1010 zlog_debug("%s: Route node doesn't exist for the group",
1011 __PRETTY_FUNCTION__
);
1015 route_unlock_node(rn
);
/* Compute the RFC 5059 style hash over (masked group address, RP
 * address) used as the RP election tiebreaker. hashmasklen selects how
 * many leading bits of the group address participate (0..32).
 */
static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
				    uint8_t hashmasklen)
{
	uint64_t temp;
	uint32_t hash;
	uint32_t grpaddr;
	uint32_t rp_add;
	uint32_t mask = 0xffffffff;

	/* mask to be made zero if hashmasklen is 0 because mask << 32
	 * may not give 0. hashmasklen can be 0 to 32.
	 */
	if (hashmasklen == 0)
		mask = 0;

	/* in_addr stores ip in big endian, hence network byte order
	 * convert to uint32 before processing hash
	 */
	grpaddr = ntohl(group.u.prefix4.s_addr);

	/* Avoid shifting by 32 bit on a 32 bit register */
	if (hashmasklen)
		grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
	else
		grpaddr = grpaddr & mask;

	rp_add = ntohl(rp.s_addr);
	temp = 1103515245 * ((1103515245 * grpaddr + 12345) ^ rp_add) + 12345;
	hash = temp & (0x7fffffff);
	return hash;
}
/* Build a bsm_rpinfo from one RP TLV and insert it, sorted and
 * de-duplicated, into the group's pending (partial) RP list.
 * Returns true on insertion; false on allocation failure or duplicate
 * (the duplicate's allocation is freed).
 */
static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
				   struct bsgrp_node *grpnode,
				   struct bsmmsg_rpinfo *rp)
{
	struct bsm_rpinfo *bsm_rpinfo;
	uint8_t hashMask_len = pim->global_scope.hashMasklen;

	/*memory allocation for bsm_rpinfo */
	bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_NODE, sizeof(*bsm_rpinfo));

	if (!bsm_rpinfo) {
		if (PIM_DEBUG_BSM)
			zlog_debug("%s, Memory allocation failed.\r\n",
				   __PRETTY_FUNCTION__);
		return false;
	}

	bsm_rpinfo->rp_prio = rp->rp_pri;
	bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
	memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
	       sizeof(struct in_addr));
	bsm_rpinfo->elapse_time = 0;

	/* Back pointer to the group node. */
	bsm_rpinfo->bsgrp_node = grpnode;

	/* update hash for this rp node */
	bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
					       hashMask_len);

	if (listnode_add_sort_nodup(grpnode->partial_bsrp_list, bsm_rpinfo)) {
		if (PIM_DEBUG_BSM)
			zlog_debug(
				"%s, bs_rpinfo node added to the partial bs_rplist.\r\n",
				__PRETTY_FUNCTION__);
		return true;
	}

	if (PIM_DEBUG_BSM)
		zlog_debug("%s: list node not added\n", __PRETTY_FUNCTION__);

	XFREE(MTYPE_PIM_BSRP_NODE, bsm_rpinfo);
	return false;
}
/* Track how many RPs are still expected for a group across BSM
 * fragments. A different fragment tag means a brand-new BSM: discard
 * the half-built pending list and restart the count. The group's
 * fragment tag is refreshed either way.
 */
static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
				      struct bsgrp_node *bsgrp,
				      uint16_t bsm_frag_tag,
				      uint32_t total_rp_count)
{
	if (bsgrp->pend_rp_cnt) {
		/* received bsm is different packet ,
		 * it is not same fragment.
		 */
		if (bsm_frag_tag != bsgrp->frag_tag) {
			if (PIM_DEBUG_BSM)
				zlog_debug(
					"%s,Received a new BSM ,so clear the pending bs_rpinfo list.\r\n",
					__PRETTY_FUNCTION__);
			list_delete_all_node(bsgrp->partial_bsrp_list);
			bsgrp->pend_rp_cnt = total_rp_count;
		}
	} else
		bsgrp->pend_rp_cnt = total_rp_count;

	bsgrp->frag_tag = bsm_frag_tag;
}
/* Parsing BSR packet and adding to partial list of corresponding bsgrp node
 *
 * Walks the group TLVs in 'buf' (buflen bytes of group/RP TLVs, headers
 * already stripped). For each group: creates/looks up its bsgrp node,
 * reconciles the pending RP count against the fragment tag, installs
 * each RP TLV on the pending list, and once all expected RPs for the
 * group have arrived promotes the pending list to active. Returns false
 * on a malformed group (zero RP count), true otherwise.
 *
 * NOTE(review): loop bookkeeping (offset, ins_count reset) was partly
 * hidden in this view and reconstructed — verify against upstream.
 */
static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
				       int buflen, uint16_t bsm_frag_tag)
{
	struct bsmmsg_grpinfo grpinfo;
	struct bsmmsg_rpinfo rpinfo;
	struct prefix group;
	struct bsgrp_node *bsgrp = NULL;
	int frag_rp_cnt = 0;
	int offset = 0;
	int ins_count = 0;

	while (buflen > offset) {
		/* Extract Group tlv from BSM */
		memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));

		if (PIM_DEBUG_BSM) {
			char grp_str[INET_ADDRSTRLEN];

			pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
				       sizeof(grp_str));
			zlog_debug(
				"%s, Group %s Rpcount:%d Fragment-Rp-count:%d\r\n",
				__PRETTY_FUNCTION__, grp_str, grpinfo.rp_count,
				grpinfo.frag_rp_count);
		}

		buf += sizeof(struct bsmmsg_grpinfo);
		offset += sizeof(struct bsmmsg_grpinfo);

		if (grpinfo.rp_count == 0) {
			if (PIM_DEBUG_BSM) {
				char grp_str[INET_ADDRSTRLEN];

				pim_inet4_dump("<Group?>", grpinfo.group.addr,
					       grp_str, sizeof(grp_str));
				zlog_debug(
					"%s, Rp count is zero for group: %s\r\n",
					__PRETTY_FUNCTION__, grp_str);
			}
			return false;
		}

		group.family = AF_INET;
		group.prefixlen = grpinfo.group.mask;
		group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;

		/* Get the Group node for the BSM rp table */
		bsgrp = pim_bsm_get_bsgrp_node(scope, &group);

		if (!bsgrp) {
			if (PIM_DEBUG_BSM)
				zlog_debug(
					"%s, Create new BSM Group node.\r\n",
					__PRETTY_FUNCTION__);

			/* create a new node to be added to the tree. */
			bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
						       &group);

			if (!bsgrp) {
				zlog_debug(
					"%s, Failed to get the BSM group node.\r\n",
					__PRETTY_FUNCTION__);
				continue;
			}

			bsgrp->scope = scope;
		}

		pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
					  grpinfo.rp_count);
		frag_rp_cnt = grpinfo.frag_rp_count;
		ins_count = 0;

		while (frag_rp_cnt--) {
			/* Extract RP address tlv from BSM */
			memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
			rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
			buf += sizeof(struct bsmmsg_rpinfo);
			offset += sizeof(struct bsmmsg_rpinfo);

			if (PIM_DEBUG_BSM) {
				char rp_str[INET_ADDRSTRLEN];

				pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
					       rp_str, sizeof(rp_str));
				zlog_debug(
					"%s, Rp address - %s; pri:%d hold:%d\r\n",
					__PRETTY_FUNCTION__, rp_str,
					rpinfo.rp_pri, rpinfo.rp_holdtime);
			}

			/* Call Install api to update grp-rp mappings */
			if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
				ins_count++;
		}

		bsgrp->pend_rp_cnt -= ins_count;

		if (!bsgrp->pend_rp_cnt) {
			if (PIM_DEBUG_BSM)
				zlog_debug(
					"%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
					__PRETTY_FUNCTION__);
			/* replace the bsrp_list with pending list */
			pim_instate_pend_list(bsgrp);
		}
	}
	return true;
}
1229 int pim_bsm_process(struct interface
*ifp
, struct ip
*ip_hdr
, uint8_t *buf
,
1230 uint32_t buf_size
, bool no_fwd
)
1232 struct bsm_hdr
*bshdr
;
1233 int sz
= PIM_GBL_SZ_ID
;
1234 struct bsmmsg_grpinfo
*msg_grp
;
1235 struct pim_interface
*pim_ifp
= NULL
;
1236 struct bsm_info
*bsminfo
;
1237 struct pim_instance
*pim
;
1238 char bsr_str
[INET_ADDRSTRLEN
];
1240 bool empty_bsm
= FALSE
;
1242 /* BSM Packet acceptance validation */
1243 pim_ifp
= ifp
->info
;
1246 zlog_debug("%s: multicast not enabled on interface %s",
1247 __PRETTY_FUNCTION__
, ifp
->name
);
1251 pim_ifp
->pim_ifstat_bsm_rx
++;
1255 /* Drop if bsm processing is disabled on interface */
1256 if (!pim_ifp
->bsm_enable
) {
1257 zlog_warn("%s: BSM not enabled on interface %s",
1258 __PRETTY_FUNCTION__
, ifp
->name
);
1259 pim_ifp
->pim_ifstat_bsm_cfg_miss
++;
1264 bshdr
= (struct bsm_hdr
*)(buf
+ PIM_MSG_HEADER_LEN
);
1265 pim_inet4_dump("<bsr?>", bshdr
->bsr_addr
.addr
, bsr_str
,
1267 pim
->global_scope
.hashMasklen
= bshdr
->hm_len
;
1268 frag_tag
= ntohs(bshdr
->frag_tag
);
1270 /* Identify empty BSM */
1271 if ((buf_size
- PIM_BSM_HDR_LEN
- PIM_MSG_HEADER_LEN
) < PIM_BSM_GRP_LEN
)
1275 msg_grp
= (struct bsmmsg_grpinfo
*)(buf
+ PIM_MSG_HEADER_LEN
1277 /* Currently we don't support scope zoned BSM */
1278 if (msg_grp
->group
.sz
) {
1281 "%s : Administratively scoped range BSM received",
1282 __PRETTY_FUNCTION__
);
1283 pim_ifp
->pim_ifstat_bsm_invalid_sz
++;
1289 /* Drop if bsr is not preferred bsr */
1290 if (!is_preferred_bsr(pim
, bshdr
->bsr_addr
.addr
, bshdr
->bsr_prio
)) {
1292 zlog_debug("%s : Received a non-preferred BSM",
1293 __PRETTY_FUNCTION__
);
1299 /* only accept no-forward BSM if quick refresh on startup */
1300 if ((pim
->global_scope
.accept_nofwd_bsm
)
1301 || (frag_tag
== pim
->global_scope
.bsm_frag_tag
)) {
1302 pim
->global_scope
.accept_nofwd_bsm
= false;
1306 "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
1307 __PRETTY_FUNCTION__
, bsr_str
);
1309 pim_ifp
->pim_ifstat_ucast_bsm_cfg_miss
++;
1314 /* Mulicast BSM received */
1315 if (ip_hdr
->ip_dst
.s_addr
== qpim_all_pim_routers_addr
.s_addr
) {
1317 if (!pim_bsr_rpf_check(pim
, bshdr
->bsr_addr
.addr
,
1321 "%s : RPF check fail for BSR address %s",
1322 __PRETTY_FUNCTION__
, bsr_str
);
1327 } else if (if_lookup_exact_address(&ip_hdr
->ip_dst
, AF_INET
,
1329 /* Unicast BSM received - if ucast bsm not enabled on
1330 * the interface, drop it
1332 if (!pim_ifp
->ucast_bsm_accept
) {
1335 "%s : Unicast BSM not enabled on interface %s",
1336 __PRETTY_FUNCTION__
, ifp
->name
);
1337 pim_ifp
->pim_ifstat_ucast_bsm_cfg_miss
++;
1344 zlog_debug("%s : Invalid destination address",
1345 __PRETTY_FUNCTION__
);
1352 zlog_debug("%s : Empty Pref BSM received",
1353 __PRETTY_FUNCTION__
);
1355 /* Parse Update bsm rp table and install/uninstall rp if required */
1356 if (!pim_bsm_parse_install_g2rp(
1357 &pim_ifp
->pim
->global_scope
,
1358 (buf
+ PIM_BSM_HDR_LEN
+ PIM_MSG_HEADER_LEN
),
1359 (buf_size
- PIM_BSM_HDR_LEN
- PIM_MSG_HEADER_LEN
),
1361 if (PIM_DEBUG_BSM
) {
1362 zlog_debug("%s, Parsing BSM failed.\r\n",
1363 __PRETTY_FUNCTION__
);
1368 /* Restart the bootstrap timer */
1369 pim_bs_timer_restart(&pim_ifp
->pim
->global_scope
,
1370 PIM_BSR_DEFAULT_TIMEOUT
);
1372 /* If new BSM received, clear the old bsm database */
1373 if (pim_ifp
->pim
->global_scope
.bsm_frag_tag
!= frag_tag
) {
1374 if (PIM_DEBUG_BSM
) {
1375 zlog_debug("%s: Current frag tag: %d Frag teg rcvd: %d",
1376 __PRETTY_FUNCTION__
,
1377 pim_ifp
->pim
->global_scope
.bsm_frag_tag
,
1380 list_delete_all_node(pim_ifp
->pim
->global_scope
.bsm_list
);
1381 pim_ifp
->pim
->global_scope
.bsm_frag_tag
= frag_tag
;
1384 /* update the scope information from bsm */
1385 pim_bsm_update(pim
, bshdr
->bsr_addr
.addr
, bshdr
->bsr_prio
);
1388 pim_bsm_fwd_whole_sz(pim_ifp
->pim
, buf
, buf_size
, sz
);
1389 bsminfo
= XCALLOC(MTYPE_PIM_BSM_INFO
, sizeof(struct bsm_info
));
1391 zlog_warn("%s: bsminfo alloc failed",
1392 __PRETTY_FUNCTION__
);
1396 bsminfo
->bsm
= XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM
, buf_size
);
1397 if (!bsminfo
->bsm
) {
1398 zlog_warn("%s: bsm alloc failed", __PRETTY_FUNCTION__
);
1399 XFREE(MTYPE_PIM_BSM_INFO
, bsminfo
);
1403 bsminfo
->size
= buf_size
;
1404 memcpy(bsminfo
->bsm
, buf
, buf_size
);
1405 listnode_add(pim_ifp
->pim
->global_scope
.bsm_list
, bsminfo
);