1 /*
2 * pim_bsm.c: PIM BSM handling routines
3 *
4 * Copyright (C) 2018-19 Vmware, Inc.
5 * Saravanan K
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
20 * MA 02110-1301 USA
21 */
22
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "if.h"
28 #include "pimd.h"
29 #include "pim_iface.h"
30 #include "pim_instance.h"
31 #include "pim_rpf.h"
32 #include "pim_hello.h"
33 #include "pim_pim.h"
34 #include "pim_nht.h"
35 #include "pim_bsm.h"
36 #include "pim_time.h"
37
38 /* Functions forward declaration */
39 static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
40 static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
41 static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
42 int hold_time);
43
44 /* Memory Types */
45 DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info");
46 DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_INFO, "PIM BSR advertised RP info");
47 DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_FRAG, "PIM BSM fragment");
48 DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet");
49
50 /* All forwarded BSM packets shall fit within the IP MTU less the maximum IP header length */
51 #define MAX_IP_HDR_LEN 24
52
53 /* pim_bsm_write_config - Write the interface pim bsm configuration.*/
54 void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
55 {
56 struct pim_interface *pim_ifp = ifp->info;
57
58 if (pim_ifp) {
59 if (!pim_ifp->bsm_enable)
60 vty_out(vty, " no ip pim bsm\n");
61 if (!pim_ifp->ucast_bsm_accept)
62 vty_out(vty, " no ip pim unicast-bsm\n");
63 }
64 }
65
66 static void pim_bsm_rpinfo_free(struct bsm_rpinfo *bsrp_info)
67 {
68 THREAD_OFF(bsrp_info->g2rp_timer);
69 XFREE(MTYPE_PIM_BSRP_INFO, bsrp_info);
70 }
71
72 void pim_bsm_rpinfos_free(struct bsm_rpinfos_head *head)
73 {
74 struct bsm_rpinfo *bsrp_info;
75
76 while ((bsrp_info = bsm_rpinfos_pop(head)))
77 pim_bsm_rpinfo_free(bsrp_info);
78 }
79
80 void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
81 {
82 pim_bsm_rpinfos_free(bsgrp_node->bsrp_list);
83 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
84 XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
85 }
86
87 void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
88 {
89 struct route_node *rn;
90
91 rn = route_node_lookup(rt, grp);
92 if (rn) {
93 rn->info = NULL;
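		/* Unlock twice: once for the reference taken by the lookup
		 * above and once for the reference held while rn->info
		 * pointed at this bsgrp node, so the route node can be freed.
		 */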
94 route_unlock_node(rn);
95 route_unlock_node(rn);
96 }
97 }
98
99 static void pim_bsm_frag_free(struct bsm_frag *bsfrag)
100 {
101 XFREE(MTYPE_PIM_BSM_FRAG, bsfrag);
102 }
103
104 void pim_bsm_frags_free(struct bsm_scope *scope)
105 {
106 struct bsm_frag *bsfrag;
107
108 while ((bsfrag = bsm_frags_pop(scope->bsm_frags)))
109 pim_bsm_frag_free(bsfrag);
110 }
111
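/* Example (illustrative values): for two candidate RPs of the same group,
 * an entry with rp_prio 10 sorts ahead of one with rp_prio 20 and wins the
 * election; if both have rp_prio 10, the entry with the larger hash sorts
 * first, and only a hash tie falls through to the RP address comparison.
 */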
112 int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,
113 const struct bsm_rpinfo *node2)
114 {
115 /* RP election algorithm:
116  * Step-1 : Lowest RP priority has the highest precedence.
117  * Step-2 : If the priorities are the same, the higher hash
118  *          value has precedence.
119  * Step-3 : If the hash values are also the same, the highest
120  *          RP address becomes the elected RP.
121  */
122 if (node1->rp_prio < node2->rp_prio)
123 return -1;
124 if (node1->rp_prio > node2->rp_prio)
125 return 1;
126 if (node1->hash < node2->hash)
127 return 1;
128 if (node1->hash > node2->hash)
129 return -1;
130 if (node1->rp_address.s_addr < node2->rp_address.s_addr)
131 return 1;
132 if (node1->rp_address.s_addr > node2->rp_address.s_addr)
133 return -1;
134 return 0;
135 }
136
137 static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
138 struct prefix *grp)
139 {
140 struct route_node *rn;
141 struct bsgrp_node *bsgrp;
142
143 rn = route_node_get(rt, grp);
144 if (!rn) {
145 zlog_warn("%s: route node creation failed", __func__);
146 return NULL;
147 }
148 bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
149
150 rn->info = bsgrp;
151 bsm_rpinfos_init(bsgrp->bsrp_list);
152 bsm_rpinfos_init(bsgrp->partial_bsrp_list);
153
154 prefix_copy(&bsgrp->group, grp);
155 return bsgrp;
156 }
157
158 static int pim_on_bs_timer(struct thread *t)
159 {
160 struct route_node *rn;
161 struct bsm_scope *scope;
162 struct bsgrp_node *bsgrp_node;
163 struct bsm_rpinfo *bsrp;
164 struct prefix nht_p;
165 bool is_bsr_tracking = true;
166
167 scope = THREAD_ARG(t);
168 THREAD_OFF(scope->bs_timer);
169
170 if (PIM_DEBUG_BSM)
171 zlog_debug("%s: Bootstrap Timer expired for scope: %d",
172 __func__, scope->sz_id);
173
174 /* Remove next hop tracking for the bsr */
175 nht_p.family = AF_INET;
176 nht_p.prefixlen = IPV4_MAX_BITLEN;
177 nht_p.u.prefix4 = scope->current_bsr;
178 if (PIM_DEBUG_BSM)
179 zlog_debug("%s: Deregister BSR addr %pFX with Zebra NHT",
180 __func__, &nht_p);
181 pim_delete_tracked_nexthop(scope->pim, &nht_p, NULL, NULL,
182 is_bsr_tracking);
183
184 /* Reset scope zone data */
185 scope->accept_nofwd_bsm = false;
186 scope->state = ACCEPT_ANY;
187 scope->current_bsr.s_addr = INADDR_ANY;
188 scope->current_bsr_prio = 0;
189 scope->current_bsr_first_ts = 0;
190 scope->current_bsr_last_ts = 0;
191 scope->bsm_frag_tag = 0;
192 pim_bsm_frags_free(scope);
193
194 for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
195
196 bsgrp_node = (struct bsgrp_node *)rn->info;
197 if (!bsgrp_node) {
198 if (PIM_DEBUG_BSM)
199 zlog_debug("%s: bsgrp_node is null", __func__);
200 continue;
201 }
202 /* Give grace time for rp to continue for another hold time */
203 bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
204 if (bsrp)
205 pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
206
207 /* clear pending list */
208 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
209 bsgrp_node->pend_rp_cnt = 0;
210 }
211 return 0;
212 }
213
214 void pim_bs_timer_stop(struct bsm_scope *scope)
215 {
216 if (PIM_DEBUG_BSM)
217 zlog_debug("%s : BS timer being stopped for sz: %d", __func__,
218 scope->sz_id);
219 THREAD_OFF(scope->bs_timer);
220 }
221
222 static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
223 {
224 if (!scope) {
225 if (PIM_DEBUG_BSM)
226 zlog_debug("%s : Invalid scope(NULL).", __func__);
227 return;
228 }
229 THREAD_OFF(scope->bs_timer);
230 if (PIM_DEBUG_BSM)
231 zlog_debug(
232 "%s : starting bs timer for scope %d with timeout %d secs",
233 __func__, scope->sz_id, bs_timeout);
234 thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
235 &scope->bs_timer);
236 }
237
238 static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
239 {
240 pim_bs_timer_start(scope, bs_timeout);
241 }
242
243 void pim_bsm_proc_init(struct pim_instance *pim)
244 {
245 memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
246
247 pim->global_scope.sz_id = PIM_GBL_SZ_ID;
248 pim->global_scope.bsrp_table = route_table_init();
249 pim->global_scope.accept_nofwd_bsm = true;
250 pim->global_scope.state = NO_INFO;
251 pim->global_scope.pim = pim;
252 bsm_frags_init(pim->global_scope.bsm_frags);
253 pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
254 }
255
256 void pim_bsm_proc_free(struct pim_instance *pim)
257 {
258 struct route_node *rn;
259 struct bsgrp_node *bsgrp;
260
261 pim_bs_timer_stop(&pim->global_scope);
262 pim_bsm_frags_free(&pim->global_scope);
263
264 for (rn = route_top(pim->global_scope.bsrp_table); rn;
265 rn = route_next(rn)) {
266 bsgrp = rn->info;
267 if (!bsgrp)
268 continue;
269 pim_free_bsgrp_data(bsgrp);
270 }
271
272 route_table_finish(pim->global_scope.bsrp_table);
273 }
274
275 static bool is_hold_time_elapsed(void *data)
276 {
277 struct bsm_rpinfo *bsrp;
278
279 bsrp = data;
280
281 if (bsrp->elapse_time < bsrp->rp_holdtime)
282 return false;
283 else
284 return true;
285 }
286
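/* Group-to-RP mapping expiry handler: runs when the holdtime of the
 * currently elected RP for a group expires. The expired holdtime is charged
 * to every remaining RP of the group, fully aged-out entries are removed,
 * and the group is moved to the next best RP (or the BSR-learned RP is
 * deleted if none remains and no static RP overrides it).
 */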
287 static int pim_on_g2rp_timer(struct thread *t)
288 {
289 struct bsm_rpinfo *bsrp;
290 struct bsm_rpinfo *bsrp_node;
291 struct bsgrp_node *bsgrp_node;
292 struct pim_instance *pim;
293 struct rp_info *rp_info;
294 struct route_node *rn;
295 uint16_t elapse;
296 struct in_addr bsrp_addr;
297
298 bsrp = THREAD_ARG(t);
299 THREAD_OFF(bsrp->g2rp_timer);
300 bsgrp_node = bsrp->bsgrp_node;
301
302 /* elapse time is the hold time of expired node */
303 elapse = bsrp->rp_holdtime;
304 bsrp_addr = bsrp->rp_address;
305
306 /* update elapse for all bsrp nodes */
307 frr_each_safe (bsm_rpinfos, bsgrp_node->bsrp_list, bsrp_node) {
308 bsrp_node->elapse_time += elapse;
309
310 if (is_hold_time_elapsed(bsrp_node)) {
311 bsm_rpinfos_del(bsgrp_node->bsrp_list, bsrp_node);
312 pim_bsm_rpinfo_free(bsrp_node);
313 }
314 }
315
316 /* Get the next elected rp node */
317 bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
318 pim = bsgrp_node->scope->pim;
319 rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
320
321 if (!rn) {
322 zlog_warn("%s: Route node doesn't exist", __func__);
323 return 0;
324 }
325
326 rp_info = (struct rp_info *)rn->info;
327
328 if (!rp_info) {
329 route_unlock_node(rn);
330 return 0;
331 }
332
333 if (rp_info->rp_src != RP_SRC_STATIC) {
334 /* If new rp available, change it else delete the existing */
335 if (bsrp) {
336 bsrp_addr = bsrp->rp_address;
337 pim_g2rp_timer_start(
338 bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
339 pim_rp_change(pim, bsrp_addr, bsgrp_node->group,
340 RP_SRC_BSR);
341 } else {
342 pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
343 RP_SRC_BSR);
344 }
345 }
346
347 if (!bsm_rpinfos_count(bsgrp_node->bsrp_list)
348 && !bsm_rpinfos_count(bsgrp_node->partial_bsrp_list)) {
349 pim_free_bsgrp_node(pim->global_scope.bsrp_table,
350 &bsgrp_node->group);
351 pim_free_bsgrp_data(bsgrp_node);
352 }
353
354 return 0;
355 }
356
357 static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
358 {
359 if (!bsrp) {
360 if (PIM_DEBUG_BSM)
361 zlog_debug("%s : Invalid bsrp(NULL).", __func__);
362 return;
363 }
364 THREAD_OFF(bsrp->g2rp_timer);
365 if (PIM_DEBUG_BSM)
366 zlog_debug(
367 "%s : starting g2rp timer for grp: %pFX - rp: %pI4 with timeout %d secs(Actual Hold time : %d secs)",
368 __func__, &bsrp->bsgrp_node->group,
369 &bsrp->rp_address, hold_time,
370 bsrp->rp_holdtime);
371
372 thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
373 &bsrp->g2rp_timer);
374 }
375
376 static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
377 int hold_time)
378 {
379 pim_g2rp_timer_start(bsrp, hold_time);
380 }
381
382 static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
383 {
384 if (!bsrp)
385 return;
386
387 if (PIM_DEBUG_BSM)
388 zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pI4",
389 __func__, &bsrp->bsgrp_node->group,
390 &bsrp->rp_address);
391
392 THREAD_OFF(bsrp->g2rp_timer);
393 }
394
395 static bool is_hold_time_zero(void *data)
396 {
397 struct bsm_rpinfo *bsrp;
398
399 bsrp = data;
400
401 if (bsrp->rp_holdtime)
402 return false;
403 else
404 return true;
405 }
406
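/* Promote a group's pending (partial) RP list to the active list once all
 * RPs advertised for the group have been received: install, change or
 * delete the group's RP mapping as required, then swap the lists so the old
 * active list becomes the (cleared) partial list for the next BSM.
 */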
407 static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
408 {
409 struct bsm_rpinfo *active;
410 struct bsm_rpinfo *pend;
411 struct rp_info *rp_info;
412 struct route_node *rn;
413 struct pim_instance *pim;
414 struct rp_info *rp_all;
415 struct prefix group_all;
416 bool had_rp_node = true;
417
418 pim = bsgrp_node->scope->pim;
419 active = bsm_rpinfos_first(bsgrp_node->bsrp_list);
420
421 /* Remove nodes with hold time 0 & check if list still has a head */
422 frr_each_safe (bsm_rpinfos, bsgrp_node->partial_bsrp_list, pend)
423 if (is_hold_time_zero(pend))
424 bsm_rpinfos_del(bsgrp_node->partial_bsrp_list, pend);
425
426 pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
427
428 if (!str2prefix("224.0.0.0/4", &group_all))
429 return;
430
431 rp_all = pim_rp_find_match_group(pim, &group_all);
432 rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
433
434 if (pend)
435 pim_g2rp_timer_start(pend, pend->rp_holdtime);
436
437 /* If the rp node doesn't exist, or exists but is not configured (rp_all),
438  * install the RP from the head (if any) of the partial list. The list
439  * is sorted such that the head is the elected RP for the group.
440  */
441 if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group)
442 && pim_rpf_addr_is_inaddr_none(&rp_all->rp))) {
443 if (PIM_DEBUG_BSM)
444 zlog_debug("%s: Route node doesn't exist", __func__);
445 if (pend)
446 pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
447 NULL, RP_SRC_BSR);
448 had_rp_node = false;
449 } else {
450 rp_info = (struct rp_info *)rn->info;
451 if (!rp_info) {
452 route_unlock_node(rn);
453 if (pend)
454 pim_rp_new(pim, pend->rp_address,
455 bsgrp_node->group, NULL, RP_SRC_BSR);
456 had_rp_node = false;
457 }
458 }
459
460 /* We didn't have an rp node and the pending list is empty (unlikely), clean up */
461 if ((!had_rp_node) && (!pend)) {
462 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
463 &bsgrp_node->group);
464 pim_free_bsgrp_data(bsgrp_node);
465 return;
466 }
467
468 if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
469 /* This means we searched and got rp node, needs unlock */
470 route_unlock_node(rn);
471
472 if (active && pend) {
473 if ((active->rp_address.s_addr
474 != pend->rp_address.s_addr))
475 pim_rp_change(pim, pend->rp_address,
476 bsgrp_node->group, RP_SRC_BSR);
477 }
478
479 /* Possible when the first BSM has group with 0 rp count */
480 if ((!active) && (!pend)) {
481 if (PIM_DEBUG_BSM) {
482 zlog_debug(
483 "%s: Both bsrp and partial list are empty",
484 __func__);
485 }
486 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
487 &bsgrp_node->group);
488 pim_free_bsgrp_data(bsgrp_node);
489 return;
490 }
491
492 /* Possible when a group with 0 rp count received in BSM */
493 if ((active) && (!pend)) {
494 pim_rp_del(pim, active->rp_address, bsgrp_node->group,
495 NULL, RP_SRC_BSR);
496 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
497 &bsgrp_node->group);
498 if (PIM_DEBUG_BSM) {
499 zlog_debug("%s: Pend list is null, del grp node",
500 __func__);
501 }
502 pim_free_bsgrp_data(bsgrp_node);
503 return;
504 }
505 }
506
507 if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
508 /* We need to unlock rn in this case */
509 route_unlock_node(rn);
510 /* There is a chance that a static RP exists and the bsrp was
511  * cleaned, so clean the bsgrp node if the pending list is empty
512  */
513 if (!pend) {
514 if (PIM_DEBUG_BSM)
515 zlog_debug(
516 "%s: Partial list is empty, static rp exists",
517 __func__);
518 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
519 &bsgrp_node->group);
520 pim_free_bsgrp_data(bsgrp_node);
521 return;
522 }
523 }
524
525 /* Swap the lists & delete all nodes in the partial list (old bsrp_list).
526  * Before the swap:
527  * active is the head of the bsrp list
528  * pend is the head of the partial list
529  * After the swap:
530  * active is the head of the partial list
531  * pend is the head of the bsrp list
532  * So check the appropriate head after the swap and clean the new partial list.
533  */
534 bsm_rpinfos_swap_all(bsgrp_node->bsrp_list,
535 bsgrp_node->partial_bsrp_list);
536
537 if (active)
538 pim_g2rp_timer_stop(active);
539 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
540 }
541
542 static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
543 struct in_addr ip_src_addr)
544 {
545 struct pim_nexthop nexthop;
546 int result;
547
548 memset(&nexthop, 0, sizeof(nexthop));
549
550 /* New BSR received */
551 if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
552 result = pim_nexthop_match(pim, bsr, ip_src_addr);
553
554 /* Nexthop lookup pass for the new BSR address */
555 if (result)
556 return true;
557
558 if (PIM_DEBUG_BSM) {
559 char bsr_str[INET_ADDRSTRLEN];
560
561 pim_inet4_dump("<bsr?>", bsr, bsr_str, sizeof(bsr_str));
562 zlog_debug("%s : No route to BSR address %s", __func__,
563 bsr_str);
564 }
565 return false;
566 }
567
568 return pim_nexthop_match_nht_cache(pim, bsr, ip_src_addr);
569 }
570
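/* BSR preference, following the election rules of RFC 5059: the currently
 * elected BSR is always accepted again; otherwise a higher BSR priority
 * wins, and a priority tie is broken in favor of the numerically higher
 * BSR address.
 */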
571 static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
572 uint32_t bsr_prio)
573 {
574 if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
575 return true;
576
577 if (bsr_prio > pim->global_scope.current_bsr_prio)
578 return true;
579
580 else if (bsr_prio == pim->global_scope.current_bsr_prio) {
581 if (ntohl(bsr.s_addr)
582 >= ntohl(pim->global_scope.current_bsr.s_addr))
583 return true;
584 else
585 return false;
586 } else
587 return false;
588 }
589
590 static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
591 uint32_t bsr_prio)
592 {
593 struct pim_nexthop_cache pnc;
594
595 if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
596 struct prefix nht_p;
597 bool is_bsr_tracking = true;
598
599 /* De-register old BSR and register new BSR with Zebra NHT */
600 nht_p.family = AF_INET;
601 nht_p.prefixlen = IPV4_MAX_BITLEN;
602
603 if (pim->global_scope.current_bsr.s_addr != INADDR_ANY) {
604 nht_p.u.prefix4 = pim->global_scope.current_bsr;
605 if (PIM_DEBUG_BSM)
606 zlog_debug(
607 "%s: Deregister BSR addr %pFX with Zebra NHT",
608 __func__, &nht_p);
609 pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL,
610 is_bsr_tracking);
611 }
612
613 nht_p.u.prefix4 = bsr;
614 if (PIM_DEBUG_BSM)
615 zlog_debug(
616 "%s: NHT Register BSR addr %pFX with Zebra NHT",
617 __func__, &nht_p);
618
619 memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
620 pim_find_or_track_nexthop(pim, &nht_p, NULL, NULL,
621 is_bsr_tracking, &pnc);
622 pim->global_scope.current_bsr = bsr;
623 pim->global_scope.current_bsr_first_ts =
624 pim_time_monotonic_sec();
625 pim->global_scope.state = ACCEPT_PREFERRED;
626 }
627 pim->global_scope.current_bsr_prio = bsr_prio;
628 pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
629 }
630
631 static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
632 struct in_addr dst_addr)
633 {
634 struct pim_interface *pim_ifp;
635
636 pim_ifp = ifp->info;
637
638 if (!pim_ifp) {
639 if (PIM_DEBUG_BSM)
640 zlog_debug("%s: Pim interface not available for %s",
641 __func__, ifp->name);
642 return false;
643 }
644
645 if (pim_ifp->pim_sock_fd == -1) {
646 if (PIM_DEBUG_BSM)
647 zlog_debug("%s: Pim sock not available for %s",
648 __func__, ifp->name);
649 return false;
650 }
651
652 if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
653 dst_addr, buf, len, ifp->name)) {
654 zlog_warn("%s: Could not send BSM message on interface: %s",
655 __func__, ifp->name);
656 return false;
657 }
658
659 pim_ifp->pim_ifstat_bsm_tx++;
660 pim_ifp->pim->bsm_sent++;
661 return true;
662 }
663
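/* Split a BSM that exceeds the interface PIM MTU into multiple fragments.
 * Each fragment repeats the PIM header and the original BSM header; group
 * entries are copied with frag_rp_count adjusted to the number of RPs that
 * actually fit, so the RP set of a group may be spread across fragments.
 */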
664 static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
665 uint32_t pim_mtu, struct in_addr dst_addr,
666 bool no_fwd)
667 {
668 struct bsmmsg_grpinfo *grpinfo, *curgrp;
669 uint8_t *firstgrp_ptr;
670 uint8_t *pkt;
671 uint8_t *pak_start;
672 uint32_t parsed_len = 0;
673 uint32_t this_pkt_rem;
674 uint32_t copy_byte_count;
675 uint32_t this_pkt_len;
676 uint8_t total_rp_cnt;
677 uint8_t this_rp_cnt;
678 uint8_t frag_rp_cnt;
679 uint8_t rp_fit_cnt;
680 bool pak_pending = false;
681
682 /* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
683 if (pim_mtu < (PIM_MIN_BSM_LEN)) {
684 zlog_warn(
685 "%s: mtu(pim mtu: %d) size less than minimum bootstrap len",
686 __func__, pim_mtu);
687 if (PIM_DEBUG_BSM)
688 zlog_debug(
689 "%s: mtu (pim mtu:%d) less than minimum bootstrap len",
690 __func__, pim_mtu);
691 return false;
692 }
693
694 pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
695
696 pkt = pak_start;
697
698 /* Fill PIM header later before sending packet to calc checksum */
699 pkt += PIM_MSG_HEADER_LEN;
700 buf += PIM_MSG_HEADER_LEN;
701
702 /* copy bsm header to new packet at offset of pim hdr */
703 memcpy(pkt, buf, PIM_BSM_HDR_LEN);
704 pkt += PIM_BSM_HDR_LEN;
705 buf += PIM_BSM_HDR_LEN;
706 parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
707
708 /* Store the position of the first group pointer; it can be reused for
709  * the next packet to start filling groups. The old BSM header and PIM
710  * header remain, so they need not be filled again for later packets.
711  */
712 firstgrp_ptr = pkt;
713
714 /* we received mtu excluding IP hdr len as param
715 * now this_pkt_rem is mtu excluding
716 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
717 */
718 this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
719
720 /* For each group till the packet length parsed */
721 while (parsed_len < len) {
722 /* pkt ---> fragment's current pointer
723 * buf ---> input buffer's current pointer
724 * mtu ---> size of the pim packet - PIM header
725 * curgrp ---> current group on the fragment
726 * grpinfo ---> current group on the input buffer
727  * this_pkt_rem ---> bytes remaining on the current fragment
728 * rp_fit_cnt ---> num of rp for current grp that
729 * fits this frag
730 * total_rp_cnt ---> total rp present for the group in the buf
731 * frag_rp_cnt ---> no of rp for the group to be fit in
732 * the frag
733 * this_rp_cnt ---> how many rp have we parsed
734 */
735 grpinfo = (struct bsmmsg_grpinfo *)buf;
736 memcpy(pkt, buf, PIM_BSM_GRP_LEN);
737 curgrp = (struct bsmmsg_grpinfo *)pkt;
738 parsed_len += PIM_BSM_GRP_LEN;
739 pkt += PIM_BSM_GRP_LEN;
740 buf += PIM_BSM_GRP_LEN;
741 this_pkt_rem -= PIM_BSM_GRP_LEN;
742
743 /* initialize rp count and total_rp_cnt before the rp loop */
744 this_rp_cnt = 0;
745 total_rp_cnt = grpinfo->frag_rp_count;
746
747 /* Loop till all RPs for the group parsed */
748 while (this_rp_cnt < total_rp_cnt) {
749 /* All RP from a group processed here.
750 * group is pointed by grpinfo.
751 * At this point make sure buf pointing to a RP
752 * within a group
753 */
754 rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
755
756 /* calculate how many RPs are going to be copied in
757  * this fragment
758  */
759 if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
760 frag_rp_cnt = total_rp_cnt - this_rp_cnt;
761 else
762 frag_rp_cnt = rp_fit_cnt;
763
764 /* populate the frag rp count for the current grp */
765 curgrp->frag_rp_count = frag_rp_cnt;
766 copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
767
768 /* copy all the rp that we are fitting in this
769 * frag for the grp
770 */
771 memcpy(pkt, buf, copy_byte_count);
772 this_rp_cnt += frag_rp_cnt;
773 buf += copy_byte_count;
774 pkt += copy_byte_count;
775 parsed_len += copy_byte_count;
776 this_pkt_rem -= copy_byte_count;
777
778 /* Either we couldn't fit all RPs for the group or the
779  * MTU was reached
780  */
781 if ((this_rp_cnt < total_rp_cnt)
782 || (this_pkt_rem
783 < (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
784 /* No space to fit in more rp, send this pkt */
785 this_pkt_len = pim_mtu - this_pkt_rem;
786 pim_msg_build_header(pak_start, this_pkt_len,
787 PIM_MSG_TYPE_BOOTSTRAP,
788 no_fwd);
789 pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
790 dst_addr);
791
792 /* Construct next fragment. Reuse old packet */
793 pkt = firstgrp_ptr;
794 this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
795 + PIM_MSG_HEADER_LEN);
796
797 /* If all RPs of this group have been copied, break
798  * out of this inner loop and move on to the next
799  * group in the outer loop.
800  */
801 if (total_rp_cnt == this_rp_cnt)
802 break;
803
804 /* If more RPs for the same group are pending,
805  * fill the group header again
806  */
807 memcpy(pkt, (uint8_t *)grpinfo,
808 PIM_BSM_GRP_LEN);
809 curgrp = (struct bsmmsg_grpinfo *)pkt;
810 pkt += PIM_BSM_GRP_LEN;
811 this_pkt_rem -= PIM_BSM_GRP_LEN;
812 pak_pending = false;
813 } else {
814 /* We filled something but not yet sent out */
815 pak_pending = true;
816 }
817 } /* while RP count */
818 } /* while parsed len */
819
820 /* Send if we have any unsent packet */
821 if (pak_pending) {
822 this_pkt_len = pim_mtu - this_pkt_rem;
823 pim_msg_build_header(pak_start, this_pkt_len,
824 PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
825 pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
826 dst_addr);
827 }
828 XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
829 return true;
830 }
831
832 static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
833 uint32_t len, int sz)
834 {
835 struct interface *ifp;
836 struct pim_interface *pim_ifp;
837 struct in_addr dst_addr;
838 uint32_t pim_mtu;
839 bool no_fwd = false;
840 bool ret = false;
841
842 /* For now only global scope zone is supported, so send on all
843 * pim interfaces in the vrf
844 */
845 dst_addr = qpim_all_pim_routers_addr;
846 FOR_ALL_INTERFACES (pim->vrf, ifp) {
847 pim_ifp = ifp->info;
848 if ((!pim_ifp) || (!pim_ifp->bsm_enable))
849 continue;
850
851 /*
852 * RFC 5059 Sec 3.4:
853 * When a Bootstrap message is forwarded, it is forwarded out
854 * of every multicast-capable interface that has PIM neighbors.
855 *
856 * So skipping pim interfaces with no neighbors.
857 */
858 if (listcount(pim_ifp->pim_neighbor_list) == 0)
859 continue;
860
861 pim_hello_require(ifp);
862 pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
863 if (pim_mtu < len) {
864 ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
865 dst_addr, no_fwd);
866 if (PIM_DEBUG_BSM)
867 zlog_debug("%s: pim_bsm_frag_send returned %s",
868 __func__, ret ? "TRUE" : "FALSE");
869 } else {
870 pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
871 no_fwd);
872 if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
873 if (PIM_DEBUG_BSM)
874 zlog_debug(
875 "%s: pim_bsm_send_intf returned false",
876 __func__);
877 }
878 }
879 }
880 }
881
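/* Per RFC 5059, when a new PIM neighbor is detected the stored BSM
 * fragments are forwarded to it with the No-Forward bit set: unicast to the
 * neighbor if unicast BSM is enabled on the interface, otherwise multicast
 * to all PIM routers.
 */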
882 bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
883 {
884 struct in_addr dst_addr;
885 struct pim_interface *pim_ifp;
886 struct bsm_scope *scope;
887 struct bsm_frag *bsfrag;
888 char neigh_src_str[INET_ADDRSTRLEN];
889 uint32_t pim_mtu;
890 bool no_fwd = true;
891 bool ret = false;
892
893 if (PIM_DEBUG_BSM) {
894 pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
895 sizeof(neigh_src_str));
896 zlog_debug("%s: New neighbor %s seen on %s", __func__,
897 neigh_src_str, ifp->name);
898 }
899
900 pim_ifp = ifp->info;
901
902 /* DR only forwards BSM packet */
903 if (pim_ifp->pim_dr_addr.s_addr == pim_ifp->primary_address.s_addr) {
904 if (PIM_DEBUG_BSM)
905 zlog_debug(
906 "%s: It is not DR, so don't forward BSM packet",
907 __func__);
908 }
909
910 if (!pim_ifp->bsm_enable) {
911 if (PIM_DEBUG_BSM)
912 zlog_debug("%s: BSM proc not enabled on %s", __func__,
913 ifp->name);
914 return ret;
915 }
916
917 scope = &pim_ifp->pim->global_scope;
918
919 if (!bsm_frags_count(scope->bsm_frags)) {
920 if (PIM_DEBUG_BSM)
921 zlog_debug("%s: BSM list for the scope is empty",
922 __func__);
923 return ret;
924 }
925
926 if (!pim_ifp->ucast_bsm_accept) {
927 dst_addr = qpim_all_pim_routers_addr;
928 if (PIM_DEBUG_BSM)
929 zlog_debug("%s: Sending BSM mcast to %s", __func__,
930 neigh_src_str);
931 } else {
932 dst_addr = neigh->source_addr;
933 if (PIM_DEBUG_BSM)
934 zlog_debug("%s: Sending BSM ucast to %s", __func__,
935 neigh_src_str);
936 }
937 pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
938 pim_hello_require(ifp);
939
940 frr_each (bsm_frags, scope->bsm_frags, bsfrag) {
941 if (pim_mtu < bsfrag->size) {
942 ret = pim_bsm_frag_send(bsfrag->data, bsfrag->size, ifp,
943 pim_mtu, dst_addr, no_fwd);
944 if (!ret) {
945 if (PIM_DEBUG_BSM)
946 zlog_debug(
947 "%s: pim_bsm_frag_send failed",
948 __func__);
949 }
950 } else {
951 /* Pim header needs to be constructed */
952 pim_msg_build_header(bsfrag->data, bsfrag->size,
953 PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
954 ret = pim_bsm_send_intf(bsfrag->data, bsfrag->size, ifp,
955 dst_addr);
956 if (!ret) {
957 if (PIM_DEBUG_BSM)
958 zlog_debug(
959 "%s: pim_bsm_send_intf failed",
960 __func__);
961 }
962 }
963 }
964 return ret;
965 }
966
967 struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
968 struct prefix *grp)
969 {
970 struct route_node *rn;
971 struct bsgrp_node *bsgrp;
972
973 rn = route_node_lookup(scope->bsrp_table, grp);
974 if (!rn) {
975 if (PIM_DEBUG_BSM)
976 zlog_debug("%s: Route node doesn't exist for the group",
977 __func__);
978 return NULL;
979 }
980 bsgrp = rn->info;
981 route_unlock_node(rn);
982
983 return bsgrp;
984 }
985
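/* Group-to-RP hash from RFC 7761, Section 4.7.2:
 *   Value(G, M, C) = (1103515245 * ((1103515245 * (G & M) + 12345) XOR C)
 *                     + 12345) mod 2^31
 * where G is the group address, M the mask derived from hashmasklen and C
 * the candidate RP address; the RP with the highest value wins a tie.
 */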
986 static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
987 uint8_t hashmasklen)
988 {
989 uint64_t temp;
990 uint32_t hash;
991 uint32_t grpaddr;
992 uint32_t rp_add;
993 uint32_t mask = 0xffffffff;
994
995 /* mask to be made zero if hashmasklen is 0 because mask << 32
996 * may not give 0. hashmasklen can be 0 to 32.
997 */
998 if (hashmasklen == 0)
999 mask = 0;
1000
1001 /* in_addr stores the IP in network byte order (big endian), so
1002  * convert it to a host-order uint32 before computing the hash
1003  */
1004 grpaddr = ntohl(group.u.prefix4.s_addr);
1005 /* Avoid shifting by 32 bit on a 32 bit register */
1006 if (hashmasklen)
1007 grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
1008 else
1009 grpaddr = grpaddr & mask;
1010 rp_add = ntohl(rp.s_addr);
1011 temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
1012 + 12345;
1013 hash = temp & (0x7fffffff);
1014 return hash;
1015 }
1016
1017 static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
1018 struct bsgrp_node *grpnode,
1019 struct bsmmsg_rpinfo *rp)
1020 {
1021 struct bsm_rpinfo *bsm_rpinfo;
1022 uint8_t hashMask_len = pim->global_scope.hashMasklen;
1023
1024 /*memory allocation for bsm_rpinfo */
1025 bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_INFO, sizeof(*bsm_rpinfo));
1026
1027 bsm_rpinfo->rp_prio = rp->rp_pri;
1028 bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
1029 memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
1030 sizeof(struct in_addr));
1031 bsm_rpinfo->elapse_time = 0;
1032
1033 /* Back pointer to the group node. */
1034 bsm_rpinfo->bsgrp_node = grpnode;
1035
1036 /* update hash for this rp node */
1037 bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
1038 hashMask_len);
1039 if (bsm_rpinfos_add(grpnode->partial_bsrp_list, bsm_rpinfo) == NULL) {
1040 if (PIM_DEBUG_BSM)
1041 zlog_debug(
1042 "%s, bs_rpinfo node added to the partial bs_rplist.",
1043 __func__);
1044 return true;
1045 }
1046
1047 if (PIM_DEBUG_BSM)
1048 zlog_debug("%s: list node not added", __func__);
1049
1050 XFREE(MTYPE_PIM_BSRP_INFO, bsm_rpinfo);
1051 return false;
1052 }
1053
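/* Track how many RPs of this group are still expected for the current
 * fragment tag. A different fragment tag means a new BSM, so any partially
 * collected RP set is discarded and the pending count restarts from the
 * total advertised in the new BSM.
 */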
1054 static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
1055 struct bsgrp_node *bsgrp,
1056 uint16_t bsm_frag_tag,
1057 uint32_t total_rp_count)
1058 {
1059 if (bsgrp->pend_rp_cnt) {
1060 /* The received BSM is a different packet,
1061  * not another fragment of the same one.
1062  */
1063 if (bsm_frag_tag != bsgrp->frag_tag) {
1064 if (PIM_DEBUG_BSM)
1065 zlog_debug(
1066 "%s, Received a new BSM, so clear the pending bs_rpinfo list.",
1067 __func__);
1068 pim_bsm_rpinfos_free(bsgrp->partial_bsrp_list);
1069 bsgrp->pend_rp_cnt = total_rp_count;
1070 }
1071 } else
1072 bsgrp->pend_rp_cnt = total_rp_count;
1073
1074 bsgrp->frag_tag = bsm_frag_tag;
1075 }
1076
1077 /* Parse the BSM and add entries to the partial list of the corresponding bsgrp node */
1078 static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
1079 int buflen, uint16_t bsm_frag_tag)
1080 {
1081 struct bsmmsg_grpinfo grpinfo;
1082 struct bsmmsg_rpinfo rpinfo;
1083 struct prefix group;
1084 struct bsgrp_node *bsgrp = NULL;
1085 int frag_rp_cnt = 0;
1086 int offset = 0;
1087 int ins_count = 0;
1088
1089 while (buflen > offset) {
1090 if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
1091 if (PIM_DEBUG_BSM)
1092 zlog_debug(
1093 "%s: buflen received %d is less than the internal data structure of the packet would suggest",
1094 __func__, buflen);
1095 return false;
1096 }
1097 /* Extract Group tlv from BSM */
1098 memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
1099
1100 if (PIM_DEBUG_BSM) {
1101 char grp_str[INET_ADDRSTRLEN];
1102
1103 pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
1104 sizeof(grp_str));
1105 zlog_debug(
1106 "%s, Group %s Rpcount:%d Fragment-Rp-count:%d",
1107 __func__, grp_str, grpinfo.rp_count,
1108 grpinfo.frag_rp_count);
1109 }
1110
1111 buf += sizeof(struct bsmmsg_grpinfo);
1112 offset += sizeof(struct bsmmsg_grpinfo);
1113
1114 if (grpinfo.rp_count == 0) {
1115 if (PIM_DEBUG_BSM) {
1116 char grp_str[INET_ADDRSTRLEN];
1117
1118 pim_inet4_dump("<Group?>", grpinfo.group.addr,
1119 grp_str, sizeof(grp_str));
1120 zlog_debug("%s, Rp count is zero for group: %s",
1121 __func__, grp_str);
1122 }
1123 return false;
1124 }
1125
1126 group.family = AF_INET;
1127 if (grpinfo.group.mask > IPV4_MAX_BITLEN) {
1128 if (PIM_DEBUG_BSM)
1129 zlog_debug(
1130 "%s, v4 prefix length specified: %d is too long",
1131 __func__, grpinfo.group.mask);
1132 return false;
1133 }
1134 group.prefixlen = grpinfo.group.mask;
1135 group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
1136
1137 /* Get the Group node for the BSM rp table */
1138 bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
1139
1140 if (!bsgrp) {
1141 if (PIM_DEBUG_BSM)
1142 zlog_debug("%s, Create new BSM Group node.",
1143 __func__);
1144
1145 /* create a new node to be added to the tree. */
1146 bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
1147 &group);
1148
1149 if (!bsgrp) {
1150 zlog_debug(
1151 "%s, Failed to get the BSM group node.",
1152 __func__);
1153 continue;
1154 }
1155
1156 bsgrp->scope = scope;
1157 }
1158
1159 pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
1160 grpinfo.rp_count);
1161 frag_rp_cnt = grpinfo.frag_rp_count;
1162 ins_count = 0;
1163
1164 while (frag_rp_cnt--) {
1165 if (offset + (int)sizeof(struct bsmmsg_rpinfo)
1166 > buflen) {
1167 if (PIM_DEBUG_BSM)
1168 zlog_debug(
1169 "%s, buflen received: %u is less than the internal data structure of the packet would suggest",
1170 __func__, buflen);
1171 return false;
1172 }
1173
1174 /* Extract RP address tlv from BSM */
1175 memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
1176 rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
1177 buf += sizeof(struct bsmmsg_rpinfo);
1178 offset += sizeof(struct bsmmsg_rpinfo);
1179
1180 if (PIM_DEBUG_BSM) {
1181 char rp_str[INET_ADDRSTRLEN];
1182
1183 pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
1184 rp_str, sizeof(rp_str));
1185 zlog_debug(
1186 "%s, Rp address - %s; pri:%d hold:%d",
1187 __func__, rp_str, rpinfo.rp_pri,
1188 rpinfo.rp_holdtime);
1189 }
1190
1191 /* Call Install api to update grp-rp mappings */
1192 if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
1193 ins_count++;
1194 }
1195
1196 bsgrp->pend_rp_cnt -= ins_count;
1197
1198 if (!bsgrp->pend_rp_cnt) {
1199 if (PIM_DEBUG_BSM)
1200 zlog_debug(
1201 "%s, Received all the RPs for this group, so replace the bsrp list with the pending rp list.",
1202 __func__);
1203 /* replace the bsrp_list with pending list */
1204 pim_instate_pend_list(bsgrp);
1205 }
1206 }
1207 return true;
1208 }
1209
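/* Entry point for a received Bootstrap message: validate the packet and the
 * advertising BSR, install the carried group-to-RP mappings, restart the
 * bootstrap timer, and, unless the No-Forward bit is set, cache the
 * fragment for new neighbors and flood it out of all BSM-enabled PIM
 * interfaces.
 */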
1210 int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
1211 uint32_t buf_size, bool no_fwd)
1212 {
1213 struct bsm_hdr *bshdr;
1214 int sz = PIM_GBL_SZ_ID;
1215 struct bsmmsg_grpinfo *msg_grp;
1216 struct pim_interface *pim_ifp = NULL;
1217 struct bsm_frag *bsfrag;
1218 struct pim_instance *pim;
1219 char bsr_str[INET_ADDRSTRLEN];
1220 uint16_t frag_tag;
1221 bool empty_bsm = false;
1222
1223 /* BSM Packet acceptance validation */
1224 pim_ifp = ifp->info;
1225 if (!pim_ifp) {
1226 if (PIM_DEBUG_BSM)
1227 zlog_debug("%s: multicast not enabled on interface %s",
1228 __func__, ifp->name);
1229 return -1;
1230 }
1231
1232 pim_ifp->pim_ifstat_bsm_rx++;
1233 pim = pim_ifp->pim;
1234 pim->bsm_rcvd++;
1235
1236 /* Drop if bsm processing is disabled on interface */
1237 if (!pim_ifp->bsm_enable) {
1238 zlog_warn("%s: BSM not enabled on interface %s", __func__,
1239 ifp->name);
1240 pim_ifp->pim_ifstat_bsm_cfg_miss++;
1241 pim->bsm_dropped++;
1242 return -1;
1243 }
1244
1245 if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct bsm_hdr))) {
1246 if (PIM_DEBUG_BSM)
1247 zlog_debug(
1248 "%s: received buffer length of %d which is too small to properly decode",
1249 __func__, buf_size);
1250 return -1;
1251 }
1252
1253 bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
1254 pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
1255 sizeof(bsr_str));
1256 if (bshdr->hm_len > 32) {
1257 zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32",
1258 bshdr->hm_len);
1259 pim->bsm_dropped++;
1260 return -1;
1261 }
1262 pim->global_scope.hashMasklen = bshdr->hm_len;
1263 frag_tag = ntohs(bshdr->frag_tag);
1264
1265 /* Identify empty BSM */
1266 if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
1267 empty_bsm = true;
1268
1269 if (!empty_bsm) {
1270 msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
1271 + PIM_BSM_HDR_LEN);
1272 /* Currently we don't support scope zoned BSM */
1273 if (msg_grp->group.sz) {
1274 if (PIM_DEBUG_BSM)
1275 zlog_debug(
1276 "%s : Administratively scoped range BSM received",
1277 __func__);
1278 pim_ifp->pim_ifstat_bsm_invalid_sz++;
1279 pim->bsm_dropped++;
1280 return -1;
1281 }
1282 }
1283
1284 /* Drop if bsr is not preferred bsr */
1285 if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
1286 if (PIM_DEBUG_BSM)
1287 zlog_debug("%s : Received a non-preferred BSM",
1288 __func__);
1289 pim->bsm_dropped++;
1290 return -1;
1291 }
1292
1293 if (no_fwd) {
1294 /* only accept no-forward BSM if quick refresh on startup */
1295 if ((pim->global_scope.accept_nofwd_bsm)
1296 || (frag_tag == pim->global_scope.bsm_frag_tag)) {
1297 pim->global_scope.accept_nofwd_bsm = false;
1298 } else {
1299 if (PIM_DEBUG_BSM)
1300 zlog_debug(
1301 "%s : nofwd_bsm received on %s when accept_nofwd_bsm is false",
1302 __func__, bsr_str);
1303 pim->bsm_dropped++;
1304 pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
1305 return -1;
1306 }
1307 }
1308
1309 /* Multicast BSM received */
1310 if (ip_hdr->ip_dst.s_addr == qpim_all_pim_routers_addr.s_addr) {
1311 if (!no_fwd) {
1312 if (!pim_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
1313 ip_hdr->ip_src)) {
1314 if (PIM_DEBUG_BSM)
1315 zlog_debug(
1316 "%s : RPF check fail for BSR address %s",
1317 __func__, bsr_str);
1318 pim->bsm_dropped++;
1319 return -1;
1320 }
1321 }
1322 } else if (if_lookup_exact_address(&ip_hdr->ip_dst, AF_INET,
1323 pim->vrf_id)) {
1324 /* Unicast BSM received - if ucast bsm not enabled on
1325 * the interface, drop it
1326 */
1327 if (!pim_ifp->ucast_bsm_accept) {
1328 if (PIM_DEBUG_BSM)
1329 zlog_debug(
1330 "%s : Unicast BSM not enabled on interface %s",
1331 __func__, ifp->name);
1332 pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
1333 pim->bsm_dropped++;
1334 return -1;
1335 }
1336
1337 } else {
1338 if (PIM_DEBUG_BSM)
1339 zlog_debug("%s : Invalid destination address",
1340 __func__);
1341 pim->bsm_dropped++;
1342 return -1;
1343 }
1344
1345 if (empty_bsm) {
1346 if (PIM_DEBUG_BSM)
1347 zlog_debug("%s : Empty Pref BSM received", __func__);
1348 }
1349 /* Parse Update bsm rp table and install/uninstall rp if required */
1350 if (!pim_bsm_parse_install_g2rp(
1351 &pim_ifp->pim->global_scope,
1352 (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
1353 (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
1354 frag_tag)) {
1355 if (PIM_DEBUG_BSM) {
1356 zlog_debug("%s, Parsing BSM failed.", __func__);
1357 }
1358 pim->bsm_dropped++;
1359 return -1;
1360 }
1361 /* Restart the bootstrap timer */
1362 pim_bs_timer_restart(&pim_ifp->pim->global_scope,
1363 PIM_BSR_DEFAULT_TIMEOUT);
1364
1365 /* If new BSM received, clear the old bsm database */
1366 if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
1367 if (PIM_DEBUG_BSM) {
1368 zlog_debug("%s: Current frag tag: %d Frag tag rcvd: %d",
1369 __func__,
1370 pim_ifp->pim->global_scope.bsm_frag_tag,
1371 frag_tag);
1372 }
1373 pim_bsm_frags_free(&pim_ifp->pim->global_scope);
1374 pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
1375 }
1376
1377 /* update the scope information from bsm */
1378 pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
1379
1380 if (!no_fwd) {
1381 pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
1382 bsfrag = XCALLOC(MTYPE_PIM_BSM_FRAG,
1383 sizeof(struct bsm_frag) + buf_size);
1384
1385 bsfrag->size = buf_size;
1386 memcpy(bsfrag->data, buf, buf_size);
1387 bsm_frags_add_tail(pim_ifp->pim->global_scope.bsm_frags,
1388 bsfrag);
1389 }
1390
1391 return 0;
1392 }