1 /*
2 * pim_bsm.c: PIM BSM handling routines
3 *
4 * Copyright (C) 2018-19 Vmware, Inc.
5 * Saravanan K
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
20 * MA 02110-1301 USA
21 */
22
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "if.h"
28 #include "pimd.h"
29 #include "pim_iface.h"
30 #include "pim_instance.h"
31 #include "pim_neighbor.h"
32 #include "pim_rpf.h"
33 #include "pim_hello.h"
34 #include "pim_pim.h"
35 #include "pim_nht.h"
36 #include "pim_bsm.h"
37 #include "pim_time.h"
38 #include "pim_zebra.h"
39 #include "pim_util.h"
40
41 /* Forward declarations */
42 static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
43 static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
44 static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
45 int hold_time);
46
47 /* Memory Types */
48 DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info");
49 DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_INFO, "PIM BSR advertised RP info");
50 DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_FRAG, "PIM BSM fragment");
51 DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet");
52
53 /* All forwarded BSM packets must fit within the IP MTU less the (maximum) IP header */
54 #define MAX_IP_HDR_LEN 24
55
56 /* pim_bsm_write_config - Write the interface pim bsm configuration. */
57 void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
58 {
59 struct pim_interface *pim_ifp = ifp->info;
60
61 if (pim_ifp) {
62 if (!pim_ifp->bsm_enable)
63 vty_out(vty, " no " PIM_AF_NAME " pim bsm\n");
64 if (!pim_ifp->ucast_bsm_accept)
65 vty_out(vty, " no " PIM_AF_NAME " pim unicast-bsm\n");
66 }
67 }
68
69 static void pim_bsm_rpinfo_free(struct bsm_rpinfo *bsrp_info)
70 {
71 THREAD_OFF(bsrp_info->g2rp_timer);
72 XFREE(MTYPE_PIM_BSRP_INFO, bsrp_info);
73 }
74
75 static void pim_bsm_rpinfos_free(struct bsm_rpinfos_head *head)
76 {
77 struct bsm_rpinfo *bsrp_info;
78
79 while ((bsrp_info = bsm_rpinfos_pop(head)))
80 pim_bsm_rpinfo_free(bsrp_info);
81 }
82
83 static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
84 {
85 pim_bsm_rpinfos_free(bsgrp_node->bsrp_list);
86 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
87 XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
88 }
89
90 static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
91 {
92 struct route_node *rn;
93
94 rn = route_node_lookup(rt, grp);
95 if (rn) {
96 rn->info = NULL;
97 route_unlock_node(rn);
98 route_unlock_node(rn);
99 }
100 }
101
102 static void pim_bsm_frag_free(struct bsm_frag *bsfrag)
103 {
104 XFREE(MTYPE_PIM_BSM_FRAG, bsfrag);
105 }
106
107 static void pim_bsm_frags_free(struct bsm_scope *scope)
108 {
109 struct bsm_frag *bsfrag;
110
111 while ((bsfrag = bsm_frags_pop(scope->bsm_frags)))
112 pim_bsm_frag_free(bsfrag);
113 }
114
115 int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,
116 const struct bsm_rpinfo *node2)
117 {
118 	/* RP election algorithm:
119 	 * Step-1 : Lowest RP priority has higher precedence.
120 	 * Step-2 : If the priority is the same, the higher hash value
121 	 *	    has higher precedence.
122 	 * Step-3 : If the hash value is also the same, the highest RP
123 	 *	    address becomes the elected RP.
124 	 */
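	/* Illustrative example (not from the spec text): given candidates
	 * A (prio 10, hash 0x20), B (prio 10, hash 0x80) and C (prio 20,
	 * hash 0xff), the sorted order is B, A, C -- B wins over A on the
	 * higher hash at equal priority, and C sorts last because of its
	 * numerically higher (worse) priority.
	 */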
125 if (node1->rp_prio < node2->rp_prio)
126 return -1;
127 if (node1->rp_prio > node2->rp_prio)
128 return 1;
129 if (node1->hash < node2->hash)
130 return 1;
131 if (node1->hash > node2->hash)
132 return -1;
133 if (node1->rp_address.s_addr < node2->rp_address.s_addr)
134 return 1;
135 if (node1->rp_address.s_addr > node2->rp_address.s_addr)
136 return -1;
137 return 0;
138 }
139
140 static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
141 struct prefix *grp)
142 {
143 struct route_node *rn;
144 struct bsgrp_node *bsgrp;
145
146 rn = route_node_get(rt, grp);
147 if (!rn) {
148 zlog_warn("%s: route node creation failed", __func__);
149 return NULL;
150 }
151 bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
152
153 rn->info = bsgrp;
154 bsm_rpinfos_init(bsgrp->bsrp_list);
155 bsm_rpinfos_init(bsgrp->partial_bsrp_list);
156
157 prefix_copy(&bsgrp->group, grp);
158 return bsgrp;
159 }
160
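/* Bootstrap timer expiry, as implemented below (a summary, not spec text):
 * the scope falls back to ACCEPT_ANY with no current BSR, any cached BSM
 * fragments are freed, and each group's elected RP is granted one more
 * holdtime via its G2RP timer before it would be removed.
 */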
161 static void pim_on_bs_timer(struct thread *t)
162 {
163 struct route_node *rn;
164 struct bsm_scope *scope;
165 struct bsgrp_node *bsgrp_node;
166 struct bsm_rpinfo *bsrp;
167
168 scope = THREAD_ARG(t);
169 THREAD_OFF(scope->bs_timer);
170
171 if (PIM_DEBUG_BSM)
172 zlog_debug("%s: Bootstrap Timer expired for scope: %d",
173 __func__, scope->sz_id);
174
175 pim_nht_bsr_del(scope->pim, scope->current_bsr);
176
177 /* Reset scope zone data */
178 scope->accept_nofwd_bsm = false;
179 scope->state = ACCEPT_ANY;
180 scope->current_bsr.s_addr = INADDR_ANY;
181 scope->current_bsr_prio = 0;
182 scope->current_bsr_first_ts = 0;
183 scope->current_bsr_last_ts = 0;
184 scope->bsm_frag_tag = 0;
185 pim_bsm_frags_free(scope);
186
187 for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
188
189 bsgrp_node = (struct bsgrp_node *)rn->info;
190 if (!bsgrp_node) {
191 if (PIM_DEBUG_BSM)
192 zlog_debug("%s: bsgrp_node is null", __func__);
193 continue;
194 }
195 /* Give grace time for rp to continue for another hold time */
196 bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
197 if (bsrp)
198 pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
199
200 /* clear pending list */
201 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
202 bsgrp_node->pend_rp_cnt = 0;
203 }
204 }
205
206 static void pim_bs_timer_stop(struct bsm_scope *scope)
207 {
208 if (PIM_DEBUG_BSM)
209 		zlog_debug("%s : BS timer being stopped for sz: %d", __func__,
210 scope->sz_id);
211 THREAD_OFF(scope->bs_timer);
212 }
213
214 static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
215 {
216 if (!scope) {
217 if (PIM_DEBUG_BSM)
218 zlog_debug("%s : Invalid scope(NULL).", __func__);
219 return;
220 }
221 THREAD_OFF(scope->bs_timer);
222 if (PIM_DEBUG_BSM)
223 zlog_debug(
224 "%s : starting bs timer for scope %d with timeout %d secs",
225 __func__, scope->sz_id, bs_timeout);
226 thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
227 &scope->bs_timer);
228 }
229
230 static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
231 {
232 pim_bs_timer_start(scope, bs_timeout);
233 }
234
235 void pim_bsm_proc_init(struct pim_instance *pim)
236 {
237 memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
238
239 pim->global_scope.sz_id = PIM_GBL_SZ_ID;
240 pim->global_scope.bsrp_table = route_table_init();
241 pim->global_scope.accept_nofwd_bsm = true;
242 pim->global_scope.state = NO_INFO;
243 pim->global_scope.pim = pim;
244 bsm_frags_init(pim->global_scope.bsm_frags);
245 pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
246 }
247
248 void pim_bsm_proc_free(struct pim_instance *pim)
249 {
250 struct route_node *rn;
251 struct bsgrp_node *bsgrp;
252
253 pim_bs_timer_stop(&pim->global_scope);
254 pim_bsm_frags_free(&pim->global_scope);
255
256 for (rn = route_top(pim->global_scope.bsrp_table); rn;
257 rn = route_next(rn)) {
258 bsgrp = rn->info;
259 if (!bsgrp)
260 continue;
261 pim_free_bsgrp_data(bsgrp);
262 }
263
264 route_table_finish(pim->global_scope.bsrp_table);
265 }
266
267 static bool is_hold_time_elapsed(void *data)
268 {
269 struct bsm_rpinfo *bsrp;
270
271 bsrp = data;
272
273 if (bsrp->elapse_time < bsrp->rp_holdtime)
274 return false;
275 else
276 return true;
277 }
278
279 static void pim_on_g2rp_timer(struct thread *t)
280 {
281 struct bsm_rpinfo *bsrp;
282 struct bsm_rpinfo *bsrp_node;
283 struct bsgrp_node *bsgrp_node;
284 struct pim_instance *pim;
285 struct rp_info *rp_info;
286 struct route_node *rn;
287 uint16_t elapse;
288 pim_addr bsrp_addr;
289
290 bsrp = THREAD_ARG(t);
291 THREAD_OFF(bsrp->g2rp_timer);
292 bsgrp_node = bsrp->bsgrp_node;
293
294 /* elapse time is the hold time of expired node */
295 elapse = bsrp->rp_holdtime;
296 bsrp_addr = bsrp->rp_address;
297
298 /* update elapse for all bsrp nodes */
299 frr_each_safe (bsm_rpinfos, bsgrp_node->bsrp_list, bsrp_node) {
300 bsrp_node->elapse_time += elapse;
301
302 if (is_hold_time_elapsed(bsrp_node)) {
303 bsm_rpinfos_del(bsgrp_node->bsrp_list, bsrp_node);
304 pim_bsm_rpinfo_free(bsrp_node);
305 }
306 }
307
308 /* Get the next elected rp node */
309 bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
310 pim = bsgrp_node->scope->pim;
311 rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
312
313 if (!rn) {
314 zlog_warn("%s: Route node doesn't exist", __func__);
315 return;
316 }
317
318 rp_info = (struct rp_info *)rn->info;
319
320 if (!rp_info) {
321 route_unlock_node(rn);
322 return;
323 }
324
325 if (rp_info->rp_src != RP_SRC_STATIC) {
326 /* If new rp available, change it else delete the existing */
327 if (bsrp) {
328 pim_g2rp_timer_start(
329 bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
330 pim_rp_change(pim, bsrp->rp_address, bsgrp_node->group,
331 RP_SRC_BSR);
332 } else {
333 pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
334 RP_SRC_BSR);
335 }
336 }
337
338 if (!bsm_rpinfos_count(bsgrp_node->bsrp_list)
339 && !bsm_rpinfos_count(bsgrp_node->partial_bsrp_list)) {
340 pim_free_bsgrp_node(pim->global_scope.bsrp_table,
341 &bsgrp_node->group);
342 pim_free_bsgrp_data(bsgrp_node);
343 }
344 }
345
346 static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
347 {
348 if (!bsrp) {
349 if (PIM_DEBUG_BSM)
350 			zlog_debug("%s : Invalid bsrp(NULL).", __func__);
351 return;
352 }
353 THREAD_OFF(bsrp->g2rp_timer);
354 if (PIM_DEBUG_BSM)
355 zlog_debug(
356 "%s : starting g2rp timer for grp: %pFX - rp: %pI4 with timeout %d secs(Actual Hold time : %d secs)",
357 __func__, &bsrp->bsgrp_node->group,
358 &bsrp->rp_address, hold_time,
359 bsrp->rp_holdtime);
360
361 thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
362 &bsrp->g2rp_timer);
363 }
364
365 static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
366 int hold_time)
367 {
368 pim_g2rp_timer_start(bsrp, hold_time);
369 }
370
371 static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
372 {
373 if (!bsrp)
374 return;
375
376 if (PIM_DEBUG_BSM)
377 zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pI4",
378 __func__, &bsrp->bsgrp_node->group,
379 &bsrp->rp_address);
380
381 THREAD_OFF(bsrp->g2rp_timer);
382 }
383
384 static bool is_hold_time_zero(void *data)
385 {
386 struct bsm_rpinfo *bsrp;
387
388 bsrp = data;
389
390 if (bsrp->rp_holdtime)
391 return false;
392 else
393 return true;
394 }
395
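/* Promote the pending (partial) RP list of a group to the active list
 * (a summary of the logic below): entries with holdtime 0 are dropped
 * first, the RP in use is changed or deleted as needed unless a static
 * RP is configured, and finally the two lists are swapped so that the
 * old active list becomes the new (cleared) partial list.
 */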
396 static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
397 {
398 struct bsm_rpinfo *active;
399 struct bsm_rpinfo *pend;
400 struct rp_info *rp_info;
401 struct route_node *rn;
402 struct pim_instance *pim;
403 struct rp_info *rp_all;
404 struct prefix group_all;
405 bool had_rp_node = true;
406
407 pim = bsgrp_node->scope->pim;
408 active = bsm_rpinfos_first(bsgrp_node->bsrp_list);
409
410 /* Remove nodes with hold time 0 & check if list still has a head */
411 frr_each_safe (bsm_rpinfos, bsgrp_node->partial_bsrp_list, pend) {
412 if (is_hold_time_zero(pend)) {
413 bsm_rpinfos_del(bsgrp_node->partial_bsrp_list, pend);
414 pim_bsm_rpinfo_free(pend);
415 }
416 }
417
418 pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
419
420 if (!pim_get_all_mcast_group(&group_all))
421 return;
422
423 rp_all = pim_rp_find_match_group(pim, &group_all);
424 rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
425
426 if (pend)
427 pim_g2rp_timer_start(pend, pend->rp_holdtime);
428
429 	/* if rp node doesn't exist, or exists but is not configured (rp_all),
430 	 * install the rp from the head (if any) of the partial list. The list
431 	 * is sorted such that the head is the elected RP for the group.
432 	 */
433 if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group) &&
434 pim_rpf_addr_is_inaddr_any(&rp_all->rp))) {
435 if (PIM_DEBUG_BSM)
436 zlog_debug("%s: Route node doesn't exist", __func__);
437 if (pend)
438 pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
439 NULL, RP_SRC_BSR);
440 had_rp_node = false;
441 } else {
442 rp_info = (struct rp_info *)rn->info;
443 if (!rp_info) {
444 route_unlock_node(rn);
445 if (pend)
446 pim_rp_new(pim, pend->rp_address,
447 bsgrp_node->group, NULL, RP_SRC_BSR);
448 had_rp_node = false;
449 }
450 }
451
452 	/* We didn't have an rp node and the pending list is empty (unlikely); clean up */
453 if ((!had_rp_node) && (!pend)) {
454 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
455 &bsgrp_node->group);
456 pim_free_bsgrp_data(bsgrp_node);
457 return;
458 }
459
460 if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
461 /* This means we searched and got rp node, needs unlock */
462 route_unlock_node(rn);
463
464 if (active && pend) {
465 if ((active->rp_address.s_addr
466 != pend->rp_address.s_addr))
467 pim_rp_change(pim, pend->rp_address,
468 bsgrp_node->group, RP_SRC_BSR);
469 }
470
471 /* Possible when the first BSM has group with 0 rp count */
472 if ((!active) && (!pend)) {
473 if (PIM_DEBUG_BSM) {
474 zlog_debug(
475 "%s: Both bsrp and partial list are empty",
476 __func__);
477 }
478 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
479 &bsgrp_node->group);
480 pim_free_bsgrp_data(bsgrp_node);
481 return;
482 }
483
484 /* Possible when a group with 0 rp count received in BSM */
485 if ((active) && (!pend)) {
486 pim_rp_del(pim, active->rp_address, bsgrp_node->group,
487 NULL, RP_SRC_BSR);
488 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
489 &bsgrp_node->group);
490 if (PIM_DEBUG_BSM) {
491 zlog_debug("%s:Pend List is null,del grp node",
492 __func__);
493 }
494 pim_free_bsgrp_data(bsgrp_node);
495 return;
496 }
497 }
498
499 if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
500 		/* We need to unlock rn in this case */
501 		route_unlock_node(rn);
502 		/* there is a chance that a static rp exists and the bsrp was
503 		 * cleaned, so clean the bsgrp node if the pending list is empty
504 		 */
505 if (!pend) {
506 if (PIM_DEBUG_BSM)
507 zlog_debug(
508 "%s: Partial list is empty, static rp exists",
509 __func__);
510 pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
511 &bsgrp_node->group);
512 pim_free_bsgrp_data(bsgrp_node);
513 return;
514 }
515 }
516
517 	/* Swap the lists and delete all nodes in the partial list (the old
518 	 * bsrp_list). Before the swap:
519 	 *   active is head of bsrp list
520 	 *   pend is head of partial list
521 	 * After the swap:
522 	 *   active is head of partial list
523 	 *   pend is head of bsrp list
524 	 * So check the appropriate head after the swap and clean the new partial list
525 	 */
526 bsm_rpinfos_swap_all(bsgrp_node->bsrp_list,
527 bsgrp_node->partial_bsrp_list);
528
529 if (active)
530 pim_g2rp_timer_stop(active);
531 pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
532 }
533
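/* BSR preference, as used below: a BSM is accepted if it comes from the
 * BSR we already track, carries a higher BSR priority, or has equal
 * priority and a BSR address numerically greater than or equal to the
 * current one (cf. the RFC 5059 bootstrap election rules).
 */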
534 static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
535 uint32_t bsr_prio)
536 {
537 if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
538 return true;
539
540 if (bsr_prio > pim->global_scope.current_bsr_prio)
541 return true;
542
543 else if (bsr_prio == pim->global_scope.current_bsr_prio) {
544 if (ntohl(bsr.s_addr)
545 >= ntohl(pim->global_scope.current_bsr.s_addr))
546 return true;
547 else
548 return false;
549 } else
550 return false;
551 }
552
553 static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
554 uint32_t bsr_prio)
555 {
556 if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
557 pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
558 pim_nht_bsr_add(pim, bsr);
559
560 pim->global_scope.current_bsr = bsr;
561 pim->global_scope.current_bsr_first_ts =
562 pim_time_monotonic_sec();
563 pim->global_scope.state = ACCEPT_PREFERRED;
564 }
565 pim->global_scope.current_bsr_prio = bsr_prio;
566 pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
567 }
568
569 void pim_bsm_clear(struct pim_instance *pim)
570 {
571 struct route_node *rn;
572 struct route_node *rpnode;
573 struct bsgrp_node *bsgrp;
574 struct prefix nht_p;
575 struct prefix g_all;
576 struct rp_info *rp_all;
577 struct pim_upstream *up;
578 struct rp_info *rp_info;
579 bool upstream_updated = false;
580
581 pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
582
583 /* Reset scope zone data */
584 pim->global_scope.accept_nofwd_bsm = false;
585 pim->global_scope.state = ACCEPT_ANY;
586 pim->global_scope.current_bsr.s_addr = INADDR_ANY;
587 pim->global_scope.current_bsr_prio = 0;
588 pim->global_scope.current_bsr_first_ts = 0;
589 pim->global_scope.current_bsr_last_ts = 0;
590 pim->global_scope.bsm_frag_tag = 0;
591 pim_bsm_frags_free(&pim->global_scope);
592
593 pim_bs_timer_stop(&pim->global_scope);
594
595 for (rn = route_top(pim->global_scope.bsrp_table); rn;
596 rn = route_next(rn)) {
597 bsgrp = rn->info;
598 if (!bsgrp)
599 continue;
600
601 rpnode = route_node_lookup(pim->rp_table, &bsgrp->group);
602
603 if (!rpnode) {
604 pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
605 &bsgrp->group);
606 pim_free_bsgrp_data(bsgrp);
607 continue;
608 }
609
610 rp_info = (struct rp_info *)rpnode->info;
611
612 if ((!rp_info) || (rp_info->rp_src != RP_SRC_BSR)) {
613 pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
614 &bsgrp->group);
615 pim_free_bsgrp_data(bsgrp);
616 continue;
617 }
618
619 /* Deregister addr with Zebra NHT */
620 nht_p.family = AF_INET;
621 nht_p.prefixlen = IPV4_MAX_BITLEN;
622 nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
623
624 if (PIM_DEBUG_PIM_NHT_RP) {
625 zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
626 __func__, &nht_p);
627 }
628
629 pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
630
631 if (!pim_get_all_mcast_group(&g_all))
632 return;
633
634 rp_all = pim_rp_find_match_group(pim, &g_all);
635
636 if (rp_all == rp_info) {
637 pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
638 rp_all->i_am_rp = 0;
639 } else {
640 /* Delete the rp_info from rp-list */
641 listnode_delete(pim->rp_list, rp_info);
642
643 /* Delete the rp node from rp_table */
644 rpnode->info = NULL;
645 route_unlock_node(rpnode);
646 route_unlock_node(rpnode);
647 XFREE(MTYPE_PIM_RP, rp_info);
648 }
649
650 pim_free_bsgrp_node(bsgrp->scope->bsrp_table, &bsgrp->group);
651 pim_free_bsgrp_data(bsgrp);
652 }
653 pim_rp_refresh_group_to_rp_mapping(pim);
654
655
656 frr_each (rb_pim_upstream, &pim->upstream_head, up) {
657 /* Find the upstream (*, G) whose upstream address is same as
658 * the RP
659 */
660 if (!pim_addr_is_any(up->sg.src))
661 continue;
662
663 struct prefix grp;
664 struct rp_info *trp_info;
665
666 pim_addr_to_prefix(&grp, up->sg.grp);
667 trp_info = pim_rp_find_match_group(pim, &grp);
668
669 /* RP not found for the group grp */
670 if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
671 pim_upstream_rpf_clear(pim, up);
672 pim_rp_set_upstream_addr(pim, &up->upstream_addr,
673 up->sg.src, up->sg.grp);
674 } else {
675 /* RP found for the group grp */
676 pim_upstream_update(pim, up);
677 upstream_updated = true;
678 }
679 }
680
681 if (upstream_updated)
682 pim_zebra_update_all_interfaces(pim);
683 }
684
685 static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
686 pim_addr dst_addr)
687 {
688 struct pim_interface *pim_ifp;
689
690 pim_ifp = ifp->info;
691
692 if (!pim_ifp) {
693 if (PIM_DEBUG_BSM)
694 zlog_debug("%s: Pim interface not available for %s",
695 __func__, ifp->name);
696 return false;
697 }
698
699 if (pim_ifp->pim_sock_fd == -1) {
700 if (PIM_DEBUG_BSM)
701 zlog_debug("%s: Pim sock not available for %s",
702 __func__, ifp->name);
703 return false;
704 }
705
706 if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
707 dst_addr, buf, len, ifp)) {
708 zlog_warn("%s: Could not send BSM message on interface: %s",
709 __func__, ifp->name);
710 return false;
711 }
712
713 if (!pim_ifp->pim_passive_enable)
714 pim_ifp->pim_ifstat_bsm_tx++;
715
716 pim_ifp->pim->bsm_sent++;
717 return true;
718 }
719
720 static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
721 uint32_t pim_mtu, pim_addr dst_addr, bool no_fwd)
722 {
723 struct pim_interface *pim_ifp = ifp->info;
724 struct bsmmsg_grpinfo *grpinfo, *curgrp;
725 uint8_t *firstgrp_ptr;
726 uint8_t *pkt;
727 uint8_t *pak_start;
728 uint32_t parsed_len = 0;
729 uint32_t this_pkt_rem;
730 uint32_t copy_byte_count;
731 uint32_t this_pkt_len;
732 uint8_t total_rp_cnt;
733 uint8_t this_rp_cnt;
734 uint8_t frag_rp_cnt;
735 uint8_t rp_fit_cnt;
736 bool pak_pending = false;
737
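	/* Each fragment built here is laid out as
	 *   [PIM header][BSM header][group hdr + its RPs][group hdr + RPs]...
	 * The PIM and BSM headers are copied once into pak_start and reused
	 * for every fragment; only the group/RP portion is refilled.
	 */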
738 /* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
739 if (pim_mtu < (PIM_MIN_BSM_LEN)) {
740 zlog_warn(
741 "%s: mtu(pim mtu: %d) size less than minimum bootstrap len",
742 __func__, pim_mtu);
743 if (PIM_DEBUG_BSM)
744 zlog_debug(
745 "%s: mtu (pim mtu:%d) less than minimum bootstrap len",
746 __func__, pim_mtu);
747 return false;
748 }
749
750 pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
751
752 pkt = pak_start;
753
754 /* Fill PIM header later before sending packet to calc checksum */
755 pkt += PIM_MSG_HEADER_LEN;
756 buf += PIM_MSG_HEADER_LEN;
757
758 /* copy bsm header to new packet at offset of pim hdr */
759 memcpy(pkt, buf, PIM_BSM_HDR_LEN);
760 pkt += PIM_BSM_HDR_LEN;
761 buf += PIM_BSM_HDR_LEN;
762 parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
763
764 	/* Store the position of the first grp ptr, which can be reused by
765 	 * the next packet to start filling groups. The old bsm header and
766 	 * pim hdr remain, so they need not be filled again for subsequent packets.
767 	 */
768 firstgrp_ptr = pkt;
769
770 /* we received mtu excluding IP hdr len as param
771 * now this_pkt_rem is mtu excluding
772 * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
773 */
774 this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
775
776 /* For each group till the packet length parsed */
777 while (parsed_len < len) {
778 /* pkt ---> fragment's current pointer
779 * buf ---> input buffer's current pointer
780 * mtu ---> size of the pim packet - PIM header
781 * curgrp ---> current group on the fragment
782 * grpinfo ---> current group on the input buffer
783 		 * this_pkt_rem ---> bytes remaining on the current fragment
784 * rp_fit_cnt ---> num of rp for current grp that
785 * fits this frag
786 * total_rp_cnt ---> total rp present for the group in the buf
787 * frag_rp_cnt ---> no of rp for the group to be fit in
788 * the frag
789 * this_rp_cnt ---> how many rp have we parsed
790 */
791 grpinfo = (struct bsmmsg_grpinfo *)buf;
792 memcpy(pkt, buf, PIM_BSM_GRP_LEN);
793 curgrp = (struct bsmmsg_grpinfo *)pkt;
794 parsed_len += PIM_BSM_GRP_LEN;
795 pkt += PIM_BSM_GRP_LEN;
796 buf += PIM_BSM_GRP_LEN;
797 this_pkt_rem -= PIM_BSM_GRP_LEN;
798
799 /* initialize rp count and total_rp_cnt before the rp loop */
800 this_rp_cnt = 0;
801 total_rp_cnt = grpinfo->frag_rp_count;
802
803 /* Loop till all RPs for the group parsed */
804 while (this_rp_cnt < total_rp_cnt) {
805 /* All RP from a group processed here.
806 * group is pointed by grpinfo.
807 * At this point make sure buf pointing to a RP
808 * within a group
809 */
810 rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
811
812 			/* calculate how many RPs we are going to copy in
813 			 * this frag
814 			 */
815 if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
816 frag_rp_cnt = total_rp_cnt - this_rp_cnt;
817 else
818 frag_rp_cnt = rp_fit_cnt;
819
820 /* populate the frag rp count for the current grp */
821 curgrp->frag_rp_count = frag_rp_cnt;
822 copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
823
824 /* copy all the rp that we are fitting in this
825 * frag for the grp
826 */
827 memcpy(pkt, buf, copy_byte_count);
828 this_rp_cnt += frag_rp_cnt;
829 buf += copy_byte_count;
830 pkt += copy_byte_count;
831 parsed_len += copy_byte_count;
832 this_pkt_rem -= copy_byte_count;
833
834 			/* Either we couldn't fit all RPs for the group or
835 			 * the mtu was reached
836 			 */
837 if ((this_rp_cnt < total_rp_cnt)
838 || (this_pkt_rem
839 < (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
840 /* No space to fit in more rp, send this pkt */
841 this_pkt_len = pim_mtu - this_pkt_rem;
842 pim_msg_build_header(
843 pim_ifp->primary_address, dst_addr,
844 pak_start, this_pkt_len,
845 PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
846 pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
847 dst_addr);
848
849 /* Construct next fragment. Reuse old packet */
850 pkt = firstgrp_ptr;
851 this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
852 + PIM_MSG_HEADER_LEN);
853
854 				/* If all RPs of this group have already been
855 				 * copied, break out of this inner loop and
856 				 * move on to the next group
857 				 */
858 if (total_rp_cnt == this_rp_cnt)
859 break;
860
861 /* If some more RPs for the same group pending,
862 * fill grp hdr
863 */
864 memcpy(pkt, (uint8_t *)grpinfo,
865 PIM_BSM_GRP_LEN);
866 curgrp = (struct bsmmsg_grpinfo *)pkt;
867 pkt += PIM_BSM_GRP_LEN;
868 this_pkt_rem -= PIM_BSM_GRP_LEN;
869 pak_pending = false;
870 } else {
871 /* We filled something but not yet sent out */
872 pak_pending = true;
873 }
874 } /* while RP count */
875 } /*while parsed len */
876
877 /* Send if we have any unsent packet */
878 if (pak_pending) {
879 this_pkt_len = pim_mtu - this_pkt_rem;
880 pim_msg_build_header(pim_ifp->primary_address, dst_addr,
881 pak_start, this_pkt_len,
882 PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
883 pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
884 dst_addr);
885 }
886 XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
887 return true;
888 }
889
890 static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
891 uint32_t len, int sz)
892 {
893 struct interface *ifp;
894 struct pim_interface *pim_ifp;
895 pim_addr dst_addr;
896 uint32_t pim_mtu;
897 bool no_fwd = false;
898 bool ret = false;
899
900 /* For now only global scope zone is supported, so send on all
901 * pim interfaces in the vrf
902 */
903 dst_addr = qpim_all_pim_routers_addr;
904 FOR_ALL_INTERFACES (pim->vrf, ifp) {
905 pim_ifp = ifp->info;
906 if ((!pim_ifp) || (!pim_ifp->bsm_enable))
907 continue;
908
909 /*
910 * RFC 5059 Sec 3.4:
911 * When a Bootstrap message is forwarded, it is forwarded out
912 * of every multicast-capable interface that has PIM neighbors.
913 *
914 * So skipping pim interfaces with no neighbors.
915 */
916 if (listcount(pim_ifp->pim_neighbor_list) == 0)
917 continue;
918
919 pim_hello_require(ifp);
920 pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
921 if (pim_mtu < len) {
922 ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
923 dst_addr, no_fwd);
924 if (PIM_DEBUG_BSM)
925 zlog_debug("%s: pim_bsm_frag_send returned %s",
926 __func__, ret ? "TRUE" : "FALSE");
927 } else {
928 pim_msg_build_header(pim_ifp->primary_address, dst_addr,
929 buf, len, PIM_MSG_TYPE_BOOTSTRAP,
930 no_fwd);
931 if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
932 if (PIM_DEBUG_BSM)
933 zlog_debug(
934 "%s: pim_bsm_send_intf returned false",
935 __func__);
936 }
937 }
938 }
939 }
940
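/* Forward the cached BSM fragments to a newly seen neighbor. The copies
 * are sent with the No-Forward bit set (no_fwd = true), either unicast to
 * the neighbor or, when unicast BSM is not accepted on the interface, to
 * ALL-PIM-ROUTERS (cf. RFC 5059 handling of new neighbors).
 */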
941 bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
942 {
943 pim_addr dst_addr;
944 struct pim_interface *pim_ifp;
945 struct bsm_scope *scope;
946 struct bsm_frag *bsfrag;
948 uint32_t pim_mtu;
949 bool no_fwd = true;
950 bool ret = false;
951
952 if (PIM_DEBUG_BSM)
953 zlog_debug("%s: New neighbor %pPA seen on %s", __func__,
954 &neigh->source_addr, ifp->name);
955
956 pim_ifp = ifp->info;
957
958 	/* DR only forwards BSM packet */
959 	if (pim_addr_cmp(pim_ifp->pim_dr_addr, pim_ifp->primary_address)) {
960 		if (PIM_DEBUG_BSM)
961 			zlog_debug("%s: It is not DR, so don't forward BSM packet",
962 				   __func__);
963 		return ret;
964 	}
965
966 if (!pim_ifp->bsm_enable) {
967 if (PIM_DEBUG_BSM)
968 zlog_debug("%s: BSM proc not enabled on %s", __func__,
969 ifp->name);
970 return ret;
971 }
972
973 scope = &pim_ifp->pim->global_scope;
974
975 if (!bsm_frags_count(scope->bsm_frags)) {
976 if (PIM_DEBUG_BSM)
977 zlog_debug("%s: BSM list for the scope is empty",
978 __func__);
979 return ret;
980 }
981
982 	if (!pim_ifp->ucast_bsm_accept) {
983 		dst_addr = qpim_all_pim_routers_addr;
984 		if (PIM_DEBUG_BSM)
985 			zlog_debug("%s: Sending BSM mcast to %pPA", __func__,
986 				   &neigh->source_addr);
987 	} else {
988 		dst_addr = neigh->source_addr;
989 		if (PIM_DEBUG_BSM)
990 			zlog_debug("%s: Sending BSM ucast to %pPA", __func__,
991 				   &neigh->source_addr);
992 	}
993 pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
994 pim_hello_require(ifp);
995
996 frr_each (bsm_frags, scope->bsm_frags, bsfrag) {
997 if (pim_mtu < bsfrag->size) {
998 ret = pim_bsm_frag_send(bsfrag->data, bsfrag->size, ifp,
999 pim_mtu, dst_addr, no_fwd);
1000 if (!ret) {
1001 if (PIM_DEBUG_BSM)
1002 zlog_debug(
1003 "%s: pim_bsm_frag_send failed",
1004 __func__);
1005 }
1006 } else {
1007 /* Pim header needs to be constructed */
1008 pim_msg_build_header(pim_ifp->primary_address, dst_addr,
1009 bsfrag->data, bsfrag->size,
1010 PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
1011 ret = pim_bsm_send_intf(bsfrag->data, bsfrag->size, ifp,
1012 dst_addr);
1013 if (!ret) {
1014 if (PIM_DEBUG_BSM)
1015 zlog_debug(
1016 "%s: pim_bsm_frag_send failed",
1017 __func__);
1018 }
1019 }
1020 }
1021 return ret;
1022 }
1023
1024 struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
1025 struct prefix *grp)
1026 {
1027 struct route_node *rn;
1028 struct bsgrp_node *bsgrp;
1029
1030 rn = route_node_lookup(scope->bsrp_table, grp);
1031 if (!rn) {
1032 if (PIM_DEBUG_BSM)
1033 zlog_debug("%s: Route node doesn't exist for the group",
1034 __func__);
1035 return NULL;
1036 }
1037 bsgrp = rn->info;
1038 route_unlock_node(rn);
1039
1040 return bsgrp;
1041 }
1042
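/* Group-to-RP hash, matching the PIM-SM hash function (RFC 7761,
 * section 4.7.2):
 *   Value(G, M, C) = (1103515245 * ((1103515245 * (G & M) + 12345)
 *                     XOR C) + 12345) mod 2^31
 * where G is the group address, M the hash mask and C the candidate RP
 * address; the modulo 2^31 is taken below by masking with 0x7fffffff.
 */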
1043 static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
1044 uint8_t hashmasklen)
1045 {
1046 uint64_t temp;
1047 uint32_t hash;
1048 uint32_t grpaddr;
1049 uint32_t rp_add;
1050 uint32_t mask = 0xffffffff;
1051
1052 /* mask to be made zero if hashmasklen is 0 because mask << 32
1053 * may not give 0. hashmasklen can be 0 to 32.
1054 */
1055 if (hashmasklen == 0)
1056 mask = 0;
1057
1058 	/* in_addr stores the ip in big endian (network byte order);
1059 	 * convert to host byte order before computing the hash
1060 	 */
1061 grpaddr = ntohl(group.u.prefix4.s_addr);
1062 /* Avoid shifting by 32 bit on a 32 bit register */
1063 if (hashmasklen)
1064 grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
1065 else
1066 grpaddr = grpaddr & mask;
1067 rp_add = ntohl(rp.s_addr);
1068 temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
1069 + 12345;
1070 hash = temp & (0x7fffffff);
1071 return hash;
1072 }
1073
1074 static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
1075 struct bsgrp_node *grpnode,
1076 struct bsmmsg_rpinfo *rp)
1077 {
1078 struct bsm_rpinfo *bsm_rpinfo;
1079 uint8_t hashMask_len = pim->global_scope.hashMasklen;
1080
1081 /*memory allocation for bsm_rpinfo */
1082 bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_INFO, sizeof(*bsm_rpinfo));
1083
1084 bsm_rpinfo->rp_prio = rp->rp_pri;
1085 bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
1086 memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
1087 sizeof(struct in_addr));
1088 bsm_rpinfo->elapse_time = 0;
1089
1090 /* Back pointer to the group node. */
1091 bsm_rpinfo->bsgrp_node = grpnode;
1092
1093 /* update hash for this rp node */
1094 bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
1095 hashMask_len);
1096 if (bsm_rpinfos_add(grpnode->partial_bsrp_list, bsm_rpinfo) == NULL) {
1097 if (PIM_DEBUG_BSM)
1098 zlog_debug(
1099 "%s, bs_rpinfo node added to the partial bs_rplist.",
1100 __func__);
1101 return true;
1102 }
1103
1104 if (PIM_DEBUG_BSM)
1105 zlog_debug("%s: list node not added", __func__);
1106
1107 XFREE(MTYPE_PIM_BSRP_INFO, bsm_rpinfo);
1108 return false;
1109 }
1110
1111 static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
1112 struct bsgrp_node *bsgrp,
1113 uint16_t bsm_frag_tag,
1114 uint32_t total_rp_count)
1115 {
1116 if (bsgrp->pend_rp_cnt) {
1117 		/* the received bsm is a different packet,
1118 		 * not another fragment of the same BSM.
1119 		 */
1120 if (bsm_frag_tag != bsgrp->frag_tag) {
1121 if (PIM_DEBUG_BSM)
1122 zlog_debug(
1123 "%s,Received a new BSM ,so clear the pending bs_rpinfo list.",
1124 __func__);
1125 pim_bsm_rpinfos_free(bsgrp->partial_bsrp_list);
1126 bsgrp->pend_rp_cnt = total_rp_count;
1127 }
1128 } else
1129 bsgrp->pend_rp_cnt = total_rp_count;
1130
1131 bsgrp->frag_tag = bsm_frag_tag;
1132 }
1133
1134 /* Parse the BSM and add each RP to the partial list of the corresponding bsgrp node */
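/* The group/RP portion of a BSM is a sequence of bsmmsg_grpinfo headers,
 * each immediately followed by frag_rp_count bsmmsg_rpinfo entries;
 * rp_count is the total number of RPs advertised for the group across
 * all fragments of the BSM.
 */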
1135 static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
1136 int buflen, uint16_t bsm_frag_tag)
1137 {
1138 struct bsmmsg_grpinfo grpinfo;
1139 struct bsmmsg_rpinfo rpinfo;
1140 struct prefix group;
1141 struct bsgrp_node *bsgrp = NULL;
1142 int frag_rp_cnt = 0;
1143 int offset = 0;
1144 int ins_count = 0;
1145
1146 while (buflen > offset) {
1147 if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
1148 if (PIM_DEBUG_BSM)
1149 zlog_debug(
1150 "%s: buflen received %d is less than the internal data structure of the packet would suggest",
1151 __func__, buflen);
1152 return false;
1153 }
1154 /* Extract Group tlv from BSM */
1155 memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
1156
1157 if (PIM_DEBUG_BSM) {
1158 char grp_str[INET_ADDRSTRLEN];
1159
1160 pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
1161 sizeof(grp_str));
1162 zlog_debug(
1163 "%s, Group %s Rpcount:%d Fragment-Rp-count:%d",
1164 __func__, grp_str, grpinfo.rp_count,
1165 grpinfo.frag_rp_count);
1166 }
1167
1168 buf += sizeof(struct bsmmsg_grpinfo);
1169 offset += sizeof(struct bsmmsg_grpinfo);
1170
1171 group.family = AF_INET;
1172 if (grpinfo.group.mask > IPV4_MAX_BITLEN) {
1173 if (PIM_DEBUG_BSM)
1174 zlog_debug(
1175 "%s, v4 prefix length specified: %d is too long",
1176 __func__, grpinfo.group.mask);
1177 return false;
1178 }
1179 group.prefixlen = grpinfo.group.mask;
1180 group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
1181
1182 /* Get the Group node for the BSM rp table */
1183 bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
1184
1185 if (grpinfo.rp_count == 0) {
1186 struct bsm_rpinfo *old_rpinfo;
1187
1188 /* BSR explicitly no longer has RPs for this group */
1189 if (!bsgrp)
1190 continue;
1191
1192 if (PIM_DEBUG_BSM) {
1193 char grp_str[INET_ADDRSTRLEN];
1194
1195 pim_inet4_dump("<Group?>", grpinfo.group.addr,
1196 grp_str, sizeof(grp_str));
1197 zlog_debug("%s, Rp count is zero for group: %s",
1198 __func__, grp_str);
1199 }
1200
1201 old_rpinfo = bsm_rpinfos_first(bsgrp->bsrp_list);
1202 if (old_rpinfo)
1203 pim_rp_del(scope->pim, old_rpinfo->rp_address,
1204 group, NULL, RP_SRC_BSR);
1205
1206 pim_free_bsgrp_node(scope->bsrp_table, &bsgrp->group);
1207 pim_free_bsgrp_data(bsgrp);
1208 continue;
1209 }
1210
1211 if (!bsgrp) {
1212 if (PIM_DEBUG_BSM)
1213 zlog_debug("%s, Create new BSM Group node.",
1214 __func__);
1215
1216 /* create a new node to be added to the tree. */
1217 bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
1218 &group);
1219
1220 if (!bsgrp) {
1221 zlog_debug(
1222 "%s, Failed to get the BSM group node.",
1223 __func__);
1224 continue;
1225 }
1226
1227 bsgrp->scope = scope;
1228 }
1229
1230 pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
1231 grpinfo.rp_count);
1232 frag_rp_cnt = grpinfo.frag_rp_count;
1233 ins_count = 0;
1234
1235 while (frag_rp_cnt--) {
1236 if (offset + (int)sizeof(struct bsmmsg_rpinfo)
1237 > buflen) {
1238 if (PIM_DEBUG_BSM)
1239 zlog_debug(
1240 "%s, buflen received: %u is less than the internal data structure of the packet would suggest",
1241 __func__, buflen);
1242 return false;
1243 }
1244
1245 /* Extract RP address tlv from BSM */
1246 memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
1247 rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
1248 buf += sizeof(struct bsmmsg_rpinfo);
1249 offset += sizeof(struct bsmmsg_rpinfo);
1250
1251 if (PIM_DEBUG_BSM) {
1252 char rp_str[INET_ADDRSTRLEN];
1253
1254 pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
1255 rp_str, sizeof(rp_str));
1256 zlog_debug(
1257 "%s, Rp address - %s; pri:%d hold:%d",
1258 __func__, rp_str, rpinfo.rp_pri,
1259 rpinfo.rp_holdtime);
1260 }
1261
1262 /* Call Install api to update grp-rp mappings */
1263 if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
1264 ins_count++;
1265 }
1266
1267 bsgrp->pend_rp_cnt -= ins_count;
1268
1269 if (!bsgrp->pend_rp_cnt) {
1270 if (PIM_DEBUG_BSM)
1271 zlog_debug(
1272 "%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
1273 __func__);
1274 /* replace the bsrp_list with pending list */
1275 pim_instate_pend_list(bsgrp);
1276 }
1277 }
1278 return true;
1279 }
1280
1281 int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
1282 uint32_t buf_size, bool no_fwd)
1283 {
1284 struct bsm_hdr *bshdr;
1285 int sz = PIM_GBL_SZ_ID;
1286 struct bsmmsg_grpinfo *msg_grp;
1287 struct pim_interface *pim_ifp = NULL;
1288 struct bsm_frag *bsfrag;
1289 struct pim_instance *pim;
1290 char bsr_str[INET_ADDRSTRLEN];
1291 uint16_t frag_tag;
1292 bool empty_bsm = false;
1293
1294 /* BSM Packet acceptance validation */
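	/* Checks performed below, in order: PIM is enabled on the interface,
	 * the interface is not passive, BSM processing is enabled, the buffer
	 * is large enough for the PIM and BSM headers, the hash mask length is
	 * valid, the BSR is preferred, no-forward handling is honoured, and
	 * the destination is either ALL-PIM-ROUTERS (with an RPF check towards
	 * the BSR) or a local address with unicast BSM accepted.
	 */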
1295 pim_ifp = ifp->info;
1296 if (!pim_ifp) {
1297 if (PIM_DEBUG_BSM)
1298 zlog_debug("%s: multicast not enabled on interface %s",
1299 __func__, ifp->name);
1300 return -1;
1301 }
1302
1303 if (pim_ifp->pim_passive_enable) {
1304 if (PIM_DEBUG_PIM_PACKETS)
1305 zlog_debug(
1306 "skip receiving PIM message on passive interface %s",
1307 ifp->name);
1308 return 0;
1309 }
1310
1311 pim_ifp->pim_ifstat_bsm_rx++;
1312 pim = pim_ifp->pim;
1313 pim->bsm_rcvd++;
1314
1315 /* Drop if bsm processing is disabled on interface */
1316 if (!pim_ifp->bsm_enable) {
1317 zlog_warn("%s: BSM not enabled on interface %s", __func__,
1318 ifp->name);
1319 pim_ifp->pim_ifstat_bsm_cfg_miss++;
1320 pim->bsm_dropped++;
1321 return -1;
1322 }
1323
1324 if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct bsm_hdr))) {
1325 if (PIM_DEBUG_BSM)
1326 zlog_debug(
1327 "%s: received buffer length of %d which is too small to properly decode",
1328 __func__, buf_size);
1329 return -1;
1330 }
1331
1332 bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
1333 pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
1334 sizeof(bsr_str));
1335 if (bshdr->hm_len > IPV4_MAX_BITLEN) {
1336 zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32",
1337 bshdr->hm_len);
1338 pim->bsm_dropped++;
1339 return -1;
1340 }
1341 pim->global_scope.hashMasklen = bshdr->hm_len;
1342 frag_tag = ntohs(bshdr->frag_tag);
1343
1344 /* Identify empty BSM */
1345 if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
1346 empty_bsm = true;
1347
1348 if (!empty_bsm) {
1349 msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
1350 + PIM_BSM_HDR_LEN);
1351 		/* Currently we don't support admin-scoped zone BSMs */
1352 if (msg_grp->group.sz) {
1353 if (PIM_DEBUG_BSM)
1354 zlog_debug(
1355 "%s : Administratively scoped range BSM received",
1356 __func__);
1357 pim_ifp->pim_ifstat_bsm_invalid_sz++;
1358 pim->bsm_dropped++;
1359 return -1;
1360 }
1361 }
1362
1363 /* Drop if bsr is not preferred bsr */
1364 if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
1365 if (PIM_DEBUG_BSM)
1366 zlog_debug("%s : Received a non-preferred BSM",
1367 __func__);
1368 pim->bsm_dropped++;
1369 return -1;
1370 }
1371
1372 if (no_fwd) {
1373 /* only accept no-forward BSM if quick refresh on startup */
1374 if ((pim->global_scope.accept_nofwd_bsm)
1375 || (frag_tag == pim->global_scope.bsm_frag_tag)) {
1376 pim->global_scope.accept_nofwd_bsm = false;
1377 } else {
1378 if (PIM_DEBUG_BSM)
1379 zlog_debug(
1380 "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
1381 __func__, bsr_str);
1382 pim->bsm_dropped++;
1383 pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
1384 return -1;
1385 }
1386 }
1387
1388 #if PIM_IPV == 4
1389 if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr))
1390 #else
1391 if (0)
1392 #endif
1393 {
1394 /* Multicast BSMs are only accepted if source interface & IP
1395 * match RPF towards the BSR's IP address, or they have
1396 * no-forward set
1397 */
1398 if (!no_fwd && !pim_nht_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
1399 ifp, sg->src)) {
1400 if (PIM_DEBUG_BSM)
1401 zlog_debug(
1402 "BSM check: RPF to BSR %s is not %pPA%%%s",
1403 bsr_str, &sg->src, ifp->name);
1404 pim->bsm_dropped++;
1405 return -1;
1406 }
1407 } else if (if_address_is_local(&sg->grp, PIM_AF, pim->vrf->vrf_id)) {
1408 /* Unicast BSM received - if ucast bsm not enabled on
1409 * the interface, drop it
1410 */
1411 if (!pim_ifp->ucast_bsm_accept) {
1412 if (PIM_DEBUG_BSM)
1413 zlog_debug(
1414 "%s : Unicast BSM not enabled on interface %s",
1415 __func__, ifp->name);
1416 pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
1417 pim->bsm_dropped++;
1418 return -1;
1419 }
1420
1421 } else {
1422 if (PIM_DEBUG_BSM)
1423 zlog_debug("%s : Invalid destination address",
1424 __func__);
1425 pim->bsm_dropped++;
1426 return -1;
1427 }
1428
1429 if (empty_bsm) {
1430 if (PIM_DEBUG_BSM)
1431 zlog_debug("%s : Empty Pref BSM received", __func__);
1432 }
1433 	/* Parse the BSM, update the BSM RP table and install/uninstall RPs as required */
1434 if (!pim_bsm_parse_install_g2rp(
1435 &pim_ifp->pim->global_scope,
1436 (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
1437 (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
1438 frag_tag)) {
1439 if (PIM_DEBUG_BSM) {
1440 zlog_debug("%s, Parsing BSM failed.", __func__);
1441 }
1442 pim->bsm_dropped++;
1443 return -1;
1444 }
1445 /* Restart the bootstrap timer */
1446 pim_bs_timer_restart(&pim_ifp->pim->global_scope,
1447 PIM_BSR_DEFAULT_TIMEOUT);
1448
1449 /* If new BSM received, clear the old bsm database */
1450 if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
1451 if (PIM_DEBUG_BSM) {
1452 			zlog_debug("%s: Current frag tag: %d Frag tag rcvd: %d",
1453 __func__,
1454 pim_ifp->pim->global_scope.bsm_frag_tag,
1455 frag_tag);
1456 }
1457 pim_bsm_frags_free(&pim_ifp->pim->global_scope);
1458 pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
1459 }
1460
1461 /* update the scope information from bsm */
1462 pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
1463
1464 if (!no_fwd) {
1465 pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
1466 bsfrag = XCALLOC(MTYPE_PIM_BSM_FRAG,
1467 sizeof(struct bsm_frag) + buf_size);
1468
1469 bsfrag->size = buf_size;
1470 memcpy(bsfrag->data, buf, buf_size);
1471 bsm_frags_add_tail(pim_ifp->pim->global_scope.bsm_frags,
1472 bsfrag);
1473 }
1474
1475 return 0;
1476 }