/*
 * Copyright (C) 2015 Cumulus Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib_errors.h"
#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_memory.h"
#include "pim_neighbor.h"
#include "pim_mroute.h"
#include "pim_zebra.h"
#include "termtable.h"
/* Clean up the data hanging off each pim->rpf_hash node */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	list_delete(&pnc->rp_list);

	hash_clean(pnc->upstream_hash, NULL);
	hash_free(pnc->upstream_hash);
	pnc->upstream_hash = NULL;

	nexthops_free(pnc->nexthop);

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}
static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);

	XFREE(MTYPE_PIM_RP, rp_info);
}
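/*
 * List comparator for pim->rp_list: entries are ordered first by RP
 * address and then by group prefix, so all group ranges served by the
 * same RP end up adjacent in the list.
 */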
int pim_rp_list_cmp(void *v1, void *v2)
{
	struct rp_info *rp1 = (struct rp_info *)v1;
	struct rp_info *rp2 = (struct rp_info *)v2;
	int ret;

	/*
	 * Sort by RP IP address
	 */
	ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
	if (ret)
		return ret;

	/*
	 * Sort by group IP address
	 */
	ret = prefix_cmp(&rp1->group, &rp2->group);

	return ret;
}
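/*
 * Per-instance initialization: create pim->rp_list and pim->rp_table and
 * seed them with a catch-all rp_info for the all-multicast range whose RP
 * stays at PIMADDR_ANY until an RP is actually configured.
 */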
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!pim_get_all_mcast_group(&rp_info->group)) {
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	rp_info->rp.rpf_addr = PIMADDR_ANY;

	listnode_add(pim->rp_list, rp_info);

	rn = route_node_get(pim->rp_table, &rp_info->group);
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));
}
void pim_rp_free(struct pim_instance *pim)
{
	route_table_finish(pim->rp_table);
	pim->rp_table = NULL;

	list_delete(&pim->rp_list);
}
/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       pim_addr rp, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}
/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}
/*
 * Given an RP's address, return the RP's rp_info that is an exact match
 * for the given group
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
					 const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}
/*
 * XXX: long-term issue: we don't actually have a good "ip address-list"
 * implementation.  ("access-list XYZ" is the closest, but it is not a good
 * fit here.)
 *
 * So this code uses a prefix-list to match an address, which causes very
 * unexpected results for the user since prefix-lists by default only match
 * when the prefix length is an exact match too.  i.e. you'd have to add the
 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
 *
 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
 * list match (this is the only user for that.)
 *
 * In the long run, we need to add an "ip address-list", but that's a wholly
 * separate bag of worms, and existing configs using ip prefix-list would
 * drop into the UX pitfall.
 */

#include "lib/plist_int.h"
/*
 * Given a group, return the rp_info for that group
 */
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	const struct prefix *bp = NULL;
	const struct prefix_list_entry *entry;
	struct route_node *rn;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);

			if (prefix_list_apply_ext(plist, &entry, group, true)
				    == PREFIX_DENY ||
			    !entry)
				continue;

			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
				best = rp_info;
				bp = &entry->prefix;
			} else if (!best) {
				best = rp_info;
				bp = &entry->prefix;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		flog_err(
			EC_LIB_DEVELOPMENT,
			"%s: BUG We should have found default group information",
			__func__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_PIM_TRACE) {
		if (best)
			zlog_debug(
				"Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
				group, best->plist, rn, &rp_info->group);
		else
			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
				   rn, &rp_info->group);
	}

	route_unlock_node(rn);

	/*
	 * RPs with prefix lists have the group as 224.0.0.0/4, which will
	 * match anything.  So if we have an rp_info that should match a
	 * prefix list, and we do match, then best should be the answer (even
	 * if it is NULL).
	 */
	if (!rp_info || (rp_info && rp_info->plist))
		return best;

	/*
	 * So we have a non-plist rp_info found in the lookup and no plists
	 * at all to be chosen, return it!
	 */
	if (!best)
		return rp_info;

	/*
	 * If we have a matching non-prefix-list entry and a matching prefix
	 * list we should return the actual rp_info that has the LPM.
	 * If they are equal, use the prefix-list (but let's hope
	 * the end-operator doesn't do this).
	 */
	if (rp_info->group.prefixlen > bp->prefixlen)
		best = rp_info;

	return best;
}
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}
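/*
 * Return nonzero when the RP address in rp_info is one of this interface's
 * own addresses (primary or secondary), i.e. this router itself is that RP.
 */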
static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
					struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct pim_secondary_addr *sec_addr;
	pim_addr sec_paddr;

	if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
		return 1;

	if (!pim_ifp->sec_addr_list) {
		return 0;
	}

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
		sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
		/* If an RP address is one of our own, it is enough to say
		 * "I am RP"; the prefix length does not matter here */
		if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
			return 1;
	}

	return 0;
}
static void pim_rp_check_interfaces(struct pim_instance *pim,
				    struct rp_info *rp_info)
{
	struct interface *ifp;

	rp_info->i_am_rp = 0;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			rp_info->i_am_rp = 1;
		}
	}
}
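/*
 * Re-evaluate the upstream (RPF) address of an upstream entry after the
 * group-to-RP mapping changed: deregister the old address from Zebra NHT,
 * recompute the RPF neighbor and, where needed, fix up the mroute IIF.
 */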
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Let's consider a case where a PIM upstream has a better RP as a
	 * result of a new RP configuration with a more precise group range.
	 * This upstream has to be added to the upstream hash of the new RP's
	 * NHT (pnc) and has to be removed from the old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);
}
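/*
 * Add a new RP configuration, either for an explicit group range or for a
 * prefix-list.  Duplicate or overlapping configuration is rejected and one
 * of the PIM_RP_* / PIM_GROUP_* result codes is returned to the caller.
 */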
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else {
					char buf[PREFIX2STR_BUFFER];

					prefix2str(&tmp_rp_info->group, buf,
						   sizeof(buf));
					pim_rp_del_config(pim, rp_addr, buf,
							  NULL);
				}
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {
		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);
		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is
				 * the one configured for 224.0.0.0/4, that
				 * is fine; ignore that one.  For all others,
				 * though, we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					result = pim_rp_change(
						pim, rp_addr, group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
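/*
 * CLI-facing wrapper: convert the optional group-range string into a
 * prefix (defaulting to the all-multicast range) and delete the matching
 * static RP entry.
 */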
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
		       const char *group_range, const char *plist)
{
	struct prefix group;
	int result;

	if (group_range == NULL)
		result = pim_get_all_mcast_group(&group);
	else
		result = str2prefix(group_range, &group);

	if (!result) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: String to prefix failed for %pPAs group",
				__func__, &rp_addr);
		return;
	}

	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}
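/*
 * Remove an RP entry.  When a static RP is removed while a BSR-learned RP
 * exists for the same group range, that BSM RP is installed in its place;
 * otherwise any affected (*,G) upstream entries are re-pointed or cleared.
 */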
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While a static RP is being deleted, check whether a dynamic RP is
	 * present for the same group in the BSM RP table; if so, install the
	 * dynamic RP for that group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * the same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);

		if (rn->info != rp_info)
			flog_err(EC_LIB_DEVELOPMENT,
				 "Expected rn->info to be equal to rp_info");

		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
				__func__, rn, rp_info, &rp_info->group,
				route_node_get_lock_count(rn));

		rn->info = NULL;
		route_unlock_node(rn);
		route_unlock_node(rn);
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is the same
		 * as the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}
			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
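/*
 * Replace the RP address for an existing group range in place, moving the
 * Zebra NHT registration from the old address to the new one and updating
 * every (*,G) upstream that resolves to this rp_info.
 */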
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL,
				    rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL,
				    rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return result;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address */
	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
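/*
 * Walk all configured RPs, register each RP address with Zebra NHT and
 * attempt an initial nexthop lookup for it.
 */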
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	pim_addr nht_p;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;

		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     nht_p, &rp_info->group, 1))
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"Unable to lookup nexthop for rp specified");
	}
}
/*
 * Checks to see if we should elect ourselves the actual RP when new
 * addresses are added to an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		/* if i_am_rp is already set nothing to be done (adding new
		 * addresses is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug("%s: %pPA: i am rp", __func__,
					   &rp_info->rp.rpf_addr);
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/* un-optimized re-evaluation of "i_am_rp". this is used when interface
 * addresses are removed. Removing addresses is an uncommon event in an
 * active network, so no attempt has been made to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				if (rp_info->i_am_rp)
					zlog_debug("%s: %pPA: i am rp",
						   __func__,
						   &rp_info->rp.rpf_addr);
				else
					zlog_debug(
						"%s: %pPA: i am no longer rp",
						__func__,
						&rp_info->rp.rpf_addr);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);
	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info)
		return rp_info->i_am_rp;

	return 0;
}
/*
 * Return the RP that the group belongs to.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;
	pim_addr nht_p;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	return NULL;
}
/*
 * Set the upstream IP address we want to talk to based upon
 * the RP configured and the source address.
 *
 * If we don't have an RP configured and the source address is *
 * then set the upstream addr to INADDR_ANY and return failure.
 */
int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
			     pim_addr source, pim_addr group)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));

	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
			 (pim_addr_is_any(source)))) {
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Received a (*,G) with no RP configured",
				   __func__);
		*up = PIMADDR_ANY;
		return 0;
	}

	if (pim_addr_is_any(source))
		*up = rp_info->rp.rpf_addr;
	else
		*up = source;

	return 1;
}
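/*
 * Emit the "pim rp ..." configuration lines for this instance; BSR-learned
 * RPs and the unconfigured catch-all entry are skipped.
 */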
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
			const char *spaces)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int count = 0;
	pim_addr rp_addr;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		if (rp_info->rp_src == RP_SRC_BSR)
			continue;

		rp_addr = rp_info->rp.rpf_addr;
		if (rp_info->plist)
			vty_out(vty,
				"%s" PIM_AF_NAME
				" pim rp %pPA prefix-list %s\n",
				spaces, &rp_addr, rp_info->plist);
		else
			vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
				spaces, &rp_addr, &rp_info->group);
		count++;
	}

	return count;
}
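/*
 * Show-command backend: render the RP list either as a text table on the
 * vty or as JSON rows grouped per RP address.
 */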
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	struct ttable *tt = NULL;
	char *table = NULL;
	char source[7];
	char grp[INET6_ADDRSTRLEN];

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
	}

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		if (range && !prefix_match(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));

		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row,
							     "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			prefix2str(&rp_info->group, grp, sizeof(grp));
			ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
				       &rp_info->rp.rpf_addr,
				       rp_info->plist ? rp_info->plist : grp,
				       rp_info->rp.source_nexthop.interface
					       ? rp_info->rp.source_nexthop
							 .interface->name
					       : "Unknown",
				       rp_info->i_am_rp ? "yes" : "no",
				       source, group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Dump the generated table. */
	if (!json) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	} else {
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pPA",
						&prev_rp_info->rp.rpf_addr);
	}
}
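/*
 * When a new PIM neighbor comes up, walk the RP list and, for any tracked
 * RP nexthop that resolves onto the neighbor's interface but has no
 * gateway yet, fill in the neighbor's source address as the gateway.
 */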
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
	struct listnode *node = NULL;
	struct rp_info *rp_info = NULL;
	struct nexthop *nh_node = NULL;
	pim_addr nht_p;
	struct pim_nexthop_cache pnc;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info,
					       &pnc))
			continue;

		for (nh_node = pnc.nexthop; nh_node;
		     nh_node = nh_node->next) {
#if PIM_IPV == 4
			if (!pim_addr_is_any(nh_node->gate.ipv4))
				continue;
#else
			if (!pim_addr_is_any(nh_node->gate.ipv6))
				continue;
#endif
			struct interface *ifp1 = if_lookup_by_index(
				nh_node->ifindex, pim->vrf->vrf_id);

			if (nbr->interface != ifp1)
				continue;

#if PIM_IPV == 4
			nh_node->gate.ipv4 = nbr->source_addr;
#else
			nh_node->gate.ipv6 = nbr->source_addr;
#endif
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: addr %pPA new nexthop addr %pPAs interface %s",
					__func__, &nht_p, &nbr->source_addr,
					ifp1->name);
		}
	}
}