1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) 2015 Cumulus Networks, Inc.
21 #include "lib_errors.h"
24 #include "pim_instance.h"
27 #include "pim_iface.h"
31 #include "pim_memory.h"
32 #include "pim_neighbor.h"
35 #include "pim_mroute.h"
37 #include "pim_zebra.h"
41 #include "termtable.h"
43 /* Cleanup pim->rpf_hash each node data */
44 void pim_rp_list_hash_clean(void *data
)
46 struct pim_nexthop_cache
*pnc
= (struct pim_nexthop_cache
*)data
;
48 list_delete(&pnc
->rp_list
);
50 hash_clean(pnc
->upstream_hash
, NULL
);
51 hash_free(pnc
->upstream_hash
);
52 pnc
->upstream_hash
= NULL
;
54 nexthops_free(pnc
->nexthop
);
56 XFREE(MTYPE_PIM_NEXTHOP_CACHE
, pnc
);
59 static void pim_rp_info_free(struct rp_info
*rp_info
)
61 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
63 XFREE(MTYPE_PIM_RP
, rp_info
);
66 int pim_rp_list_cmp(void *v1
, void *v2
)
68 struct rp_info
*rp1
= (struct rp_info
*)v1
;
69 struct rp_info
*rp2
= (struct rp_info
*)v2
;
73 * Sort by RP IP address
75 ret
= pim_addr_cmp(rp1
->rp
.rpf_addr
, rp2
->rp
.rpf_addr
);
80 * Sort by group IP address
82 ret
= prefix_cmp(&rp1
->group
, &rp2
->group
);
89 void pim_rp_init(struct pim_instance
*pim
)
91 struct rp_info
*rp_info
;
92 struct route_node
*rn
;
94 pim
->rp_list
= list_new();
95 pim
->rp_list
->del
= (void (*)(void *))pim_rp_info_free
;
96 pim
->rp_list
->cmp
= pim_rp_list_cmp
;
98 pim
->rp_table
= route_table_init();
100 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
102 if (!pim_get_all_mcast_group(&rp_info
->group
)) {
103 flog_err(EC_LIB_DEVELOPMENT
,
104 "Unable to convert all-multicast prefix");
105 list_delete(&pim
->rp_list
);
106 route_table_finish(pim
->rp_table
);
107 XFREE(MTYPE_PIM_RP
, rp_info
);
110 rp_info
->rp
.rpf_addr
= PIMADDR_ANY
;
112 listnode_add(pim
->rp_list
, rp_info
);
114 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
116 if (PIM_DEBUG_PIM_TRACE
)
117 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
118 rp_info
, &rp_info
->group
,
119 route_node_get_lock_count(rn
));
122 void pim_rp_free(struct pim_instance
*pim
)
125 route_table_finish(pim
->rp_table
);
126 pim
->rp_table
= NULL
;
129 list_delete(&pim
->rp_list
);
133 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
135 static struct rp_info
*pim_rp_find_prefix_list(struct pim_instance
*pim
,
136 pim_addr rp
, const char *plist
)
138 struct listnode
*node
;
139 struct rp_info
*rp_info
;
141 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
142 if ((!pim_addr_cmp(rp
, rp_info
->rp
.rpf_addr
)) &&
143 rp_info
->plist
&& strcmp(rp_info
->plist
, plist
) == 0) {
152 * Return true if plist is used by any rp_info
154 static int pim_rp_prefix_list_used(struct pim_instance
*pim
, const char *plist
)
156 struct listnode
*node
;
157 struct rp_info
*rp_info
;
159 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
160 if (rp_info
->plist
&& strcmp(rp_info
->plist
, plist
) == 0) {
169 * Given an RP's address, return the RP's rp_info that is an exact match for
172 static struct rp_info
*pim_rp_find_exact(struct pim_instance
*pim
, pim_addr rp
,
173 const struct prefix
*group
)
175 struct listnode
*node
;
176 struct rp_info
*rp_info
;
178 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
179 if ((!pim_addr_cmp(rp
, rp_info
->rp
.rpf_addr
)) &&
180 prefix_same(&rp_info
->group
, group
))
188 * XXX: long-term issue: we don't actually have a good "ip address-list"
189 * implementation. ("access-list XYZ" is the closest but honestly it's
192 * So it's using a prefix-list to match an address here, which causes very
193 * unexpected results for the user since prefix-lists by default only match
194 * when the prefix length is an exact match too. i.e. you'd have to add the
195 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
197 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
198 * list match (this is the only user for that.)
200 * In the long run, we need to add a "ip address-list", but that's a wholly
201 * separate bag of worms, and existing configs using ip prefix-list would
202 * drop into the UX pitfall.
205 #include "lib/plist_int.h"
208 * Given a group, return the rp_info for that group
210 struct rp_info
*pim_rp_find_match_group(struct pim_instance
*pim
,
211 const struct prefix
*group
)
213 struct listnode
*node
;
214 struct rp_info
*best
= NULL
;
215 struct rp_info
*rp_info
;
216 struct prefix_list
*plist
;
217 const struct prefix
*bp
;
218 const struct prefix_list_entry
*entry
;
219 struct route_node
*rn
;
222 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
223 if (rp_info
->plist
) {
224 plist
= prefix_list_lookup(PIM_AFI
, rp_info
->plist
);
226 if (prefix_list_apply_ext(plist
, &entry
, group
, true)
227 == PREFIX_DENY
|| !entry
)
236 if (bp
&& bp
->prefixlen
< entry
->prefix
.prefixlen
) {
243 rn
= route_node_match(pim
->rp_table
, group
);
247 "%s: BUG We should have found default group information",
253 if (PIM_DEBUG_PIM_TRACE
) {
256 "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
257 group
, best
->plist
, rn
, &rp_info
->group
);
259 zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group
,
260 rn
, &rp_info
->group
);
263 route_unlock_node(rn
);
266 * rp's with prefix lists have the group as 224.0.0.0/4 which will
267 * match anything. So if we have a rp_info that should match a prefix
268 * list then if we do match then best should be the answer( even
271 if (!rp_info
|| (rp_info
&& rp_info
->plist
))
275 * So we have a non plist rp_info found in the lookup and no plists
276 * at all to be choosen, return it!
282 * If we have a matching non prefix list and a matching prefix
283 * list we should return the actual rp_info that has the LPM
284 * If they are equal, use the prefix-list( but let's hope
285 * the end-operator doesn't do this )
287 if (rp_info
->group
.prefixlen
> bp
->prefixlen
)
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
306 void pim_rp_prefix_list_update(struct pim_instance
*pim
,
307 struct prefix_list
*plist
)
309 struct listnode
*node
;
310 struct rp_info
*rp_info
;
311 int refresh_needed
= 0;
313 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
315 && strcmp(rp_info
->plist
, prefix_list_name(plist
)) == 0) {
322 pim_rp_refresh_group_to_rp_mapping(pim
);
325 static int pim_rp_check_interface_addrs(struct rp_info
*rp_info
,
326 struct pim_interface
*pim_ifp
)
328 struct listnode
*node
;
329 struct pim_secondary_addr
*sec_addr
;
332 if (!pim_addr_cmp(pim_ifp
->primary_address
, rp_info
->rp
.rpf_addr
))
335 if (!pim_ifp
->sec_addr_list
) {
339 for (ALL_LIST_ELEMENTS_RO(pim_ifp
->sec_addr_list
, node
, sec_addr
)) {
340 sec_paddr
= pim_addr_from_prefix(&sec_addr
->addr
);
341 /* If an RP-address is self, It should be enough to say
342 * I am RP the prefix-length should not matter here */
343 if (!pim_addr_cmp(sec_paddr
, rp_info
->rp
.rpf_addr
))
350 static void pim_rp_check_interfaces(struct pim_instance
*pim
,
351 struct rp_info
*rp_info
)
353 struct interface
*ifp
;
355 rp_info
->i_am_rp
= 0;
356 FOR_ALL_INTERFACES (pim
->vrf
, ifp
) {
357 struct pim_interface
*pim_ifp
= ifp
->info
;
362 if (pim_rp_check_interface_addrs(rp_info
, pim_ifp
)) {
363 rp_info
->i_am_rp
= 1;
368 void pim_upstream_update(struct pim_instance
*pim
, struct pim_upstream
*up
)
370 struct pim_rpf old_rpf
;
371 enum pim_rpf_result rpf_result
;
372 pim_addr old_upstream_addr
;
373 pim_addr new_upstream_addr
;
375 old_upstream_addr
= up
->upstream_addr
;
376 pim_rp_set_upstream_addr(pim
, &new_upstream_addr
, up
->sg
.src
,
379 if (PIM_DEBUG_PIM_TRACE
)
380 zlog_debug("%s: pim upstream update for old upstream %pPA",
381 __func__
, &old_upstream_addr
);
383 if (!pim_addr_cmp(old_upstream_addr
, new_upstream_addr
))
386 /* Lets consider a case, where a PIM upstream has a better RP as a
387 * result of a new RP configuration with more precise group range.
388 * This upstream has to be added to the upstream hash of new RP's
389 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
391 if (!pim_addr_is_any(old_upstream_addr
)) {
392 /* Deregister addr with Zebra NHT */
393 if (PIM_DEBUG_PIM_TRACE
)
395 "%s: Deregister upstream %s addr %pPA with Zebra NHT",
396 __func__
, up
->sg_str
, &old_upstream_addr
);
397 pim_delete_tracked_nexthop(pim
, old_upstream_addr
, up
, NULL
);
400 /* Update the upstream address */
401 up
->upstream_addr
= new_upstream_addr
;
403 old_rpf
.source_nexthop
.interface
= up
->rpf
.source_nexthop
.interface
;
405 rpf_result
= pim_rpf_update(pim
, up
, &old_rpf
, __func__
);
406 if (rpf_result
== PIM_RPF_FAILURE
)
407 pim_mroute_del(up
->channel_oil
, __func__
);
409 /* update kernel multicast forwarding cache (MFC) */
410 if (up
->rpf
.source_nexthop
.interface
&& up
->channel_oil
)
411 pim_upstream_mroute_iif_update(up
->channel_oil
, __func__
);
413 if (rpf_result
== PIM_RPF_CHANGED
||
414 (rpf_result
== PIM_RPF_FAILURE
&&
415 old_rpf
.source_nexthop
.interface
))
416 pim_zebra_upstream_rpf_changed(pim
, up
, &old_rpf
);
420 int pim_rp_new(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
421 const char *plist
, enum rp_source rp_src_flag
)
424 struct rp_info
*rp_info
;
425 struct rp_info
*rp_all
;
426 struct prefix group_all
;
427 struct listnode
*node
, *nnode
;
428 struct rp_info
*tmp_rp_info
;
431 struct route_node
*rn
= NULL
;
432 struct pim_upstream
*up
;
433 bool upstream_updated
= false;
435 if (pim_addr_is_any(rp_addr
))
436 return PIM_RP_BAD_ADDRESS
;
438 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
440 rp_info
->rp
.rpf_addr
= rp_addr
;
441 prefix_copy(&rp_info
->group
, &group
);
442 rp_info
->rp_src
= rp_src_flag
;
446 * Return if the prefix-list is already configured for this RP
448 if (pim_rp_find_prefix_list(pim
, rp_addr
, plist
)) {
449 XFREE(MTYPE_PIM_RP
, rp_info
);
454 * Barf if the prefix-list is already configured for an RP
456 if (pim_rp_prefix_list_used(pim
, plist
)) {
457 XFREE(MTYPE_PIM_RP
, rp_info
);
458 return PIM_RP_PFXLIST_IN_USE
;
462 * Free any existing rp_info entries for this RP
464 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
466 if (!pim_addr_cmp(rp_info
->rp
.rpf_addr
,
467 tmp_rp_info
->rp
.rpf_addr
)) {
468 if (tmp_rp_info
->plist
)
469 pim_rp_del_config(pim
, rp_addr
, NULL
,
474 prefix2str(&tmp_rp_info
->group
,
480 rp_info
->plist
= XSTRDUP(MTYPE_PIM_FILTER_NAME
, plist
);
483 if (!pim_get_all_mcast_group(&group_all
)) {
484 XFREE(MTYPE_PIM_RP
, rp_info
);
485 return PIM_GROUP_BAD_ADDRESS
;
487 rp_all
= pim_rp_find_match_group(pim
, &group_all
);
490 * Barf if group is a non-multicast subnet
492 if (!prefix_match(&rp_all
->group
, &rp_info
->group
)) {
493 XFREE(MTYPE_PIM_RP
, rp_info
);
494 return PIM_GROUP_BAD_ADDRESS
;
498 * Remove any prefix-list rp_info entries for this RP
500 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
502 if (tmp_rp_info
->plist
&&
503 (!pim_addr_cmp(rp_info
->rp
.rpf_addr
,
504 tmp_rp_info
->rp
.rpf_addr
))) {
505 pim_rp_del_config(pim
, rp_addr
, NULL
,
511 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
513 if (prefix_same(&rp_all
->group
, &rp_info
->group
) &&
514 pim_rpf_addr_is_inaddr_any(&rp_all
->rp
)) {
515 rp_all
->rp
.rpf_addr
= rp_info
->rp
.rpf_addr
;
516 rp_all
->rp_src
= rp_src_flag
;
517 XFREE(MTYPE_PIM_RP
, rp_info
);
519 /* Register addr with Zebra NHT */
520 nht_p
= rp_all
->rp
.rpf_addr
;
521 if (PIM_DEBUG_PIM_NHT_RP
)
523 "%s: NHT Register rp_all addr %pPA grp %pFX ",
524 __func__
, &nht_p
, &rp_all
->group
);
526 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
527 /* Find (*, G) upstream whose RP is not
530 if (pim_addr_is_any(up
->upstream_addr
) &&
531 pim_addr_is_any(up
->sg
.src
)) {
533 struct rp_info
*trp_info
;
535 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
536 trp_info
= pim_rp_find_match_group(
538 if (trp_info
== rp_all
) {
539 pim_upstream_update(pim
, up
);
540 upstream_updated
= true;
544 if (upstream_updated
)
545 pim_zebra_update_all_interfaces(pim
);
547 pim_rp_check_interfaces(pim
, rp_all
);
548 pim_rp_refresh_group_to_rp_mapping(pim
);
549 pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_all
,
552 if (!pim_ecmp_nexthop_lookup(pim
,
553 &rp_all
->rp
.source_nexthop
,
554 nht_p
, &rp_all
->group
, 1))
555 return PIM_RP_NO_PATH
;
560 * Return if the group is already configured for this RP
562 tmp_rp_info
= pim_rp_find_exact(pim
, rp_addr
, &rp_info
->group
);
564 if ((tmp_rp_info
->rp_src
!= rp_src_flag
)
565 && (rp_src_flag
== RP_SRC_STATIC
))
566 tmp_rp_info
->rp_src
= rp_src_flag
;
567 XFREE(MTYPE_PIM_RP
, rp_info
);
572 * Barf if this group is already covered by some other RP
574 tmp_rp_info
= pim_rp_find_match_group(pim
, &rp_info
->group
);
577 if (tmp_rp_info
->plist
) {
578 XFREE(MTYPE_PIM_RP
, rp_info
);
579 return PIM_GROUP_PFXLIST_OVERLAP
;
582 * If the only RP that covers this group is an
584 * 224.0.0.0/4 that is fine, ignore that one.
586 * though we must return PIM_GROUP_OVERLAP
588 if (prefix_same(&rp_info
->group
,
589 &tmp_rp_info
->group
)) {
590 if ((rp_src_flag
== RP_SRC_STATIC
)
591 && (tmp_rp_info
->rp_src
593 XFREE(MTYPE_PIM_RP
, rp_info
);
594 return PIM_GROUP_OVERLAP
;
597 result
= pim_rp_change(
601 XFREE(MTYPE_PIM_RP
, rp_info
);
608 listnode_add_sort(pim
->rp_list
, rp_info
);
610 if (!rp_info
->plist
) {
611 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
615 if (PIM_DEBUG_PIM_TRACE
)
616 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
617 rp_info
, &rp_info
->group
,
618 rn
? route_node_get_lock_count(rn
) : 0);
620 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
621 if (pim_addr_is_any(up
->sg
.src
)) {
623 struct rp_info
*trp_info
;
625 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
626 trp_info
= pim_rp_find_match_group(pim
, &grp
);
628 if (trp_info
== rp_info
) {
629 pim_upstream_update(pim
, up
);
630 upstream_updated
= true;
635 if (upstream_updated
)
636 pim_zebra_update_all_interfaces(pim
);
638 pim_rp_check_interfaces(pim
, rp_info
);
639 pim_rp_refresh_group_to_rp_mapping(pim
);
641 /* Register addr with Zebra NHT */
642 nht_p
= rp_info
->rp
.rpf_addr
;
643 if (PIM_DEBUG_PIM_NHT_RP
)
644 zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
645 __func__
, &nht_p
, &rp_info
->group
);
646 pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_info
, NULL
);
647 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, nht_p
,
649 return PIM_RP_NO_PATH
;
654 void pim_rp_del_config(struct pim_instance
*pim
, pim_addr rp_addr
,
655 const char *group_range
, const char *plist
)
660 if (group_range
== NULL
)
661 result
= pim_get_all_mcast_group(&group
);
663 result
= str2prefix(group_range
, &group
);
666 if (PIM_DEBUG_PIM_TRACE
)
668 "%s: String to prefix failed for %pPAs group",
673 pim_rp_del(pim
, rp_addr
, group
, plist
, RP_SRC_STATIC
);
676 int pim_rp_del(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
677 const char *plist
, enum rp_source rp_src_flag
)
680 struct rp_info
*rp_info
;
681 struct rp_info
*rp_all
;
683 struct route_node
*rn
;
684 bool was_plist
= false;
685 struct rp_info
*trp_info
;
686 struct pim_upstream
*up
;
687 struct bsgrp_node
*bsgrp
= NULL
;
688 struct bsm_rpinfo
*bsrp
= NULL
;
689 bool upstream_updated
= false;
692 rp_info
= pim_rp_find_prefix_list(pim
, rp_addr
, plist
);
694 rp_info
= pim_rp_find_exact(pim
, rp_addr
, &group
);
697 return PIM_RP_NOT_FOUND
;
699 if (rp_info
->plist
) {
700 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
704 if (PIM_DEBUG_PIM_TRACE
)
705 zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__
,
708 /* While static RP is getting deleted, we need to check if dynamic RP
709 * present for the same group in BSM RP table, then install the dynamic
710 * RP for the group node into the main rp table
712 if (rp_src_flag
== RP_SRC_STATIC
) {
713 bsgrp
= pim_bsm_get_bsgrp_node(&pim
->global_scope
, &group
);
716 bsrp
= bsm_rpinfos_first(bsgrp
->bsrp_list
);
718 if (PIM_DEBUG_PIM_TRACE
)
720 "%s: BSM RP %pPA found for the group %pFX",
721 __func__
, &bsrp
->rp_address
,
723 return pim_rp_change(pim
, bsrp
->rp_address
,
727 if (PIM_DEBUG_PIM_TRACE
)
729 "%s: BSM RP not found for the group %pFX",
734 /* Deregister addr with Zebra NHT */
735 nht_p
= rp_info
->rp
.rpf_addr
;
736 if (PIM_DEBUG_PIM_NHT_RP
)
737 zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__
,
739 pim_delete_tracked_nexthop(pim
, nht_p
, NULL
, rp_info
);
741 if (!pim_get_all_mcast_group(&g_all
))
742 return PIM_RP_BAD_ADDRESS
;
744 rp_all
= pim_rp_find_match_group(pim
, &g_all
);
746 if (rp_all
== rp_info
) {
747 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
748 /* Find the upstream (*, G) whose upstream address is
749 * same as the deleted RP
753 rpf_addr
= rp_info
->rp
.rpf_addr
;
754 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
755 pim_addr_is_any(up
->sg
.src
)) {
758 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
759 trp_info
= pim_rp_find_match_group(pim
, &grp
);
760 if (trp_info
== rp_all
) {
761 pim_upstream_rpf_clear(pim
, up
);
762 up
->upstream_addr
= PIMADDR_ANY
;
766 rp_all
->rp
.rpf_addr
= PIMADDR_ANY
;
771 listnode_delete(pim
->rp_list
, rp_info
);
774 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
776 if (rn
->info
!= rp_info
)
779 "Expected rn->info to be equal to rp_info");
781 if (PIM_DEBUG_PIM_TRACE
)
783 "%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
784 __func__
, rn
, rp_info
, &rp_info
->group
,
785 route_node_get_lock_count(rn
));
788 route_unlock_node(rn
);
789 route_unlock_node(rn
);
793 pim_rp_refresh_group_to_rp_mapping(pim
);
795 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
796 /* Find the upstream (*, G) whose upstream address is same as
801 rpf_addr
= rp_info
->rp
.rpf_addr
;
802 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
803 pim_addr_is_any(up
->sg
.src
)) {
806 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
807 trp_info
= pim_rp_find_match_group(pim
, &grp
);
809 /* RP not found for the group grp */
810 if (pim_rpf_addr_is_inaddr_any(&trp_info
->rp
)) {
811 pim_upstream_rpf_clear(pim
, up
);
812 pim_rp_set_upstream_addr(
813 pim
, &up
->upstream_addr
, up
->sg
.src
,
817 /* RP found for the group grp */
819 pim_upstream_update(pim
, up
);
820 upstream_updated
= true;
825 if (upstream_updated
)
826 pim_zebra_update_all_interfaces(pim
);
828 XFREE(MTYPE_PIM_RP
, rp_info
);
832 int pim_rp_change(struct pim_instance
*pim
, pim_addr new_rp_addr
,
833 struct prefix group
, enum rp_source rp_src_flag
)
836 struct route_node
*rn
;
838 struct rp_info
*rp_info
= NULL
;
839 struct pim_upstream
*up
;
840 bool upstream_updated
= false;
841 pim_addr old_rp_addr
;
843 rn
= route_node_lookup(pim
->rp_table
, &group
);
845 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
852 route_unlock_node(rn
);
853 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
857 old_rp_addr
= rp_info
->rp
.rpf_addr
;
858 if (!pim_addr_cmp(new_rp_addr
, old_rp_addr
)) {
859 if (rp_info
->rp_src
!= rp_src_flag
) {
860 rp_info
->rp_src
= rp_src_flag
;
861 route_unlock_node(rn
);
866 /* Deregister old RP addr with Zebra NHT */
868 if (!pim_addr_is_any(old_rp_addr
)) {
869 nht_p
= rp_info
->rp
.rpf_addr
;
870 if (PIM_DEBUG_PIM_NHT_RP
)
871 zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
873 pim_delete_tracked_nexthop(pim
, nht_p
, NULL
, rp_info
);
876 pim_rp_nexthop_del(rp_info
);
877 listnode_delete(pim
->rp_list
, rp_info
);
878 /* Update the new RP address*/
880 rp_info
->rp
.rpf_addr
= new_rp_addr
;
881 rp_info
->rp_src
= rp_src_flag
;
882 rp_info
->i_am_rp
= 0;
884 listnode_add_sort(pim
->rp_list
, rp_info
);
886 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
887 if (pim_addr_is_any(up
->sg
.src
)) {
889 struct rp_info
*trp_info
;
891 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
892 trp_info
= pim_rp_find_match_group(pim
, &grp
);
894 if (trp_info
== rp_info
) {
895 pim_upstream_update(pim
, up
);
896 upstream_updated
= true;
901 if (upstream_updated
)
902 pim_zebra_update_all_interfaces(pim
);
904 /* Register new RP addr with Zebra NHT */
905 nht_p
= rp_info
->rp
.rpf_addr
;
906 if (PIM_DEBUG_PIM_NHT_RP
)
907 zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
908 __func__
, &nht_p
, &rp_info
->group
);
910 pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_info
, NULL
);
911 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, nht_p
,
912 &rp_info
->group
, 1)) {
913 route_unlock_node(rn
);
914 return PIM_RP_NO_PATH
;
917 pim_rp_check_interfaces(pim
, rp_info
);
919 route_unlock_node(rn
);
921 pim_rp_refresh_group_to_rp_mapping(pim
);
926 void pim_rp_setup(struct pim_instance
*pim
)
928 struct listnode
*node
;
929 struct rp_info
*rp_info
;
932 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
933 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
936 nht_p
= rp_info
->rp
.rpf_addr
;
938 pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_info
, NULL
);
939 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
,
940 nht_p
, &rp_info
->group
, 1)) {
941 if (PIM_DEBUG_PIM_NHT_RP
)
943 "Unable to lookup nexthop for rp specified");
944 pim_rp_nexthop_del(rp_info
);
950 * Checks to see if we should elect ourself the actual RP when new if
951 * addresses are added against an interface.
953 void pim_rp_check_on_if_add(struct pim_interface
*pim_ifp
)
955 struct listnode
*node
;
956 struct rp_info
*rp_info
;
957 bool i_am_rp_changed
= false;
958 struct pim_instance
*pim
= pim_ifp
->pim
;
960 if (pim
->rp_list
== NULL
)
963 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
964 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
967 /* if i_am_rp is already set nothing to be done (adding new
969 * is not going to make a difference). */
970 if (rp_info
->i_am_rp
) {
974 if (pim_rp_check_interface_addrs(rp_info
, pim_ifp
)) {
975 i_am_rp_changed
= true;
976 rp_info
->i_am_rp
= 1;
977 if (PIM_DEBUG_PIM_NHT_RP
)
978 zlog_debug("%s: %pPA: i am rp", __func__
,
979 &rp_info
->rp
.rpf_addr
);
983 if (i_am_rp_changed
) {
984 pim_msdp_i_am_rp_changed(pim
);
985 pim_upstream_reeval_use_rpt(pim
);
989 /* up-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
990 * are removed. Removing numbers is an uncommon event in an active network
991 * so I have made no attempt to optimize it. */
992 void pim_i_am_rp_re_evaluate(struct pim_instance
*pim
)
994 struct listnode
*node
;
995 struct rp_info
*rp_info
;
996 bool i_am_rp_changed
= false;
999 if (pim
->rp_list
== NULL
)
1002 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1003 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1006 old_i_am_rp
= rp_info
->i_am_rp
;
1007 pim_rp_check_interfaces(pim
, rp_info
);
1009 if (old_i_am_rp
!= rp_info
->i_am_rp
) {
1010 i_am_rp_changed
= true;
1011 if (PIM_DEBUG_PIM_NHT_RP
) {
1012 if (rp_info
->i_am_rp
)
1013 zlog_debug("%s: %pPA: i am rp",
1015 &rp_info
->rp
.rpf_addr
);
1018 "%s: %pPA: i am no longer rp",
1020 &rp_info
->rp
.rpf_addr
);
1025 if (i_am_rp_changed
) {
1026 pim_msdp_i_am_rp_changed(pim
);
1027 pim_upstream_reeval_use_rpt(pim
);
1032 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1033 * this router is the RP for the group.
1035 * Since we only have static RP, all groups are part of this RP
1037 int pim_rp_i_am_rp(struct pim_instance
*pim
, pim_addr group
)
1040 struct rp_info
*rp_info
;
1042 memset(&g
, 0, sizeof(g
));
1043 pim_addr_to_prefix(&g
, group
);
1044 rp_info
= pim_rp_find_match_group(pim
, &g
);
1047 return rp_info
->i_am_rp
;
1054 * Return the RP that the Group belongs too.
1056 struct pim_rpf
*pim_rp_g(struct pim_instance
*pim
, pim_addr group
)
1059 struct rp_info
*rp_info
;
1061 memset(&g
, 0, sizeof(g
));
1062 pim_addr_to_prefix(&g
, group
);
1064 rp_info
= pim_rp_find_match_group(pim
, &g
);
1069 /* Register addr with Zebra NHT */
1070 nht_p
= rp_info
->rp
.rpf_addr
;
1071 if (PIM_DEBUG_PIM_NHT_RP
)
1073 "%s: NHT Register RP addr %pPA grp %pFX with Zebra",
1074 __func__
, &nht_p
, &rp_info
->group
);
1075 pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_info
, NULL
);
1076 pim_rpf_set_refresh_time(pim
);
1077 (void)pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
,
1078 nht_p
, &rp_info
->group
, 1);
1079 return (&rp_info
->rp
);
1087 * Set the upstream IP address we want to talk to based upon
1088 * the rp configured and the source address
1090 * If we have don't have a RP configured and the source address is *
1091 * then set the upstream addr as INADDR_ANY and return failure.
1094 int pim_rp_set_upstream_addr(struct pim_instance
*pim
, pim_addr
*up
,
1095 pim_addr source
, pim_addr group
)
1097 struct rp_info
*rp_info
;
1100 memset(&g
, 0, sizeof(g
));
1102 pim_addr_to_prefix(&g
, group
);
1104 rp_info
= pim_rp_find_match_group(pim
, &g
);
1106 if (!rp_info
|| ((pim_rpf_addr_is_inaddr_any(&rp_info
->rp
)) &&
1107 (pim_addr_is_any(source
)))) {
1108 if (PIM_DEBUG_PIM_NHT_RP
)
1109 zlog_debug("%s: Received a (*,G) with no RP configured",
1115 if (pim_addr_is_any(source
))
1116 *up
= rp_info
->rp
.rpf_addr
;
1123 int pim_rp_config_write(struct pim_instance
*pim
, struct vty
*vty
,
1126 struct listnode
*node
;
1127 struct rp_info
*rp_info
;
1131 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1132 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1135 if (rp_info
->rp_src
== RP_SRC_BSR
)
1138 rp_addr
= rp_info
->rp
.rpf_addr
;
1142 " pim rp %pPA prefix-list %s\n",
1143 spaces
, &rp_addr
, rp_info
->plist
);
1145 vty_out(vty
, "%s" PIM_AF_NAME
" pim rp %pPA %pFX\n",
1146 spaces
, &rp_addr
, &rp_info
->group
);
1153 void pim_rp_show_information(struct pim_instance
*pim
, struct prefix
*range
,
1154 struct vty
*vty
, json_object
*json
)
1156 struct rp_info
*rp_info
;
1157 struct rp_info
*prev_rp_info
= NULL
;
1158 struct listnode
*node
;
1159 struct ttable
*tt
= NULL
;
1162 char grp
[INET6_ADDRSTRLEN
];
1164 json_object
*json_rp_rows
= NULL
;
1165 json_object
*json_row
= NULL
;
1168 /* Prepare table. */
1169 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
1172 "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
1173 tt
->style
.cell
.rpad
= 2;
1174 tt
->style
.corner
= '+';
1178 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1179 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1183 pim_addr group
= rp_info
->group
.u
.prefix4
;
1185 pim_addr group
= rp_info
->group
.u
.prefix6
;
1187 const char *group_type
=
1188 pim_is_grp_ssm(pim
, group
) ? "SSM" : "ASM";
1190 if (range
&& !prefix_match(&rp_info
->group
, range
))
1193 if (rp_info
->rp_src
== RP_SRC_STATIC
)
1194 strlcpy(source
, "Static", sizeof(source
));
1195 else if (rp_info
->rp_src
== RP_SRC_BSR
)
1196 strlcpy(source
, "BSR", sizeof(source
));
1198 strlcpy(source
, "None", sizeof(source
));
1201 * If we have moved on to a new RP then add the
1202 * entry for the previous RP
1205 (pim_addr_cmp(prev_rp_info
->rp
.rpf_addr
,
1206 rp_info
->rp
.rpf_addr
))) {
1207 json_object_object_addf(
1208 json
, json_rp_rows
, "%pPA",
1209 &prev_rp_info
->rp
.rpf_addr
);
1210 json_rp_rows
= NULL
;
1214 json_rp_rows
= json_object_new_array();
1216 json_row
= json_object_new_object();
1217 json_object_string_addf(json_row
, "rpAddress", "%pPA",
1218 &rp_info
->rp
.rpf_addr
);
1219 if (rp_info
->rp
.source_nexthop
.interface
)
1220 json_object_string_add(
1221 json_row
, "outboundInterface",
1222 rp_info
->rp
.source_nexthop
1225 json_object_string_add(json_row
,
1226 "outboundInterface",
1228 if (rp_info
->i_am_rp
)
1229 json_object_boolean_true_add(json_row
, "iAmRP");
1231 json_object_boolean_false_add(json_row
,
1235 json_object_string_add(json_row
, "prefixList",
1238 json_object_string_addf(json_row
, "group",
1241 json_object_string_add(json_row
, "source", source
);
1242 json_object_string_add(json_row
, "groupType",
1245 json_object_array_add(json_rp_rows
, json_row
);
1247 prefix2str(&rp_info
->group
, grp
, sizeof(grp
));
1248 ttable_add_row(tt
, "%pPA|%s|%s|%s|%s|%s",
1249 &rp_info
->rp
.rpf_addr
,
1253 rp_info
->rp
.source_nexthop
.interface
1254 ? rp_info
->rp
.source_nexthop
1260 source
, group_type
);
1262 prev_rp_info
= rp_info
;
1265 /* Dump the generated table. */
1267 table
= ttable_dump(tt
, "\n");
1268 vty_out(vty
, "%s\n", table
);
1269 XFREE(MTYPE_TMP
, table
);
1272 if (prev_rp_info
&& json_rp_rows
)
1273 json_object_object_addf(json
, json_rp_rows
, "%pPA",
1274 &prev_rp_info
->rp
.rpf_addr
);
1278 void pim_resolve_rp_nh(struct pim_instance
*pim
, struct pim_neighbor
*nbr
)
1280 struct listnode
*node
= NULL
;
1281 struct rp_info
*rp_info
= NULL
;
1282 struct nexthop
*nh_node
= NULL
;
1284 struct pim_nexthop_cache pnc
;
1286 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1287 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1290 nht_p
= rp_info
->rp
.rpf_addr
;
1291 memset(&pnc
, 0, sizeof(struct pim_nexthop_cache
));
1292 if (!pim_find_or_track_nexthop(pim
, nht_p
, NULL
, rp_info
, &pnc
))
1295 for (nh_node
= pnc
.nexthop
; nh_node
; nh_node
= nh_node
->next
) {
1297 if (!pim_addr_is_any(nh_node
->gate
.ipv4
))
1300 if (!pim_addr_is_any(nh_node
->gate
.ipv6
))
1304 struct interface
*ifp1
= if_lookup_by_index(
1305 nh_node
->ifindex
, pim
->vrf
->vrf_id
);
1307 if (nbr
->interface
!= ifp1
)
1311 nh_node
->gate
.ipv4
= nbr
->source_addr
;
1313 nh_node
->gate
.ipv6
= nbr
->source_addr
;
1315 if (PIM_DEBUG_PIM_NHT_RP
)
1317 "%s: addr %pPA new nexthop addr %pPAs interface %s",
1318 __func__
, &nht_p
, &nbr
->source_addr
,