3 * Copyright (C) 2015 Cumulus Networks, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
34 #include "lib_errors.h"
39 #include "pim_iface.h"
43 #include "pim_memory.h"
44 #include "pim_neighbor.h"
47 #include "pim_mroute.h"
49 #include "pim_zebra.h"
54 /* Cleanup pim->rpf_hash each node data */
55 void pim_rp_list_hash_clean(void *data
)
57 struct pim_nexthop_cache
*pnc
= (struct pim_nexthop_cache
*)data
;
59 list_delete(&pnc
->rp_list
);
61 hash_clean(pnc
->upstream_hash
, NULL
);
62 hash_free(pnc
->upstream_hash
);
63 pnc
->upstream_hash
= NULL
;
65 nexthops_free(pnc
->nexthop
);
67 XFREE(MTYPE_PIM_NEXTHOP_CACHE
, pnc
);
70 static void pim_rp_info_free(struct rp_info
*rp_info
)
72 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
74 XFREE(MTYPE_PIM_RP
, rp_info
);
77 int pim_rp_list_cmp(void *v1
, void *v2
)
79 struct rp_info
*rp1
= (struct rp_info
*)v1
;
80 struct rp_info
*rp2
= (struct rp_info
*)v2
;
84 * Sort by RP IP address
86 ret
= prefix_cmp(&rp1
->rp
.rpf_addr
, &rp2
->rp
.rpf_addr
);
91 * Sort by group IP address
93 ret
= prefix_cmp(&rp1
->group
, &rp2
->group
);
100 void pim_rp_init(struct pim_instance
*pim
)
102 struct rp_info
*rp_info
;
103 struct route_node
*rn
;
105 pim
->rp_list
= list_new();
106 pim
->rp_list
->del
= (void (*)(void *))pim_rp_info_free
;
107 pim
->rp_list
->cmp
= pim_rp_list_cmp
;
109 pim
->rp_table
= route_table_init();
111 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
113 if (!pim_get_all_mcast_group(&rp_info
->group
)) {
114 flog_err(EC_LIB_DEVELOPMENT
,
115 "Unable to convert all-multicast prefix");
116 list_delete(&pim
->rp_list
);
117 route_table_finish(pim
->rp_table
);
118 XFREE(MTYPE_PIM_RP
, rp_info
);
121 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, PIMADDR_ANY
);
123 listnode_add(pim
->rp_list
, rp_info
);
125 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
127 if (PIM_DEBUG_PIM_TRACE
)
128 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
129 rp_info
, &rp_info
->group
,
130 route_node_get_lock_count(rn
));
133 void pim_rp_free(struct pim_instance
*pim
)
136 route_table_finish(pim
->rp_table
);
137 pim
->rp_table
= NULL
;
140 list_delete(&pim
->rp_list
);
144 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
146 static struct rp_info
*pim_rp_find_prefix_list(struct pim_instance
*pim
,
147 pim_addr rp
, const char *plist
)
149 struct listnode
*node
;
150 struct rp_info
*rp_info
;
151 struct prefix rp_prefix
;
153 pim_addr_to_prefix(&rp_prefix
, rp
);
155 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
156 if (prefix_same(&rp_prefix
, &rp_info
->rp
.rpf_addr
) &&
157 rp_info
->plist
&& strcmp(rp_info
->plist
, plist
) == 0) {
166 * Return true if plist is used by any rp_info
168 static int pim_rp_prefix_list_used(struct pim_instance
*pim
, const char *plist
)
170 struct listnode
*node
;
171 struct rp_info
*rp_info
;
173 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
174 if (rp_info
->plist
&& strcmp(rp_info
->plist
, plist
) == 0) {
183 * Given an RP's address, return the RP's rp_info that is an exact match for
186 static struct rp_info
*pim_rp_find_exact(struct pim_instance
*pim
, pim_addr rp
,
187 const struct prefix
*group
)
189 struct listnode
*node
;
190 struct rp_info
*rp_info
;
191 struct prefix rp_prefix
;
193 pim_addr_to_prefix(&rp_prefix
, rp
);
194 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
195 if (prefix_same(&rp_prefix
, &rp_info
->rp
.rpf_addr
) &&
196 prefix_same(&rp_info
->group
, group
))
204 * XXX: long-term issue: we don't actually have a good "ip address-list"
205 * implementation. ("access-list XYZ" is the closest but honestly it's
208 * So it's using a prefix-list to match an address here, which causes very
209 * unexpected results for the user since prefix-lists by default only match
210 * when the prefix length is an exact match too. i.e. you'd have to add the
211 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
213 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
214 * list match (this is the only user for that.)
216 * In the long run, we need to add a "ip address-list", but that's a wholly
217 * separate bag of worms, and existing configs using ip prefix-list would
218 * drop into the UX pitfall.
221 #include "lib/plist_int.h"
224 * Given a group, return the rp_info for that group
226 struct rp_info
*pim_rp_find_match_group(struct pim_instance
*pim
,
227 const struct prefix
*group
)
229 struct listnode
*node
;
230 struct rp_info
*best
= NULL
;
231 struct rp_info
*rp_info
;
232 struct prefix_list
*plist
;
233 const struct prefix
*bp
;
234 const struct prefix_list_entry
*entry
;
235 struct route_node
*rn
;
238 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
239 if (rp_info
->plist
) {
240 plist
= prefix_list_lookup(PIM_AFI
, rp_info
->plist
);
242 if (prefix_list_apply_ext(plist
, &entry
, group
, true)
243 == PREFIX_DENY
|| !entry
)
252 if (bp
&& bp
->prefixlen
< entry
->prefix
.prefixlen
) {
259 rn
= route_node_match(pim
->rp_table
, group
);
263 "%s: BUG We should have found default group information",
269 if (PIM_DEBUG_PIM_TRACE
)
270 zlog_debug("Lookedup: %p for rp_info: %p(%pFX) Lock: %d", rn
,
271 rp_info
, &rp_info
->group
,
272 route_node_get_lock_count(rn
));
274 route_unlock_node(rn
);
279 if (rp_info
->group
.prefixlen
< best
->group
.prefixlen
)
286 * When the user makes "ip pim rp" configuration changes or if they change the
287 * prefix-list(s) used by these statements we must tickle the upstream state
288 * for each group to make them re-lookup who their RP should be.
290 * This is a placeholder function for now.
/*
 * When the user makes "ip pim rp" configuration changes or if they change
 * the prefix-list(s) used by these statements we must tickle the upstream
 * state for each group to make them re-lookup who their RP should be.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
298 void pim_rp_prefix_list_update(struct pim_instance
*pim
,
299 struct prefix_list
*plist
)
301 struct listnode
*node
;
302 struct rp_info
*rp_info
;
303 int refresh_needed
= 0;
305 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
307 && strcmp(rp_info
->plist
, prefix_list_name(plist
)) == 0) {
314 pim_rp_refresh_group_to_rp_mapping(pim
);
317 static int pim_rp_check_interface_addrs(struct rp_info
*rp_info
,
318 struct pim_interface
*pim_ifp
)
320 struct listnode
*node
;
321 struct pim_secondary_addr
*sec_addr
;
324 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
326 if (!pim_addr_cmp(pim_ifp
->primary_address
, rpf_addr
))
329 if (!pim_ifp
->sec_addr_list
) {
333 for (ALL_LIST_ELEMENTS_RO(pim_ifp
->sec_addr_list
, node
, sec_addr
)) {
334 if (prefix_same(&sec_addr
->addr
, &rp_info
->rp
.rpf_addr
)) {
342 static void pim_rp_check_interfaces(struct pim_instance
*pim
,
343 struct rp_info
*rp_info
)
345 struct interface
*ifp
;
347 rp_info
->i_am_rp
= 0;
348 FOR_ALL_INTERFACES (pim
->vrf
, ifp
) {
349 struct pim_interface
*pim_ifp
= ifp
->info
;
354 if (pim_rp_check_interface_addrs(rp_info
, pim_ifp
)) {
355 rp_info
->i_am_rp
= 1;
360 void pim_upstream_update(struct pim_instance
*pim
, struct pim_upstream
*up
)
362 struct pim_rpf old_rpf
;
363 enum pim_rpf_result rpf_result
;
364 pim_addr old_upstream_addr
;
365 pim_addr new_upstream_addr
;
368 old_upstream_addr
= up
->upstream_addr
;
369 pim_rp_set_upstream_addr(pim
, &new_upstream_addr
, up
->sg
.src
,
372 if (PIM_DEBUG_PIM_TRACE
)
373 zlog_debug("%s: pim upstream update for old upstream %pPA",
374 __func__
, &old_upstream_addr
);
376 if (!pim_addr_cmp(old_upstream_addr
, new_upstream_addr
))
379 /* Lets consider a case, where a PIM upstream has a better RP as a
380 * result of a new RP configuration with more precise group range.
381 * This upstream has to be added to the upstream hash of new RP's
382 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
384 if (!pim_addr_is_any(old_upstream_addr
)) {
385 /* Deregister addr with Zebra NHT */
386 pim_addr_to_prefix(&nht_p
, old_upstream_addr
);
387 if (PIM_DEBUG_PIM_TRACE
)
389 "%s: Deregister upstream %s addr %pFX with Zebra NHT",
390 __func__
, up
->sg_str
, &nht_p
);
391 pim_delete_tracked_nexthop(pim
, &nht_p
, up
, NULL
);
394 /* Update the upstream address */
395 up
->upstream_addr
= new_upstream_addr
;
397 old_rpf
.source_nexthop
.interface
= up
->rpf
.source_nexthop
.interface
;
399 rpf_result
= pim_rpf_update(pim
, up
, &old_rpf
, __func__
);
400 if (rpf_result
== PIM_RPF_FAILURE
)
401 pim_mroute_del(up
->channel_oil
, __func__
);
403 /* update kernel multicast forwarding cache (MFC) */
404 if (up
->rpf
.source_nexthop
.interface
&& up
->channel_oil
)
405 pim_upstream_mroute_iif_update(up
->channel_oil
, __func__
);
407 if (rpf_result
== PIM_RPF_CHANGED
||
408 (rpf_result
== PIM_RPF_FAILURE
&&
409 old_rpf
.source_nexthop
.interface
))
410 pim_zebra_upstream_rpf_changed(pim
, up
, &old_rpf
);
414 int pim_rp_new(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
415 const char *plist
, enum rp_source rp_src_flag
)
418 struct rp_info
*rp_info
;
419 struct rp_info
*rp_all
;
420 struct prefix group_all
;
421 struct listnode
*node
, *nnode
;
422 struct rp_info
*tmp_rp_info
;
425 struct route_node
*rn
;
426 struct pim_upstream
*up
;
427 bool upstream_updated
= false;
429 if (pim_addr_is_any(rp_addr
))
430 return PIM_RP_BAD_ADDRESS
;
432 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
434 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, rp_addr
);
435 prefix_copy(&rp_info
->group
, &group
);
436 rp_info
->rp_src
= rp_src_flag
;
440 * Return if the prefix-list is already configured for this RP
442 if (pim_rp_find_prefix_list(pim
, rp_addr
, plist
)) {
443 XFREE(MTYPE_PIM_RP
, rp_info
);
448 * Barf if the prefix-list is already configured for an RP
450 if (pim_rp_prefix_list_used(pim
, plist
)) {
451 XFREE(MTYPE_PIM_RP
, rp_info
);
452 return PIM_RP_PFXLIST_IN_USE
;
456 * Free any existing rp_info entries for this RP
458 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
460 if (prefix_same(&rp_info
->rp
.rpf_addr
,
461 &tmp_rp_info
->rp
.rpf_addr
)) {
462 if (tmp_rp_info
->plist
)
463 pim_rp_del_config(pim
, rp_addr
, NULL
,
468 prefix2str(&tmp_rp_info
->group
,
474 rp_info
->plist
= XSTRDUP(MTYPE_PIM_FILTER_NAME
, plist
);
477 if (!pim_get_all_mcast_group(&group_all
)) {
478 XFREE(MTYPE_PIM_RP
, rp_info
);
479 return PIM_GROUP_BAD_ADDRESS
;
481 rp_all
= pim_rp_find_match_group(pim
, &group_all
);
484 * Barf if group is a non-multicast subnet
486 if (!prefix_match(&rp_all
->group
, &rp_info
->group
)) {
487 XFREE(MTYPE_PIM_RP
, rp_info
);
488 return PIM_GROUP_BAD_ADDRESS
;
492 * Remove any prefix-list rp_info entries for this RP
494 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
496 if (tmp_rp_info
->plist
&&
497 prefix_same(&rp_info
->rp
.rpf_addr
,
498 &tmp_rp_info
->rp
.rpf_addr
)) {
499 pim_rp_del_config(pim
, rp_addr
, NULL
,
505 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
507 if (prefix_same(&rp_all
->group
, &rp_info
->group
) &&
508 pim_rpf_addr_is_inaddr_any(&rp_all
->rp
)) {
509 rp_all
->rp
.rpf_addr
= rp_info
->rp
.rpf_addr
;
510 rp_all
->rp_src
= rp_src_flag
;
511 XFREE(MTYPE_PIM_RP
, rp_info
);
513 /* Register addr with Zebra NHT */
514 nht_p
= rp_all
->rp
.rpf_addr
;
515 if (PIM_DEBUG_PIM_NHT_RP
)
517 "%s: NHT Register rp_all addr %pFX grp %pFX ",
518 __func__
, &nht_p
, &rp_all
->group
);
520 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
521 /* Find (*, G) upstream whose RP is not
524 if (pim_addr_is_any(up
->upstream_addr
) &&
525 pim_addr_is_any(up
->sg
.src
)) {
527 struct rp_info
*trp_info
;
529 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
530 trp_info
= pim_rp_find_match_group(
532 if (trp_info
== rp_all
) {
533 pim_upstream_update(pim
, up
);
534 upstream_updated
= true;
538 if (upstream_updated
)
539 pim_zebra_update_all_interfaces(pim
);
541 pim_rp_check_interfaces(pim
, rp_all
);
542 pim_rp_refresh_group_to_rp_mapping(pim
);
543 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_all
,
546 if (!pim_ecmp_nexthop_lookup(pim
,
547 &rp_all
->rp
.source_nexthop
,
548 &nht_p
, &rp_all
->group
, 1))
549 return PIM_RP_NO_PATH
;
554 * Return if the group is already configured for this RP
556 tmp_rp_info
= pim_rp_find_exact(pim
, rp_addr
, &rp_info
->group
);
558 if ((tmp_rp_info
->rp_src
!= rp_src_flag
)
559 && (rp_src_flag
== RP_SRC_STATIC
))
560 tmp_rp_info
->rp_src
= rp_src_flag
;
561 XFREE(MTYPE_PIM_RP
, rp_info
);
566 * Barf if this group is already covered by some other RP
568 tmp_rp_info
= pim_rp_find_match_group(pim
, &rp_info
->group
);
571 if (tmp_rp_info
->plist
) {
572 XFREE(MTYPE_PIM_RP
, rp_info
);
573 return PIM_GROUP_PFXLIST_OVERLAP
;
576 * If the only RP that covers this group is an
578 * 224.0.0.0/4 that is fine, ignore that one.
580 * though we must return PIM_GROUP_OVERLAP
582 if (prefix_same(&rp_info
->group
,
583 &tmp_rp_info
->group
)) {
584 if ((rp_src_flag
== RP_SRC_STATIC
)
585 && (tmp_rp_info
->rp_src
587 XFREE(MTYPE_PIM_RP
, rp_info
);
588 return PIM_GROUP_OVERLAP
;
591 result
= pim_rp_change(
595 XFREE(MTYPE_PIM_RP
, rp_info
);
602 listnode_add_sort(pim
->rp_list
, rp_info
);
603 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
606 if (PIM_DEBUG_PIM_TRACE
)
607 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
608 rp_info
, &rp_info
->group
,
609 route_node_get_lock_count(rn
));
611 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
612 if (pim_addr_is_any(up
->sg
.src
)) {
614 struct rp_info
*trp_info
;
616 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
617 trp_info
= pim_rp_find_match_group(pim
, &grp
);
619 if (trp_info
== rp_info
) {
620 pim_upstream_update(pim
, up
);
621 upstream_updated
= true;
626 if (upstream_updated
)
627 pim_zebra_update_all_interfaces(pim
);
629 pim_rp_check_interfaces(pim
, rp_info
);
630 pim_rp_refresh_group_to_rp_mapping(pim
);
632 /* Register addr with Zebra NHT */
633 nht_p
= rp_info
->rp
.rpf_addr
;
634 if (PIM_DEBUG_PIM_NHT_RP
)
635 zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
636 __func__
, &nht_p
, &rp_info
->group
);
637 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
638 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, &nht_p
,
640 return PIM_RP_NO_PATH
;
645 void pim_rp_del_config(struct pim_instance
*pim
, pim_addr rp_addr
,
646 const char *group_range
, const char *plist
)
651 if (group_range
== NULL
)
652 result
= pim_get_all_mcast_group(&group
);
654 result
= str2prefix(group_range
, &group
);
657 if (PIM_DEBUG_PIM_TRACE
)
659 "%s: String to prefix failed for %pPAs group",
664 pim_rp_del(pim
, rp_addr
, group
, plist
, RP_SRC_STATIC
);
667 int pim_rp_del(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
668 const char *plist
, enum rp_source rp_src_flag
)
671 struct rp_info
*rp_info
;
672 struct rp_info
*rp_all
;
674 struct route_node
*rn
;
675 bool was_plist
= false;
676 struct rp_info
*trp_info
;
677 struct pim_upstream
*up
;
678 struct bsgrp_node
*bsgrp
= NULL
;
679 struct bsm_rpinfo
*bsrp
= NULL
;
680 bool upstream_updated
= false;
683 rp_info
= pim_rp_find_prefix_list(pim
, rp_addr
, plist
);
685 rp_info
= pim_rp_find_exact(pim
, rp_addr
, &group
);
688 return PIM_RP_NOT_FOUND
;
690 if (rp_info
->plist
) {
691 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
695 if (PIM_DEBUG_PIM_TRACE
)
696 zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__
,
699 /* While static RP is getting deleted, we need to check if dynamic RP
700 * present for the same group in BSM RP table, then install the dynamic
701 * RP for the group node into the main rp table
703 if (rp_src_flag
== RP_SRC_STATIC
) {
704 bsgrp
= pim_bsm_get_bsgrp_node(&pim
->global_scope
, &group
);
707 bsrp
= bsm_rpinfos_first(bsgrp
->bsrp_list
);
709 if (PIM_DEBUG_PIM_TRACE
)
711 "%s: BSM RP %pPA found for the group %pFX",
712 __func__
, &bsrp
->rp_address
,
714 return pim_rp_change(pim
, bsrp
->rp_address
,
718 if (PIM_DEBUG_PIM_TRACE
)
720 "%s: BSM RP not found for the group %pFX",
725 /* Deregister addr with Zebra NHT */
726 nht_p
= rp_info
->rp
.rpf_addr
;
727 if (PIM_DEBUG_PIM_NHT_RP
)
728 zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__
,
730 pim_delete_tracked_nexthop(pim
, &nht_p
, NULL
, rp_info
);
732 if (!pim_get_all_mcast_group(&g_all
))
733 return PIM_RP_BAD_ADDRESS
;
735 rp_all
= pim_rp_find_match_group(pim
, &g_all
);
737 if (rp_all
== rp_info
) {
738 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
739 /* Find the upstream (*, G) whose upstream address is
740 * same as the deleted RP
744 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
745 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
746 pim_addr_is_any(up
->sg
.src
)) {
749 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
750 trp_info
= pim_rp_find_match_group(pim
, &grp
);
751 if (trp_info
== rp_all
) {
752 pim_upstream_rpf_clear(pim
, up
);
753 up
->upstream_addr
= PIMADDR_ANY
;
757 pim_addr_to_prefix(&rp_all
->rp
.rpf_addr
, PIMADDR_ANY
);
762 listnode_delete(pim
->rp_list
, rp_info
);
765 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
767 if (rn
->info
!= rp_info
)
770 "Expected rn->info to be equal to rp_info");
772 if (PIM_DEBUG_PIM_TRACE
)
774 "%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
775 __func__
, rn
, rp_info
, &rp_info
->group
,
776 route_node_get_lock_count(rn
));
779 route_unlock_node(rn
);
780 route_unlock_node(rn
);
784 pim_rp_refresh_group_to_rp_mapping(pim
);
786 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
787 /* Find the upstream (*, G) whose upstream address is same as
792 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
793 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
794 pim_addr_is_any(up
->sg
.src
)) {
797 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
798 trp_info
= pim_rp_find_match_group(pim
, &grp
);
800 /* RP not found for the group grp */
801 if (pim_rpf_addr_is_inaddr_any(&trp_info
->rp
)) {
802 pim_upstream_rpf_clear(pim
, up
);
803 pim_rp_set_upstream_addr(
804 pim
, &up
->upstream_addr
, up
->sg
.src
,
808 /* RP found for the group grp */
810 pim_upstream_update(pim
, up
);
811 upstream_updated
= true;
816 if (upstream_updated
)
817 pim_zebra_update_all_interfaces(pim
);
819 XFREE(MTYPE_PIM_RP
, rp_info
);
823 int pim_rp_change(struct pim_instance
*pim
, pim_addr new_rp_addr
,
824 struct prefix group
, enum rp_source rp_src_flag
)
827 struct route_node
*rn
;
829 struct rp_info
*rp_info
= NULL
;
830 struct pim_upstream
*up
;
831 bool upstream_updated
= false;
832 pim_addr old_rp_addr
;
834 rn
= route_node_lookup(pim
->rp_table
, &group
);
836 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
843 route_unlock_node(rn
);
844 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
848 old_rp_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
849 if (!pim_addr_cmp(new_rp_addr
, old_rp_addr
)) {
850 if (rp_info
->rp_src
!= rp_src_flag
) {
851 rp_info
->rp_src
= rp_src_flag
;
852 route_unlock_node(rn
);
857 nht_p
.family
= PIM_AF
;
858 nht_p
.prefixlen
= PIM_MAX_BITLEN
;
860 /* Deregister old RP addr with Zebra NHT */
862 if (!pim_addr_is_any(old_rp_addr
)) {
863 nht_p
= rp_info
->rp
.rpf_addr
;
864 if (PIM_DEBUG_PIM_NHT_RP
)
865 zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
867 pim_delete_tracked_nexthop(pim
, &nht_p
, NULL
, rp_info
);
870 pim_rp_nexthop_del(rp_info
);
871 listnode_delete(pim
->rp_list
, rp_info
);
872 /* Update the new RP address*/
874 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, new_rp_addr
);
875 rp_info
->rp_src
= rp_src_flag
;
876 rp_info
->i_am_rp
= 0;
878 listnode_add_sort(pim
->rp_list
, rp_info
);
880 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
881 if (pim_addr_is_any(up
->sg
.src
)) {
883 struct rp_info
*trp_info
;
885 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
886 trp_info
= pim_rp_find_match_group(pim
, &grp
);
888 if (trp_info
== rp_info
) {
889 pim_upstream_update(pim
, up
);
890 upstream_updated
= true;
895 if (upstream_updated
)
896 pim_zebra_update_all_interfaces(pim
);
898 /* Register new RP addr with Zebra NHT */
899 nht_p
= rp_info
->rp
.rpf_addr
;
900 if (PIM_DEBUG_PIM_NHT_RP
)
901 zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
902 __func__
, &nht_p
, &rp_info
->group
);
904 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
905 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, &nht_p
,
906 &rp_info
->group
, 1)) {
907 route_unlock_node(rn
);
908 return PIM_RP_NO_PATH
;
911 pim_rp_check_interfaces(pim
, rp_info
);
913 route_unlock_node(rn
);
915 pim_rp_refresh_group_to_rp_mapping(pim
);
920 void pim_rp_setup(struct pim_instance
*pim
)
922 struct listnode
*node
;
923 struct rp_info
*rp_info
;
926 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
927 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
930 nht_p
= rp_info
->rp
.rpf_addr
;
932 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
933 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
,
934 &nht_p
, &rp_info
->group
, 1))
935 if (PIM_DEBUG_PIM_NHT_RP
)
937 "Unable to lookup nexthop for rp specified");
942 * Checks to see if we should elect ourself the actual RP when new if
943 * addresses are added against an interface.
945 void pim_rp_check_on_if_add(struct pim_interface
*pim_ifp
)
947 struct listnode
*node
;
948 struct rp_info
*rp_info
;
949 bool i_am_rp_changed
= false;
950 struct pim_instance
*pim
= pim_ifp
->pim
;
952 if (pim
->rp_list
== NULL
)
955 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
956 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
959 /* if i_am_rp is already set nothing to be done (adding new
961 * is not going to make a difference). */
962 if (rp_info
->i_am_rp
) {
966 if (pim_rp_check_interface_addrs(rp_info
, pim_ifp
)) {
967 i_am_rp_changed
= true;
968 rp_info
->i_am_rp
= 1;
969 if (PIM_DEBUG_PIM_NHT_RP
) {
970 char rp
[PREFIX_STRLEN
];
971 pim_addr_dump("<rp?>", &rp_info
->rp
.rpf_addr
,
973 zlog_debug("%s: %s: i am rp", __func__
, rp
);
978 if (i_am_rp_changed
) {
979 pim_msdp_i_am_rp_changed(pim
);
980 pim_upstream_reeval_use_rpt(pim
);
984 /* up-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
985 * are removed. Removing numbers is an uncommon event in an active network
986 * so I have made no attempt to optimize it. */
987 void pim_i_am_rp_re_evaluate(struct pim_instance
*pim
)
989 struct listnode
*node
;
990 struct rp_info
*rp_info
;
991 bool i_am_rp_changed
= false;
994 if (pim
->rp_list
== NULL
)
997 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
998 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1001 old_i_am_rp
= rp_info
->i_am_rp
;
1002 pim_rp_check_interfaces(pim
, rp_info
);
1004 if (old_i_am_rp
!= rp_info
->i_am_rp
) {
1005 i_am_rp_changed
= true;
1006 if (PIM_DEBUG_PIM_NHT_RP
) {
1007 char rp
[PREFIX_STRLEN
];
1008 pim_addr_dump("<rp?>", &rp_info
->rp
.rpf_addr
,
1010 if (rp_info
->i_am_rp
) {
1011 zlog_debug("%s: %s: i am rp", __func__
,
1014 zlog_debug("%s: %s: i am no longer rp",
1021 if (i_am_rp_changed
) {
1022 pim_msdp_i_am_rp_changed(pim
);
1023 pim_upstream_reeval_use_rpt(pim
);
1028 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1029 * this router is the RP for the group.
1031 * Since we only have static RP, all groups are part of this RP
1033 int pim_rp_i_am_rp(struct pim_instance
*pim
, pim_addr group
)
1036 struct rp_info
*rp_info
;
1038 memset(&g
, 0, sizeof(g
));
1039 pim_addr_to_prefix(&g
, group
);
1040 rp_info
= pim_rp_find_match_group(pim
, &g
);
1043 return rp_info
->i_am_rp
;
1050 * Return the RP that the Group belongs too.
1052 struct pim_rpf
*pim_rp_g(struct pim_instance
*pim
, pim_addr group
)
1055 struct rp_info
*rp_info
;
1057 memset(&g
, 0, sizeof(g
));
1058 pim_addr_to_prefix(&g
, group
);
1060 rp_info
= pim_rp_find_match_group(pim
, &g
);
1063 struct prefix nht_p
;
1065 /* Register addr with Zebra NHT */
1066 nht_p
= rp_info
->rp
.rpf_addr
;
1067 if (PIM_DEBUG_PIM_NHT_RP
)
1069 "%s: NHT Register RP addr %pFX grp %pFX with Zebra",
1070 __func__
, &nht_p
, &rp_info
->group
);
1071 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
1072 pim_rpf_set_refresh_time(pim
);
1073 (void)pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
,
1074 &nht_p
, &rp_info
->group
, 1);
1075 return (&rp_info
->rp
);
1083 * Set the upstream IP address we want to talk to based upon
1084 * the rp configured and the source address
1086 * If we have don't have a RP configured and the source address is *
1087 * then set the upstream addr as INADDR_ANY and return failure.
1090 int pim_rp_set_upstream_addr(struct pim_instance
*pim
, pim_addr
*up
,
1091 pim_addr source
, pim_addr group
)
1093 struct rp_info
*rp_info
;
1096 memset(&g
, 0, sizeof(g
));
1098 pim_addr_to_prefix(&g
, group
);
1100 rp_info
= pim_rp_find_match_group(pim
, &g
);
1102 if (!rp_info
|| ((pim_rpf_addr_is_inaddr_any(&rp_info
->rp
)) &&
1103 (pim_addr_is_any(source
)))) {
1104 if (PIM_DEBUG_PIM_NHT_RP
)
1105 zlog_debug("%s: Received a (*,G) with no RP configured",
1111 if (pim_addr_is_any(source
))
1112 *up
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
1119 int pim_rp_config_write(struct pim_instance
*pim
, struct vty
*vty
,
1122 struct listnode
*node
;
1123 struct rp_info
*rp_info
;
1127 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1128 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1131 if (rp_info
->rp_src
== RP_SRC_BSR
)
1134 rp_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
1138 " pim rp %pPA prefix-list %s\n",
1139 spaces
, &rp_addr
, rp_info
->plist
);
1141 vty_out(vty
, "%s" PIM_AF_NAME
" pim rp %pPA %pFX\n",
1142 spaces
, &rp_addr
, &rp_info
->group
);
1149 void pim_rp_show_information(struct pim_instance
*pim
, struct prefix
*range
,
1150 struct vty
*vty
, bool uj
)
1152 struct rp_info
*rp_info
;
1153 struct rp_info
*prev_rp_info
= NULL
;
1154 struct listnode
*node
;
1157 json_object
*json
= NULL
;
1158 json_object
*json_rp_rows
= NULL
;
1159 json_object
*json_row
= NULL
;
1162 json
= json_object_new_object();
1165 "RP address group/prefix-list OIF I am RP Source Group-Type\n");
1166 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1167 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1171 pim_addr group
= rp_info
->group
.u
.prefix4
;
1173 pim_addr group
= rp_info
->group
.u
.prefix6
;
1175 const char *group_type
=
1176 pim_is_grp_ssm(pim
, group
) ? "SSM" : "ASM";
1178 if (range
&& !prefix_same(&rp_info
->group
, range
))
1181 if (rp_info
->rp_src
== RP_SRC_STATIC
)
1182 strlcpy(source
, "Static", sizeof(source
));
1183 else if (rp_info
->rp_src
== RP_SRC_BSR
)
1184 strlcpy(source
, "BSR", sizeof(source
));
1186 strlcpy(source
, "None", sizeof(source
));
1189 * If we have moved on to a new RP then add the
1190 * entry for the previous RP
1193 prefix_cmp(&prev_rp_info
->rp
.rpf_addr
,
1194 &rp_info
->rp
.rpf_addr
)) {
1195 json_object_object_addf(
1196 json
, json_rp_rows
, "%pFXh",
1197 &prev_rp_info
->rp
.rpf_addr
);
1198 json_rp_rows
= NULL
;
1202 json_rp_rows
= json_object_new_array();
1204 json_row
= json_object_new_object();
1205 json_object_string_addf(json_row
, "rpAddress", "%pFXh",
1206 &rp_info
->rp
.rpf_addr
);
1207 if (rp_info
->rp
.source_nexthop
.interface
)
1208 json_object_string_add(
1209 json_row
, "outboundInterface",
1210 rp_info
->rp
.source_nexthop
1213 json_object_string_add(json_row
,
1214 "outboundInterface",
1216 if (rp_info
->i_am_rp
)
1217 json_object_boolean_true_add(json_row
, "iAmRP");
1219 json_object_boolean_false_add(json_row
,
1223 json_object_string_add(json_row
, "prefixList",
1226 json_object_string_addf(json_row
, "group",
1229 json_object_string_add(json_row
, "source", source
);
1230 json_object_string_add(json_row
, "groupType",
1233 json_object_array_add(json_rp_rows
, json_row
);
1235 vty_out(vty
, "%-15pFXh ", &rp_info
->rp
.rpf_addr
);
1238 vty_out(vty
, "%-18s ", rp_info
->plist
);
1240 vty_out(vty
, "%-18pFX ", &rp_info
->group
);
1242 if (rp_info
->rp
.source_nexthop
.interface
)
1243 vty_out(vty
, "%-16s ",
1244 rp_info
->rp
.source_nexthop
1247 vty_out(vty
, "%-16s ", "(Unknown)");
1249 if (rp_info
->i_am_rp
)
1250 vty_out(vty
, "yes");
1254 vty_out(vty
, "%14s", source
);
1255 vty_out(vty
, "%6s\n", group_type
);
1257 prev_rp_info
= rp_info
;
1261 if (prev_rp_info
&& json_rp_rows
)
1262 json_object_object_addf(json
, json_rp_rows
, "%pFXh",
1263 &prev_rp_info
->rp
.rpf_addr
);
1265 vty_json(vty
, json
);
1269 void pim_resolve_rp_nh(struct pim_instance
*pim
, struct pim_neighbor
*nbr
)
1271 struct listnode
*node
= NULL
;
1272 struct rp_info
*rp_info
= NULL
;
1273 struct nexthop
*nh_node
= NULL
;
1274 struct prefix nht_p
;
1275 struct pim_nexthop_cache pnc
;
1277 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1278 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1281 nht_p
= rp_info
->rp
.rpf_addr
;
1282 memset(&pnc
, 0, sizeof(struct pim_nexthop_cache
));
1283 if (!pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
,
1287 for (nh_node
= pnc
.nexthop
; nh_node
; nh_node
= nh_node
->next
) {
1289 if (!pim_addr_is_any(nh_node
->gate
.ipv4
))
1292 if (!pim_addr_is_any(nh_node
->gate
.ipv6
))
1296 struct interface
*ifp1
= if_lookup_by_index(
1297 nh_node
->ifindex
, pim
->vrf
->vrf_id
);
1299 if (nbr
->interface
!= ifp1
)
1303 nh_node
->gate
.ipv4
= nbr
->source_addr
;
1305 nh_node
->gate
.ipv6
= nbr
->source_addr
;
1307 if (PIM_DEBUG_PIM_NHT_RP
)
1309 "%s: addr %pFXh new nexthop addr %pPAs interface %s",
1310 __func__
, &nht_p
, &nbr
->source_addr
,