/*
 * Copyright (C) 2015 Cumulus Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
34 #include "lib_errors.h"
37 #include "pim_instance.h"
40 #include "pim_iface.h"
44 #include "pim_memory.h"
45 #include "pim_neighbor.h"
48 #include "pim_mroute.h"
50 #include "pim_zebra.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	/* Drop the RP back-references held by this nexthop-cache entry. */
	list_delete(&pnc->rp_list);

	/* Flush and release the per-pnc upstream hash table. */
	hash_clean(pnc->upstream_hash, NULL);
	hash_free(pnc->upstream_hash);
	pnc->upstream_hash = NULL;

	/* Free the cached nexthop chain itself. */
	nexthops_free(pnc->nexthop);

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}
71 static void pim_rp_info_free(struct rp_info
*rp_info
)
73 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
75 XFREE(MTYPE_PIM_RP
, rp_info
);
78 int pim_rp_list_cmp(void *v1
, void *v2
)
80 struct rp_info
*rp1
= (struct rp_info
*)v1
;
81 struct rp_info
*rp2
= (struct rp_info
*)v2
;
85 * Sort by RP IP address
87 ret
= prefix_cmp(&rp1
->rp
.rpf_addr
, &rp2
->rp
.rpf_addr
);
92 * Sort by group IP address
94 ret
= prefix_cmp(&rp1
->group
, &rp2
->group
);
101 void pim_rp_init(struct pim_instance
*pim
)
103 struct rp_info
*rp_info
;
104 struct route_node
*rn
;
106 pim
->rp_list
= list_new();
107 pim
->rp_list
->del
= (void (*)(void *))pim_rp_info_free
;
108 pim
->rp_list
->cmp
= pim_rp_list_cmp
;
110 pim
->rp_table
= route_table_init();
112 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
114 if (!pim_get_all_mcast_group(&rp_info
->group
)) {
115 flog_err(EC_LIB_DEVELOPMENT
,
116 "Unable to convert all-multicast prefix");
117 list_delete(&pim
->rp_list
);
118 route_table_finish(pim
->rp_table
);
119 XFREE(MTYPE_PIM_RP
, rp_info
);
122 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, PIMADDR_ANY
);
124 listnode_add(pim
->rp_list
, rp_info
);
126 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
128 if (PIM_DEBUG_PIM_TRACE
)
129 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
130 rp_info
, &rp_info
->group
,
131 route_node_get_lock_count(rn
));
/* Tear down the per-instance RP state built by pim_rp_init(). */
void pim_rp_free(struct pim_instance *pim)
{
	route_table_finish(pim->rp_table);
	pim->rp_table = NULL;

	/* list_delete() invokes pim_rp_info_free() on every element. */
	if (pim->rp_list)
		list_delete(&pim->rp_list);
}
/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list.
 *
 * Returns NULL when no entry matches both the RP address and the
 * prefix-list name.
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       pim_addr rp, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	struct prefix rp_prefix;

	pim_addr_to_prefix(&rp_prefix, rp);

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}
/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}
/*
 * Given an RP's address, return the RP's rp_info that is an exact match for
 * both the RP address and the group prefix; NULL if none matches.
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
					 const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;
	struct prefix rp_prefix;

	pim_addr_to_prefix(&rp_prefix, rp);
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
		    prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}
205 * XXX: long-term issue: we don't actually have a good "ip address-list"
206 * implementation. ("access-list XYZ" is the closest but honestly it's
209 * So it's using a prefix-list to match an address here, which causes very
210 * unexpected results for the user since prefix-lists by default only match
211 * when the prefix length is an exact match too. i.e. you'd have to add the
212 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
214 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
215 * list match (this is the only user for that.)
217 * In the long run, we need to add a "ip address-list", but that's a wholly
218 * separate bag of worms, and existing configs using ip prefix-list would
219 * drop into the UX pitfall.
222 #include "lib/plist_int.h"
225 * Given a group, return the rp_info for that group
227 struct rp_info
*pim_rp_find_match_group(struct pim_instance
*pim
,
228 const struct prefix
*group
)
230 struct listnode
*node
;
231 struct rp_info
*best
= NULL
;
232 struct rp_info
*rp_info
;
233 struct prefix_list
*plist
;
234 const struct prefix
*bp
;
235 const struct prefix_list_entry
*entry
;
236 struct route_node
*rn
;
239 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
240 if (rp_info
->plist
) {
241 plist
= prefix_list_lookup(PIM_AFI
, rp_info
->plist
);
243 if (prefix_list_apply_ext(plist
, &entry
, group
, true)
244 == PREFIX_DENY
|| !entry
)
253 if (bp
&& bp
->prefixlen
< entry
->prefix
.prefixlen
) {
260 rn
= route_node_match(pim
->rp_table
, group
);
264 "%s: BUG We should have found default group information",
270 if (PIM_DEBUG_PIM_TRACE
) {
273 "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
274 group
, best
->plist
, rn
, &rp_info
->group
);
276 zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group
,
277 rn
, &rp_info
->group
);
280 route_unlock_node(rn
);
283 * rp's with prefix lists have the group as 224.0.0.0/4 which will
284 * match anything. So if we have a rp_info that should match a prefix
285 * list then if we do match then best should be the answer( even
288 if (!rp_info
|| (rp_info
&& rp_info
->plist
))
292 * So we have a non plist rp_info found in the lookup and no plists
293 * at all to be choosen, return it!
299 * If we have a matching non prefix list and a matching prefix
300 * list we should return the actual rp_info that has the LPM
301 * If they are equal, use the prefix-list( but let's hope
302 * the end-operator doesn't do this )
304 if (rp_info
->group
.prefixlen
> bp
->prefixlen
)
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	/* MSDP caches "am I the RP" per SA; tell it the mapping moved. */
	pim_msdp_i_am_rp_changed(pim);
	/* Re-evaluate RPT vs SPT forwarding for all upstream state. */
	pim_upstream_reeval_use_rpt(pim);
}
/*
 * Called when a prefix-list changes: if any configured RP references the
 * changed list by name, refresh the group-to-RP mapping once.
 *
 * NOTE(review): the visible source never set refresh_needed; the
 * set-and-break inside the match, and the guard around the refresh call,
 * are restored here so unrelated plist edits don't churn PIM state.
 */
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}
342 static int pim_rp_check_interface_addrs(struct rp_info
*rp_info
,
343 struct pim_interface
*pim_ifp
)
345 struct listnode
*node
;
346 struct pim_secondary_addr
*sec_addr
;
349 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
351 if (!pim_addr_cmp(pim_ifp
->primary_address
, rpf_addr
))
354 if (!pim_ifp
->sec_addr_list
) {
358 for (ALL_LIST_ELEMENTS_RO(pim_ifp
->sec_addr_list
, node
, sec_addr
)) {
359 if (prefix_same(&sec_addr
->addr
, &rp_info
->rp
.rpf_addr
)) {
367 static void pim_rp_check_interfaces(struct pim_instance
*pim
,
368 struct rp_info
*rp_info
)
370 struct interface
*ifp
;
372 rp_info
->i_am_rp
= 0;
373 FOR_ALL_INTERFACES (pim
->vrf
, ifp
) {
374 struct pim_interface
*pim_ifp
= ifp
->info
;
379 if (pim_rp_check_interface_addrs(rp_info
, pim_ifp
)) {
380 rp_info
->i_am_rp
= 1;
385 void pim_upstream_update(struct pim_instance
*pim
, struct pim_upstream
*up
)
387 struct pim_rpf old_rpf
;
388 enum pim_rpf_result rpf_result
;
389 pim_addr old_upstream_addr
;
390 pim_addr new_upstream_addr
;
393 old_upstream_addr
= up
->upstream_addr
;
394 pim_rp_set_upstream_addr(pim
, &new_upstream_addr
, up
->sg
.src
,
397 if (PIM_DEBUG_PIM_TRACE
)
398 zlog_debug("%s: pim upstream update for old upstream %pPA",
399 __func__
, &old_upstream_addr
);
401 if (!pim_addr_cmp(old_upstream_addr
, new_upstream_addr
))
404 /* Lets consider a case, where a PIM upstream has a better RP as a
405 * result of a new RP configuration with more precise group range.
406 * This upstream has to be added to the upstream hash of new RP's
407 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
409 if (!pim_addr_is_any(old_upstream_addr
)) {
410 /* Deregister addr with Zebra NHT */
411 pim_addr_to_prefix(&nht_p
, old_upstream_addr
);
412 if (PIM_DEBUG_PIM_TRACE
)
414 "%s: Deregister upstream %s addr %pFX with Zebra NHT",
415 __func__
, up
->sg_str
, &nht_p
);
416 pim_delete_tracked_nexthop(pim
, &nht_p
, up
, NULL
);
419 /* Update the upstream address */
420 up
->upstream_addr
= new_upstream_addr
;
422 old_rpf
.source_nexthop
.interface
= up
->rpf
.source_nexthop
.interface
;
424 rpf_result
= pim_rpf_update(pim
, up
, &old_rpf
, __func__
);
425 if (rpf_result
== PIM_RPF_FAILURE
)
426 pim_mroute_del(up
->channel_oil
, __func__
);
428 /* update kernel multicast forwarding cache (MFC) */
429 if (up
->rpf
.source_nexthop
.interface
&& up
->channel_oil
)
430 pim_upstream_mroute_iif_update(up
->channel_oil
, __func__
);
432 if (rpf_result
== PIM_RPF_CHANGED
||
433 (rpf_result
== PIM_RPF_FAILURE
&&
434 old_rpf
.source_nexthop
.interface
))
435 pim_zebra_upstream_rpf_changed(pim
, up
, &old_rpf
);
439 int pim_rp_new(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
440 const char *plist
, enum rp_source rp_src_flag
)
443 struct rp_info
*rp_info
;
444 struct rp_info
*rp_all
;
445 struct prefix group_all
;
446 struct listnode
*node
, *nnode
;
447 struct rp_info
*tmp_rp_info
;
450 struct route_node
*rn
= NULL
;
451 struct pim_upstream
*up
;
452 bool upstream_updated
= false;
454 if (pim_addr_is_any(rp_addr
))
455 return PIM_RP_BAD_ADDRESS
;
457 rp_info
= XCALLOC(MTYPE_PIM_RP
, sizeof(*rp_info
));
459 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, rp_addr
);
460 prefix_copy(&rp_info
->group
, &group
);
461 rp_info
->rp_src
= rp_src_flag
;
465 * Return if the prefix-list is already configured for this RP
467 if (pim_rp_find_prefix_list(pim
, rp_addr
, plist
)) {
468 XFREE(MTYPE_PIM_RP
, rp_info
);
473 * Barf if the prefix-list is already configured for an RP
475 if (pim_rp_prefix_list_used(pim
, plist
)) {
476 XFREE(MTYPE_PIM_RP
, rp_info
);
477 return PIM_RP_PFXLIST_IN_USE
;
481 * Free any existing rp_info entries for this RP
483 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
485 if (prefix_same(&rp_info
->rp
.rpf_addr
,
486 &tmp_rp_info
->rp
.rpf_addr
)) {
487 if (tmp_rp_info
->plist
)
488 pim_rp_del_config(pim
, rp_addr
, NULL
,
493 prefix2str(&tmp_rp_info
->group
,
499 rp_info
->plist
= XSTRDUP(MTYPE_PIM_FILTER_NAME
, plist
);
502 if (!pim_get_all_mcast_group(&group_all
)) {
503 XFREE(MTYPE_PIM_RP
, rp_info
);
504 return PIM_GROUP_BAD_ADDRESS
;
506 rp_all
= pim_rp_find_match_group(pim
, &group_all
);
509 * Barf if group is a non-multicast subnet
511 if (!prefix_match(&rp_all
->group
, &rp_info
->group
)) {
512 XFREE(MTYPE_PIM_RP
, rp_info
);
513 return PIM_GROUP_BAD_ADDRESS
;
517 * Remove any prefix-list rp_info entries for this RP
519 for (ALL_LIST_ELEMENTS(pim
->rp_list
, node
, nnode
,
521 if (tmp_rp_info
->plist
&&
522 prefix_same(&rp_info
->rp
.rpf_addr
,
523 &tmp_rp_info
->rp
.rpf_addr
)) {
524 pim_rp_del_config(pim
, rp_addr
, NULL
,
530 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
532 if (prefix_same(&rp_all
->group
, &rp_info
->group
) &&
533 pim_rpf_addr_is_inaddr_any(&rp_all
->rp
)) {
534 rp_all
->rp
.rpf_addr
= rp_info
->rp
.rpf_addr
;
535 rp_all
->rp_src
= rp_src_flag
;
536 XFREE(MTYPE_PIM_RP
, rp_info
);
538 /* Register addr with Zebra NHT */
539 nht_p
= rp_all
->rp
.rpf_addr
;
540 if (PIM_DEBUG_PIM_NHT_RP
)
542 "%s: NHT Register rp_all addr %pFX grp %pFX ",
543 __func__
, &nht_p
, &rp_all
->group
);
545 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
546 /* Find (*, G) upstream whose RP is not
549 if (pim_addr_is_any(up
->upstream_addr
) &&
550 pim_addr_is_any(up
->sg
.src
)) {
552 struct rp_info
*trp_info
;
554 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
555 trp_info
= pim_rp_find_match_group(
557 if (trp_info
== rp_all
) {
558 pim_upstream_update(pim
, up
);
559 upstream_updated
= true;
563 if (upstream_updated
)
564 pim_zebra_update_all_interfaces(pim
);
566 pim_rp_check_interfaces(pim
, rp_all
);
567 pim_rp_refresh_group_to_rp_mapping(pim
);
568 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_all
,
571 if (!pim_ecmp_nexthop_lookup(pim
,
572 &rp_all
->rp
.source_nexthop
,
573 &nht_p
, &rp_all
->group
, 1))
574 return PIM_RP_NO_PATH
;
579 * Return if the group is already configured for this RP
581 tmp_rp_info
= pim_rp_find_exact(pim
, rp_addr
, &rp_info
->group
);
583 if ((tmp_rp_info
->rp_src
!= rp_src_flag
)
584 && (rp_src_flag
== RP_SRC_STATIC
))
585 tmp_rp_info
->rp_src
= rp_src_flag
;
586 XFREE(MTYPE_PIM_RP
, rp_info
);
591 * Barf if this group is already covered by some other RP
593 tmp_rp_info
= pim_rp_find_match_group(pim
, &rp_info
->group
);
596 if (tmp_rp_info
->plist
) {
597 XFREE(MTYPE_PIM_RP
, rp_info
);
598 return PIM_GROUP_PFXLIST_OVERLAP
;
601 * If the only RP that covers this group is an
603 * 224.0.0.0/4 that is fine, ignore that one.
605 * though we must return PIM_GROUP_OVERLAP
607 if (prefix_same(&rp_info
->group
,
608 &tmp_rp_info
->group
)) {
609 if ((rp_src_flag
== RP_SRC_STATIC
)
610 && (tmp_rp_info
->rp_src
612 XFREE(MTYPE_PIM_RP
, rp_info
);
613 return PIM_GROUP_OVERLAP
;
616 result
= pim_rp_change(
620 XFREE(MTYPE_PIM_RP
, rp_info
);
627 listnode_add_sort(pim
->rp_list
, rp_info
);
629 if (!rp_info
->plist
) {
630 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
634 if (PIM_DEBUG_PIM_TRACE
)
635 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn
,
636 rp_info
, &rp_info
->group
,
637 rn
? route_node_get_lock_count(rn
) : 0);
639 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
640 if (pim_addr_is_any(up
->sg
.src
)) {
642 struct rp_info
*trp_info
;
644 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
645 trp_info
= pim_rp_find_match_group(pim
, &grp
);
647 if (trp_info
== rp_info
) {
648 pim_upstream_update(pim
, up
);
649 upstream_updated
= true;
654 if (upstream_updated
)
655 pim_zebra_update_all_interfaces(pim
);
657 pim_rp_check_interfaces(pim
, rp_info
);
658 pim_rp_refresh_group_to_rp_mapping(pim
);
660 /* Register addr with Zebra NHT */
661 nht_p
= rp_info
->rp
.rpf_addr
;
662 if (PIM_DEBUG_PIM_NHT_RP
)
663 zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
664 __func__
, &nht_p
, &rp_info
->group
);
665 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
666 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, &nht_p
,
668 return PIM_RP_NO_PATH
;
/*
 * Config-layer wrapper for pim_rp_del(): parse the optional group range
 * (defaulting to the all-multicast group) and delete the static RP.
 */
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
		       const char *group_range, const char *plist)
{
	struct prefix group;
	int result;

	if (group_range == NULL)
		result = pim_get_all_mcast_group(&group);
	else
		result = str2prefix(group_range, &group);

	if (!result) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: String to prefix failed for %pPAs group",
				__func__, &rp_addr);
		/* Cannot delete anything with an unparsable group. */
		return;
	}

	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}
695 int pim_rp_del(struct pim_instance
*pim
, pim_addr rp_addr
, struct prefix group
,
696 const char *plist
, enum rp_source rp_src_flag
)
699 struct rp_info
*rp_info
;
700 struct rp_info
*rp_all
;
702 struct route_node
*rn
;
703 bool was_plist
= false;
704 struct rp_info
*trp_info
;
705 struct pim_upstream
*up
;
706 struct bsgrp_node
*bsgrp
= NULL
;
707 struct bsm_rpinfo
*bsrp
= NULL
;
708 bool upstream_updated
= false;
711 rp_info
= pim_rp_find_prefix_list(pim
, rp_addr
, plist
);
713 rp_info
= pim_rp_find_exact(pim
, rp_addr
, &group
);
716 return PIM_RP_NOT_FOUND
;
718 if (rp_info
->plist
) {
719 XFREE(MTYPE_PIM_FILTER_NAME
, rp_info
->plist
);
723 if (PIM_DEBUG_PIM_TRACE
)
724 zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__
,
727 /* While static RP is getting deleted, we need to check if dynamic RP
728 * present for the same group in BSM RP table, then install the dynamic
729 * RP for the group node into the main rp table
731 if (rp_src_flag
== RP_SRC_STATIC
) {
732 bsgrp
= pim_bsm_get_bsgrp_node(&pim
->global_scope
, &group
);
735 bsrp
= bsm_rpinfos_first(bsgrp
->bsrp_list
);
737 if (PIM_DEBUG_PIM_TRACE
)
739 "%s: BSM RP %pPA found for the group %pFX",
740 __func__
, &bsrp
->rp_address
,
742 return pim_rp_change(pim
, bsrp
->rp_address
,
746 if (PIM_DEBUG_PIM_TRACE
)
748 "%s: BSM RP not found for the group %pFX",
753 /* Deregister addr with Zebra NHT */
754 nht_p
= rp_info
->rp
.rpf_addr
;
755 if (PIM_DEBUG_PIM_NHT_RP
)
756 zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__
,
758 pim_delete_tracked_nexthop(pim
, &nht_p
, NULL
, rp_info
);
760 if (!pim_get_all_mcast_group(&g_all
))
761 return PIM_RP_BAD_ADDRESS
;
763 rp_all
= pim_rp_find_match_group(pim
, &g_all
);
765 if (rp_all
== rp_info
) {
766 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
767 /* Find the upstream (*, G) whose upstream address is
768 * same as the deleted RP
772 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
773 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
774 pim_addr_is_any(up
->sg
.src
)) {
777 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
778 trp_info
= pim_rp_find_match_group(pim
, &grp
);
779 if (trp_info
== rp_all
) {
780 pim_upstream_rpf_clear(pim
, up
);
781 up
->upstream_addr
= PIMADDR_ANY
;
785 pim_addr_to_prefix(&rp_all
->rp
.rpf_addr
, PIMADDR_ANY
);
790 listnode_delete(pim
->rp_list
, rp_info
);
793 rn
= route_node_get(pim
->rp_table
, &rp_info
->group
);
795 if (rn
->info
!= rp_info
)
798 "Expected rn->info to be equal to rp_info");
800 if (PIM_DEBUG_PIM_TRACE
)
802 "%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
803 __func__
, rn
, rp_info
, &rp_info
->group
,
804 route_node_get_lock_count(rn
));
807 route_unlock_node(rn
);
808 route_unlock_node(rn
);
812 pim_rp_refresh_group_to_rp_mapping(pim
);
814 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
815 /* Find the upstream (*, G) whose upstream address is same as
820 rpf_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
821 if (!pim_addr_cmp(up
->upstream_addr
, rpf_addr
) &&
822 pim_addr_is_any(up
->sg
.src
)) {
825 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
826 trp_info
= pim_rp_find_match_group(pim
, &grp
);
828 /* RP not found for the group grp */
829 if (pim_rpf_addr_is_inaddr_any(&trp_info
->rp
)) {
830 pim_upstream_rpf_clear(pim
, up
);
831 pim_rp_set_upstream_addr(
832 pim
, &up
->upstream_addr
, up
->sg
.src
,
836 /* RP found for the group grp */
838 pim_upstream_update(pim
, up
);
839 upstream_updated
= true;
844 if (upstream_updated
)
845 pim_zebra_update_all_interfaces(pim
);
847 XFREE(MTYPE_PIM_RP
, rp_info
);
851 int pim_rp_change(struct pim_instance
*pim
, pim_addr new_rp_addr
,
852 struct prefix group
, enum rp_source rp_src_flag
)
855 struct route_node
*rn
;
857 struct rp_info
*rp_info
= NULL
;
858 struct pim_upstream
*up
;
859 bool upstream_updated
= false;
860 pim_addr old_rp_addr
;
862 rn
= route_node_lookup(pim
->rp_table
, &group
);
864 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
871 route_unlock_node(rn
);
872 result
= pim_rp_new(pim
, new_rp_addr
, group
, NULL
, rp_src_flag
);
876 old_rp_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
877 if (!pim_addr_cmp(new_rp_addr
, old_rp_addr
)) {
878 if (rp_info
->rp_src
!= rp_src_flag
) {
879 rp_info
->rp_src
= rp_src_flag
;
880 route_unlock_node(rn
);
885 nht_p
.family
= PIM_AF
;
886 nht_p
.prefixlen
= PIM_MAX_BITLEN
;
888 /* Deregister old RP addr with Zebra NHT */
890 if (!pim_addr_is_any(old_rp_addr
)) {
891 nht_p
= rp_info
->rp
.rpf_addr
;
892 if (PIM_DEBUG_PIM_NHT_RP
)
893 zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
895 pim_delete_tracked_nexthop(pim
, &nht_p
, NULL
, rp_info
);
898 pim_rp_nexthop_del(rp_info
);
899 listnode_delete(pim
->rp_list
, rp_info
);
900 /* Update the new RP address*/
902 pim_addr_to_prefix(&rp_info
->rp
.rpf_addr
, new_rp_addr
);
903 rp_info
->rp_src
= rp_src_flag
;
904 rp_info
->i_am_rp
= 0;
906 listnode_add_sort(pim
->rp_list
, rp_info
);
908 frr_each (rb_pim_upstream
, &pim
->upstream_head
, up
) {
909 if (pim_addr_is_any(up
->sg
.src
)) {
911 struct rp_info
*trp_info
;
913 pim_addr_to_prefix(&grp
, up
->sg
.grp
);
914 trp_info
= pim_rp_find_match_group(pim
, &grp
);
916 if (trp_info
== rp_info
) {
917 pim_upstream_update(pim
, up
);
918 upstream_updated
= true;
923 if (upstream_updated
)
924 pim_zebra_update_all_interfaces(pim
);
926 /* Register new RP addr with Zebra NHT */
927 nht_p
= rp_info
->rp
.rpf_addr
;
928 if (PIM_DEBUG_PIM_NHT_RP
)
929 zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
930 __func__
, &nht_p
, &rp_info
->group
);
932 pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
, NULL
);
933 if (!pim_ecmp_nexthop_lookup(pim
, &rp_info
->rp
.source_nexthop
, &nht_p
,
934 &rp_info
->group
, 1)) {
935 route_unlock_node(rn
);
936 return PIM_RP_NO_PATH
;
939 pim_rp_check_interfaces(pim
, rp_info
);
941 route_unlock_node(rn
);
943 pim_rp_refresh_group_to_rp_mapping(pim
);
/*
 * (Re)register every configured RP address with zebra NHT and attempt
 * an initial nexthop resolution for each.  The catch-all INADDR_ANY
 * entry is skipped — it is not a real RP.
 */
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	struct prefix nht_p;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;

		pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     &nht_p, &rp_info->group, 1))
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"Unable to lookup nexthop for rp specified");
	}
}
/*
 * Checks to see if we should elect ourself the actual RP when new if
 * addresses are added against an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		/* if i_am_rp is already set nothing to be done (adding new
		 * addresses is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP) {
				char rp[PREFIX_STRLEN];
				pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
					      rp, sizeof(rp));
				zlog_debug("%s: %s: i am rp", __func__, rp);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/* up-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
 * are removed. Removing numbers is an uncommon event in an active network
 * so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	bool old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				char rp[PREFIX_STRLEN];
				pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
					      rp, sizeof(rp));
				if (rp_info->i_am_rp) {
					zlog_debug("%s: %s: i am rp", __func__,
						   rp);
				} else {
					zlog_debug("%s: %s: i am no longer rp",
						   __func__, rp);
				}
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);
	rp_info = pim_rp_find_match_group(pim, &g);

	/* No mapping found -> we are certainly not the RP for this group. */
	if (rp_info)
		return rp_info->i_am_rp;

	return 0;
}
/*
 * Return the RP that the Group belongs too.
 *
 * On a successful lookup the RP address is (re)registered with zebra
 * NHT and a nexthop resolution is attempted before the rpf is handed
 * back; NULL is returned when no rp_info matches the group.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		struct prefix nht_p;

		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pFX grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      &nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	return NULL;
}
/*
 * Set the upstream IP address we want to talk to based upon
 * the rp configured and the source address
 *
 * If we have don't have a RP configured and the source address is *
 * then set the upstream addr as INADDR_ANY and return failure.
 *
 * Returns 1 on success, 0 when no usable upstream exists.
 */
int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
			     pim_addr source, pim_addr group)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));

	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
			 (pim_addr_is_any(source)))) {
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Received a (*,G) with no RP configured",
				   __func__);
		*up = PIMADDR_ANY;
		return 0;
	}

	/* (*,G) joins go toward the RP; (S,G) joins go toward the source. */
	if (pim_addr_is_any(source))
		*up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
	else
		*up = source;

	return 1;
}
1147 int pim_rp_config_write(struct pim_instance
*pim
, struct vty
*vty
,
1150 struct listnode
*node
;
1151 struct rp_info
*rp_info
;
1155 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1156 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1159 if (rp_info
->rp_src
== RP_SRC_BSR
)
1162 rp_addr
= pim_addr_from_prefix(&rp_info
->rp
.rpf_addr
);
1166 " pim rp %pPA prefix-list %s\n",
1167 spaces
, &rp_addr
, rp_info
->plist
);
1169 vty_out(vty
, "%s" PIM_AF_NAME
" pim rp %pPA %pFX\n",
1170 spaces
, &rp_addr
, &rp_info
->group
);
1177 void pim_rp_show_information(struct pim_instance
*pim
, struct prefix
*range
,
1178 struct vty
*vty
, json_object
*json
)
1180 struct rp_info
*rp_info
;
1181 struct rp_info
*prev_rp_info
= NULL
;
1182 struct listnode
*node
;
1185 json_object
*json_rp_rows
= NULL
;
1186 json_object
*json_row
= NULL
;
1190 "RP address group/prefix-list OIF I am RP Source Group-Type\n");
1191 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1192 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1196 pim_addr group
= rp_info
->group
.u
.prefix4
;
1198 pim_addr group
= rp_info
->group
.u
.prefix6
;
1200 const char *group_type
=
1201 pim_is_grp_ssm(pim
, group
) ? "SSM" : "ASM";
1203 if (range
&& !prefix_match(&rp_info
->group
, range
))
1206 if (rp_info
->rp_src
== RP_SRC_STATIC
)
1207 strlcpy(source
, "Static", sizeof(source
));
1208 else if (rp_info
->rp_src
== RP_SRC_BSR
)
1209 strlcpy(source
, "BSR", sizeof(source
));
1211 strlcpy(source
, "None", sizeof(source
));
1214 * If we have moved on to a new RP then add the
1215 * entry for the previous RP
1218 prefix_cmp(&prev_rp_info
->rp
.rpf_addr
,
1219 &rp_info
->rp
.rpf_addr
)) {
1220 json_object_object_addf(
1221 json
, json_rp_rows
, "%pFXh",
1222 &prev_rp_info
->rp
.rpf_addr
);
1223 json_rp_rows
= NULL
;
1227 json_rp_rows
= json_object_new_array();
1229 json_row
= json_object_new_object();
1230 json_object_string_addf(json_row
, "rpAddress", "%pFXh",
1231 &rp_info
->rp
.rpf_addr
);
1232 if (rp_info
->rp
.source_nexthop
.interface
)
1233 json_object_string_add(
1234 json_row
, "outboundInterface",
1235 rp_info
->rp
.source_nexthop
1238 json_object_string_add(json_row
,
1239 "outboundInterface",
1241 if (rp_info
->i_am_rp
)
1242 json_object_boolean_true_add(json_row
, "iAmRP");
1244 json_object_boolean_false_add(json_row
,
1248 json_object_string_add(json_row
, "prefixList",
1251 json_object_string_addf(json_row
, "group",
1254 json_object_string_add(json_row
, "source", source
);
1255 json_object_string_add(json_row
, "groupType",
1258 json_object_array_add(json_rp_rows
, json_row
);
1260 vty_out(vty
, "%-15pFXh ", &rp_info
->rp
.rpf_addr
);
1263 vty_out(vty
, "%-18s ", rp_info
->plist
);
1265 vty_out(vty
, "%-18pFX ", &rp_info
->group
);
1267 if (rp_info
->rp
.source_nexthop
.interface
)
1268 vty_out(vty
, "%-16s ",
1269 rp_info
->rp
.source_nexthop
1272 vty_out(vty
, "%-16s ", "(Unknown)");
1274 if (rp_info
->i_am_rp
)
1275 vty_out(vty
, "yes");
1279 vty_out(vty
, "%14s", source
);
1280 vty_out(vty
, "%6s\n", group_type
);
1282 prev_rp_info
= rp_info
;
1286 if (prev_rp_info
&& json_rp_rows
)
1287 json_object_object_addf(json
, json_rp_rows
, "%pFXh",
1288 &prev_rp_info
->rp
.rpf_addr
);
1292 void pim_resolve_rp_nh(struct pim_instance
*pim
, struct pim_neighbor
*nbr
)
1294 struct listnode
*node
= NULL
;
1295 struct rp_info
*rp_info
= NULL
;
1296 struct nexthop
*nh_node
= NULL
;
1297 struct prefix nht_p
;
1298 struct pim_nexthop_cache pnc
;
1300 for (ALL_LIST_ELEMENTS_RO(pim
->rp_list
, node
, rp_info
)) {
1301 if (pim_rpf_addr_is_inaddr_any(&rp_info
->rp
))
1304 nht_p
= rp_info
->rp
.rpf_addr
;
1305 memset(&pnc
, 0, sizeof(struct pim_nexthop_cache
));
1306 if (!pim_find_or_track_nexthop(pim
, &nht_p
, NULL
, rp_info
,
1310 for (nh_node
= pnc
.nexthop
; nh_node
; nh_node
= nh_node
->next
) {
1312 if (!pim_addr_is_any(nh_node
->gate
.ipv4
))
1315 if (!pim_addr_is_any(nh_node
->gate
.ipv6
))
1319 struct interface
*ifp1
= if_lookup_by_index(
1320 nh_node
->ifindex
, pim
->vrf
->vrf_id
);
1322 if (nbr
->interface
!= ifp1
)
1326 nh_node
->gate
.ipv4
= nbr
->source_addr
;
1328 nh_node
->gate
.ipv6
= nbr
->source_addr
;
1330 if (PIM_DEBUG_PIM_NHT_RP
)
1332 "%s: addr %pFXh new nexthop addr %pPAs interface %s",
1333 __func__
, &nht_p
, &nbr
->source_addr
,