/*
 * Copyright (C) 2015 Cumulus Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib_errors.h"
#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_memory.h"
#include "pim_neighbor.h"
#include "pim_mroute.h"
#include "pim_zebra.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	list_delete(&pnc->rp_list);

	hash_clean(pnc->upstream_hash, NULL);
	hash_free(pnc->upstream_hash);
	pnc->upstream_hash = NULL;

	nexthops_free(pnc->nexthop);

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}
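/*
 * Usage sketch (an assumption, not shown in this file): instance
 * teardown is expected to pass this function to hash_clean() as the
 * per-node free callback, e.g.
 *
 *	hash_clean(pim->rpf_hash, (void *)pim_rp_list_hash_clean);
 *
 * so every cached nexthop entry is released before the hash itself.
 */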
static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);

	XFREE(MTYPE_PIM_RP, rp_info);
}
int pim_rp_list_cmp(void *v1, void *v2)
{
	struct rp_info *rp1 = (struct rp_info *)v1;
	struct rp_info *rp2 = (struct rp_info *)v2;
	int ret;

	/*
	 * Sort by RP IP address
	 */
	ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
	if (ret)
		return ret;

	/*
	 * Sort by group IP address
	 */
	ret = prefix_cmp(&rp1->group, &rp2->group);

	return ret;
}
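/*
 * pim_rp_list_cmp() is installed as pim->rp_list->cmp in pim_rp_init()
 * below, so listnode_add_sort() keeps rp_list ordered by RP address
 * first and by group prefix second.
 */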
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!pim_get_all_mcast_group(&rp_info->group)) {
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	rp_info->rp.rpf_addr = PIMADDR_ANY;

	listnode_add(pim->rp_list, rp_info);

	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));
}
void pim_rp_free(struct pim_instance *pim)
{
	if (pim->rp_table)
		route_table_finish(pim->rp_table);
	pim->rp_table = NULL;

	if (pim->rp_list)
		list_delete(&pim->rp_list);
}
/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       pim_addr rp, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}
/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}
/*
 * Given an RP's address, return the RP's rp_info that is an exact match for
 * 'group'
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
					 const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}
/*
 * XXX: long-term issue: we don't actually have a good "ip address-list"
 * implementation.  ("access-list XYZ" is the closest but honestly it's
 * kinda bad.)
 *
 * So it's using a prefix-list to match an address here, which causes very
 * unexpected results for the user since prefix-lists by default only match
 * when the prefix length is an exact match too.  i.e. you'd have to add the
 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
 *
 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
 * list match (this is the only user for that.)
 *
 * In the long run, we need to add a "ip address-list", but that's a wholly
 * separate bag of worms, and existing configs using ip prefix-list would
 * drop into the UX pitfall.
 */

#include "lib/plist_int.h"

/*
 * Given a group, return the rp_info for that group
 */
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	const struct prefix *bp;
	const struct prefix_list_entry *entry;
	struct route_node *rn;

	bp = NULL;
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);

			if (prefix_list_apply_ext(plist, &entry, group, true)
				    == PREFIX_DENY ||
			    !entry)
				continue;

			if (!best) {
				best = rp_info;
				bp = &entry->prefix;
				continue;
			}

			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
				best = rp_info;
				bp = &entry->prefix;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		flog_err(
			EC_LIB_DEVELOPMENT,
			"%s: BUG We should have found default group information",
			__func__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_PIM_TRACE) {
		if (best)
			zlog_debug(
				"Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
				group, best->plist, rn, &rp_info->group);
		else
			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
				   rn, &rp_info->group);
	}

	route_unlock_node(rn);

	/*
	 * rp's with prefix lists have the group as 224.0.0.0/4 which will
	 * match anything.  So if we have a rp_info that should match a prefix
	 * list then if we do match then best should be the answer( even
	 * if it is the default)
	 */
	if (!rp_info || (rp_info && rp_info->plist))
		return best;

	/*
	 * So we have a non plist rp_info found in the lookup and no plists
	 * at all to be chosen, return it!
	 */
	if (!best)
		return rp_info;

	/*
	 * If we have a matching non prefix list and a matching prefix
	 * list we should return the actual rp_info that has the LPM
	 * If they are equal, use the prefix-list( but let's hope
	 * the end-operator doesn't do this )
	 */
	if (rp_info->group.prefixlen > bp->prefixlen)
		best = rp_info;

	return best;
}
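/*
 * Illustrative lookup (hypothetical config): with a static
 * "ip pim rp 10.1.1.1 224.1.0.0/16" and a prefix-list RP whose best
 * matched entry is 224.1.1.0/24, a query for 224.1.1.1 returns the
 * prefix-list rp_info, since its matched entry (/24) is longer than
 * the static group (/16); on an equal length the prefix-list entry is
 * kept, exactly as the final comparison above spells out.
 */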
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}
static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
					struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct pim_secondary_addr *sec_addr;
	pim_addr sec_paddr;

	if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
		return 1;

	if (!pim_ifp->sec_addr_list) {
		return 0;
	}

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
		sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
		/* If an RP-address is self, It should be enough to say
		 * I am RP the prefix-length should not matter here */
		if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
			return 1;
	}

	return 0;
}
static void pim_rp_check_interfaces(struct pim_instance *pim,
				    struct rp_info *rp_info)
{
	struct interface *ifp;

	rp_info->i_am_rp = 0;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			rp_info->i_am_rp = 1;
		}
	}
}
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Let's consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);
}
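/*
 * pim_rp_new() below is the common entry point for installing RP
 * state: static configuration (e.g. a hypothetical
 * "ip pim rp 192.0.2.1 239.0.0.0/8" would arrive with
 * rp_src_flag == RP_SRC_STATIC and plist == NULL) as well as the
 * BSR-learned path via pim_rp_change() end up here, and duplicate or
 * overlapping requests are rejected with the PIM_RP_* / PIM_GROUP_*
 * codes returned below.
 */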
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {
		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
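/*
 * pim_rp_del_config() below is the config-facing wrapper around
 * pim_rp_del(): it parses the optional group string (falling back to
 * the all-multicast range when none is given) and always deletes with
 * RP_SRC_STATIC.
 */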
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
		       const char *group_range, const char *plist)
{
	struct prefix group;
	int result;

	if (group_range == NULL)
		result = pim_get_all_mcast_group(&group);
	else
		result = str2prefix(group_range, &group);

	if (!result) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: String to prefix failed for %pPAs group",
				__func__, &rp_addr);
		return;
	}

	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);
		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			} else {
				/* RP found for the group grp */
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
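/*
 * A note on the paired route_unlock_node() calls in pim_rp_del()
 * above: route_node_get() takes a fresh lock for that lookup, and a
 * second reference is still held from when the node was created in
 * pim_rp_new()/pim_rp_init(), so both must be dropped before the
 * route node can actually be freed.
 */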
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL,
				    rp_src_flag);
		return result;
	}

	rp_info = rn->info;
	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL,
				    rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
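/*
 * pim_rp_setup() re-registers every configured RP with Zebra NHT;
 * presumably it runs when an instance (re)starts and nexthop tracking
 * state must be rebuilt, which would explain why a failed nexthop
 * lookup is only logged here instead of being treated as an error.
 */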
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	pim_addr nht_p;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;

		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     nht_p, &rp_info->group, 1))
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"Unable to lookup nexthop for rp specified");
	}
}
/*
 * Checks to see if we should elect ourself the actual RP when new
 * addresses are added to an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		/* if i_am_rp is already set nothing to be done (adding new
		 * addresses is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug("%s: %pPA: i am rp", __func__,
					   &rp_info->rp.rpf_addr);
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/* un-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
 * are removed. Removing addresses is an uncommon event in an active network
 * so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;

			if (PIM_DEBUG_PIM_NHT_RP) {
				if (rp_info->i_am_rp)
					zlog_debug("%s: %pPA: i am rp",
						   __func__,
						   &rp_info->rp.rpf_addr);
				else
					zlog_debug(
						"%s: %pPA: i am no longer rp",
						__func__,
						&rp_info->rp.rpf_addr);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);
	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info)
		return rp_info->i_am_rp;

	return 0;
}
/*
 * Return the RP that the Group belongs to.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;
	pim_addr nht_p;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	return NULL;
}
/*
 * Set the upstream IP address we want to talk to based upon
 * the rp configured and the source address
 *
 * If we don't have an RP configured and the source address is *
 * then set the upstream addr as INADDR_ANY and return failure.
 */
int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
			     pim_addr source, pim_addr group)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));

	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
			 (pim_addr_is_any(source)))) {
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Received a (*,G) with no RP configured",
				   __func__);
		*up = PIMADDR_ANY;
		return 0;
	}

	if (pim_addr_is_any(source))
		*up = rp_info->rp.rpf_addr;
	else
		*up = source;

	return 1;
}
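/*
 * Example of the lines pim_rp_config_write() below emits (addresses
 * purely illustrative): for IPv4 this yields " ip pim rp 192.0.2.1
 * 239.0.0.0/8", or " ip pim rp 192.0.2.1 prefix-list foo" for a
 * prefix-list RP, with PIM_AF_NAME supplying the "ip"/"ipv6" token
 * and 'spaces' the leading indentation.
 */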
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
			const char *spaces)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int count = 0;
	pim_addr rp_addr;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		if (rp_info->rp_src == RP_SRC_BSR)
			continue;

		rp_addr = rp_info->rp.rpf_addr;
		if (rp_info->plist)
			vty_out(vty,
				"%s" PIM_AF_NAME
				" pim rp %pPA prefix-list %s\n",
				spaces, &rp_addr, rp_info->plist);
		else
			vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
				spaces, &rp_addr, &rp_info->group);
		count++;
	}

	return count;
}
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	char source[7];

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json)
		vty_out(vty,
			"RP address       group/prefix-list   OIF               I am RP     Source   Group-Type\n");
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		if (range && !prefix_match(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			vty_out(vty, "%-15pPA  ", &rp_info->rp.rpf_addr);

			if (rp_info->plist)
				vty_out(vty, "%-18s  ", rp_info->plist);
			else
				vty_out(vty, "%-18pFX  ", &rp_info->group);

			if (rp_info->rp.source_nexthop.interface)
				vty_out(vty, "%-16s  ",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				vty_out(vty, "%-16s  ", "(Unknown)");

			if (rp_info->i_am_rp)
				vty_out(vty, "yes");
			else
				vty_out(vty, "no");

			vty_out(vty, "%14s", source);
			vty_out(vty, "%6s\n", group_type);
		}
		prev_rp_info = rp_info;
	}

	if (prev_rp_info && json_rp_rows)
		json_object_object_addf(json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
}
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
	struct listnode *node = NULL;
	struct rp_info *rp_info = NULL;
	struct nexthop *nh_node = NULL;
	pim_addr nht_p;
	struct pim_nexthop_cache pnc;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info,
					       &pnc))
			continue;

		for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
#if PIM_IPV == 4
			if (!pim_addr_is_any(nh_node->gate.ipv4))
				continue;
#else
			if (!pim_addr_is_any(nh_node->gate.ipv6))
				continue;
#endif

			struct interface *ifp1 = if_lookup_by_index(
				nh_node->ifindex, pim->vrf->vrf_id);

			if (nbr->interface != ifp1)
				continue;

#if PIM_IPV == 4
			nh_node->gate.ipv4 = nbr->source_addr;
#else
			nh_node->gate.ipv6 = nbr->source_addr;
#endif
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: addr %pPA new nexthop addr %pPAs interface %s",
					__func__, &nht_p, &nbr->source_addr,
					ifp1->name);
		}
	}
}