// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PIM for Quagga
 * Copyright (C) 2015 Cumulus Networks, Inc.
 * Donald Sharp
 */
#include <zebra.h>

#include "lib/json.h"
#include "log.h"
#include "network.h"
#include "if.h"
#include "linklist.h"
#include "prefix.h"
#include "memory.h"
#include "vty.h"
#include "vrf.h"
#include "plist.h"
#include "nexthop.h"
#include "table.h"
#include "lib_errors.h"

#include "pimd.h"
#include "pim_instance.h"
#include "pim_vty.h"
#include "pim_str.h"
#include "pim_iface.h"
#include "pim_rp.h"
#include "pim_rpf.h"
#include "pim_sock.h"
#include "pim_memory.h"
#include "pim_neighbor.h"
#include "pim_msdp.h"
#include "pim_nht.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_zebra.h"
#include "pim_bsm.h"
#include "pim_util.h"
#include "pim_ssm.h"
#include "termtable.h"

/* Clean up each node's data in pim->rpf_hash */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	list_delete(&pnc->rp_list);

	hash_clean(pnc->upstream_hash, NULL);
	hash_free(pnc->upstream_hash);
	pnc->upstream_hash = NULL;
	if (pnc->nexthop)
		nexthops_free(pnc->nexthop);

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}

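/* Free an rp_info entry, including its prefix-list name if one was set */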
static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);

	XFREE(MTYPE_PIM_RP, rp_info);
}

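/*
 * Sort comparator for pim->rp_list: order by RP address first, then by
 * group prefix.
 */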
int pim_rp_list_cmp(void *v1, void *v2)
{
	struct rp_info *rp1 = (struct rp_info *)v1;
	struct rp_info *rp2 = (struct rp_info *)v2;
	int ret;

	/*
	 * Sort by RP IP address
	 */
	ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
	if (ret)
		return ret;

	/*
	 * Sort by group IP address
	 */
	ret = prefix_cmp(&rp1->group, &rp2->group);
	if (ret)
		return ret;

	return 0;
}

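/*
 * Initialize per-instance RP state: the sorted rp_list, the rp_table
 * radix tree, and a catch-all rp_info covering the all-multicast group
 * range with an unset (ANY) RP address.
 */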
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!pim_get_all_mcast_group(&rp_info->group)) {
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	rp_info->rp.rpf_addr = PIMADDR_ANY;

	listnode_add(pim->rp_list, rp_info);

	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));
}

void pim_rp_free(struct pim_instance *pim)
{
	if (pim->rp_table)
		route_table_finish(pim->rp_table);
	pim->rp_table = NULL;

	if (pim->rp_list)
		list_delete(&pim->rp_list);
}

/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       pim_addr rp, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}

/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}

/*
 * Given an RP's address, return the RP's rp_info that is an exact match for
 * 'group'
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
					 const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}

/*
 * XXX: long-term issue: we don't actually have a good "ip address-list"
 * implementation.  ("access-list XYZ" is the closest but honestly it's
 * kinda garbage.)
 *
 * So it's using a prefix-list to match an address here, which causes very
 * unexpected results for the user since prefix-lists by default only match
 * when the prefix length is an exact match too.  i.e. you'd have to add the
 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
 *
 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
 * list match (this is the only user of that.)
 *
 * In the long run, we need to add an "ip address-list", but that's a wholly
 * separate bag of worms, and existing configs using ip prefix-list would
 * drop into the UX pitfall.
 */

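/*
 * Illustrative example (not from the original source; names are made up):
 * with address_mode matching, a config such as
 *
 *   ip prefix-list rp-groups seq 5 permit 224.1.0.0/16
 *   ip pim rp 192.0.2.1 prefix-list rp-groups
 *
 * matches any group address falling inside 224.1.0.0/16, without needing
 * "le 32" on the prefix-list entry.
 */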
#include "lib/plist_int.h"

/*
 * Given a group, return the rp_info for that group
 */
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	const struct prefix *bp;
	const struct prefix_list_entry *entry;
	struct route_node *rn;

	bp = NULL;
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);

			if (prefix_list_apply_ext(plist, &entry, group, true)
				    == PREFIX_DENY ||
			    !entry)
				continue;

			if (!best) {
				best = rp_info;
				bp = &entry->prefix;
				continue;
			}

			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
				best = rp_info;
				bp = &entry->prefix;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		flog_err(
			EC_LIB_DEVELOPMENT,
			"%s: BUG We should have found default group information",
			__func__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_PIM_TRACE) {
		if (best)
			zlog_debug(
				"Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
				group, best->plist, rn, &rp_info->group);
		else
			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
				   rn, &rp_info->group);
	}

	route_unlock_node(rn);

	/*
	 * RPs with prefix lists have 224.0.0.0/4 as their group, which
	 * matches anything.  So if the rp_info we found has a prefix list,
	 * the prefix-list lookup above is authoritative and 'best' is the
	 * answer (even if it is NULL).
	 */
	if (!rp_info || rp_info->plist)
		return best;

	/*
	 * We found a non-prefix-list rp_info in the lookup and there were
	 * no prefix lists to choose from, so return it!
	 */
	if (!best)
		return rp_info;

	/*
	 * If we have both a matching non-prefix-list entry and a matching
	 * prefix-list entry, return the rp_info with the longest prefix
	 * match.  If they are equal, use the prefix list (but let's hope
	 * the end-operator doesn't do this).
	 */
	if (rp_info->group.prefixlen > bp->prefixlen)
		best = rp_info;

	return best;
}

/*
 * When the user makes "ip pim rp" configuration changes, or when the
 * prefix-list(s) used by those statements change, we must tickle the
 * upstream state for each group so that it re-looks-up who its RP
 * should be.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}

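/*
 * Called when a prefix-list changes: if any configured RP references
 * the updated list, refresh the group-to-RP mapping.
 */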
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}

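/*
 * Return 1 if any address on the interface (primary or secondary)
 * matches the RP address in rp_info.
 */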
static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
					struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct pim_secondary_addr *sec_addr;
	pim_addr sec_paddr;

	if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
		return 1;

	if (!pim_ifp->sec_addr_list) {
		return 0;
	}

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
		sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
		/* If the RP address is one of our own, that is enough to say
		 * "I am RP"; the prefix length should not matter here */
		if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
			return 1;
	}

	return 0;
}

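/*
 * Recompute rp_info->i_am_rp by checking the RP address against the
 * addresses of every PIM-enabled interface in the VRF.
 */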
static void pim_rp_check_interfaces(struct pim_instance *pim,
				    struct rp_info *rp_info)
{
	struct interface *ifp;

	rp_info->i_am_rp = 0;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			rp_info->i_am_rp = 1;
		}
	}
}

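/*
 * Re-evaluate the upstream address of 'up' after an RP change; if it
 * differs, move NHT tracking to the new address, update the RPF and
 * refresh the kernel multicast forwarding cache accordingly.
 */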
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Consider the case where a PIM upstream gets a better RP as a
	 * result of a new RP configuration with a more precise group range.
	 * This upstream has to be added to the upstream hash of the new
	 * RP's NHT (pnc) and removed from the old RP's NHT upstream hash.
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);
}

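/*
 * Install a new RP configuration for 'group' (or for the given
 * prefix-list).  Handles the catch-all group takeover, duplicate and
 * overlap checks, upstream re-evaluation and NHT registration.
 * Returns PIM_SUCCESS or one of the PIM_RP_ / PIM_GROUP_ error codes.
 */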
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for another RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP covering this group is one
				 * configured for 224.0.0.0/4, that is fine;
				 * ignore it.  For all others, though, we
				 * must return PIM_GROUP_OVERLAP.
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}

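/*
 * CLI-facing delete: parse the group string (or use the all-multicast
 * range when none is given) and remove the static RP configuration.
 */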
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
		       const char *group_range, const char *plist)
{
	struct prefix group;
	int result;

	if (group_range == NULL)
		result = pim_get_all_mcast_group(&group);
	else
		result = str2prefix(group_range, &group);

	if (!result) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: String to prefix failed for %pPAs group",
				__func__, &rp_addr);
		return;
	}

	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}

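/*
 * Remove an RP for 'group' (or the given prefix-list).  If a static RP
 * is deleted and a BSR-learned RP exists for the same group, fall back
 * to the dynamic RP instead of removing the mapping entirely.
 */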
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While a static RP is being deleted, check whether a dynamic RP is
	 * present for the same group in the BSM RP table; if so, install
	 * the dynamic RP for that group node into the main RP table.
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * the same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is the same
		 * as the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}

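/*
 * Replace the RP for an existing group mapping with 'new_rp_addr'
 * (e.g. when a BSR-learned RP supersedes or falls back from a static
 * one), re-registering NHT state and re-evaluating matching upstreams.
 */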
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address */

	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}

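/*
 * Walk every configured RP, registering each with Zebra NHT and
 * resolving its nexthop (typically called once the zebra connection
 * is up).
 */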
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	pim_addr nht_p;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;

		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     nht_p, &rp_info->group, 1)) {
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"Unable to lookup nexthop for rp specified");
			pim_rp_nexthop_del(rp_info);
		}
	}
}

/*
 * Check whether we should elect ourselves the actual RP when new
 * addresses are added to an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		/* If i_am_rp is already set there is nothing to be done
		 * (adding new addresses is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug("%s: %pPA: i am rp", __func__,
					   &rp_info->rp.rpf_addr);
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}

/* Un-optimized re-evaluation of "i_am_rp".  This is used when interface
 * addresses are removed.  Removing addresses is an uncommon event in an
 * active network, so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				if (rp_info->i_am_rp)
					zlog_debug("%s: %pPA: i am rp",
						   __func__,
						   &rp_info->rp.rpf_addr);
				else
					zlog_debug(
						"%s: %pPA: i am no longer rp",
						__func__,
						&rp_info->rp.rpf_addr);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}

/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);
	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info)
		return rp_info->i_am_rp;
	return 0;
}

/*
 * RP(G)
 *
 * Return the RP that the group belongs to.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		pim_addr nht_p;

		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	// About to Go Down
	return NULL;
}

/*
 * Set the upstream IP address we want to talk to, based upon
 * the configured RP and the source address.
 *
 * If we don't have an RP configured and the source address is *
 * (ANY), set the upstream address to ANY and return failure.
 */
int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
			     pim_addr source, pim_addr group)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));

	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
			 (pim_addr_is_any(source)))) {
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Received a (*,G) with no RP configured",
				   __func__);
		*up = PIMADDR_ANY;
		return 0;
	}

	if (pim_addr_is_any(source))
		*up = rp_info->rp.rpf_addr;
	else
		*up = source;

	return 1;
}

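/*
 * Write the static RP configuration lines (skipping BSR-learned RPs)
 * to the vty; returns the number of lines written.
 */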
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
			const char *spaces)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int count = 0;
	pim_addr rp_addr;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		if (rp_info->rp_src == RP_SRC_BSR)
			continue;

		rp_addr = rp_info->rp.rpf_addr;
		if (rp_info->plist)
			vty_out(vty,
				"%s" PIM_AF_NAME
				" pim rp %pPA prefix-list %s\n",
				spaces, &rp_addr, rp_info->plist);
		else
			vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
				spaces, &rp_addr, &rp_info->group);
		count++;
	}

	return count;
}

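/*
 * Render the RP table for the rp-info show command: one row per
 * RP/group mapping, as a ttable for plain vty output or grouped-by-RP
 * JSON.
 */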
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	struct ttable *tt = NULL;
	char *table = NULL;
	char source[7];
	char grp[INET6_ADDRSTRLEN];

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		if (range && !prefix_match(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			prefix2str(&rp_info->group, grp, sizeof(grp));
			ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
				       &rp_info->rp.rpf_addr,
				       rp_info->plist ? rp_info->plist : grp,
				       rp_info->rp.source_nexthop.interface
					       ? rp_info->rp.source_nexthop
							 .interface->name
					       : "Unknown",
				       rp_info->i_am_rp ? "yes" : "no",
				       source, group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Dump the generated table. */
	if (!json) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	} else {
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pPA",
						&prev_rp_info->rp.rpf_addr);
	}
}

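/*
 * When a new PIM neighbor is learned, fix up RP nexthops that resolve
 * via a connected route on that neighbor's interface: such nexthops
 * have no gateway yet, so use the neighbor's source address as the
 * gateway.
 */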
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
	struct listnode *node = NULL;
	struct rp_info *rp_info = NULL;
	struct nexthop *nh_node = NULL;
	pim_addr nht_p;
	struct pim_nexthop_cache pnc;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
			continue;

		for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
#if PIM_IPV == 4
			if (!pim_addr_is_any(nh_node->gate.ipv4))
				continue;
#else
			if (!pim_addr_is_any(nh_node->gate.ipv6))
				continue;
#endif

			struct interface *ifp1 = if_lookup_by_index(
				nh_node->ifindex, pim->vrf->vrf_id);

			if (nbr->interface != ifp1)
				continue;

#if PIM_IPV == 4
			nh_node->gate.ipv4 = nbr->source_addr;
#else
			nh_node->gate.ipv6 = nbr->source_addr;
#endif
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: addr %pPA new nexthop addr %pPAs interface %s",
					__func__, &nht_p, &nbr->source_addr,
					ifp1->name);
		}
	}
}