]> git.proxmox.com Git - mirror_frr.git/blob - pimd/pim_rp.c
Merge pull request #10770 from chiragshah6/evpn_dev3
[mirror_frr.git] / pimd / pim_rp.c
1 /*
2 * PIM for Quagga
3 * Copyright (C) 2015 Cumulus Networks, Inc.
4 * Donald Sharp
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20 #include <zebra.h>
21
22 #include "lib/json.h"
23 #include "log.h"
24 #include "network.h"
25 #include "if.h"
26 #include "linklist.h"
27 #include "prefix.h"
28 #include "memory.h"
29 #include "vty.h"
30 #include "vrf.h"
31 #include "plist.h"
32 #include "nexthop.h"
33 #include "table.h"
34 #include "lib_errors.h"
35
36 #include "pimd.h"
37 #include "pim_vty.h"
38 #include "pim_str.h"
39 #include "pim_iface.h"
40 #include "pim_rp.h"
41 #include "pim_rpf.h"
42 #include "pim_sock.h"
43 #include "pim_memory.h"
44 #include "pim_neighbor.h"
45 #include "pim_msdp.h"
46 #include "pim_nht.h"
47 #include "pim_mroute.h"
48 #include "pim_oil.h"
49 #include "pim_zebra.h"
50 #include "pim_bsm.h"
51 #include "pim_util.h"
52 #include "pim_ssm.h"
53
54 /* Cleanup pim->rpf_hash each node data */
55 void pim_rp_list_hash_clean(void *data)
56 {
57 struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
58
59 list_delete(&pnc->rp_list);
60
61 hash_clean(pnc->upstream_hash, NULL);
62 hash_free(pnc->upstream_hash);
63 pnc->upstream_hash = NULL;
64 if (pnc->nexthop)
65 nexthops_free(pnc->nexthop);
66
67 XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
68 }
69
70 static void pim_rp_info_free(struct rp_info *rp_info)
71 {
72 XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
73
74 XFREE(MTYPE_PIM_RP, rp_info);
75 }
76
77 int pim_rp_list_cmp(void *v1, void *v2)
78 {
79 struct rp_info *rp1 = (struct rp_info *)v1;
80 struct rp_info *rp2 = (struct rp_info *)v2;
81 int ret;
82
83 /*
84 * Sort by RP IP address
85 */
86 ret = prefix_cmp(&rp1->rp.rpf_addr, &rp2->rp.rpf_addr);
87 if (ret)
88 return ret;
89
90 /*
91 * Sort by group IP address
92 */
93 ret = prefix_cmp(&rp1->group, &rp2->group);
94 if (ret)
95 return ret;
96
97 return 0;
98 }
99
100 void pim_rp_init(struct pim_instance *pim)
101 {
102 struct rp_info *rp_info;
103 struct route_node *rn;
104
105 pim->rp_list = list_new();
106 pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
107 pim->rp_list->cmp = pim_rp_list_cmp;
108
109 pim->rp_table = route_table_init();
110
111 rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
112
113 if (!pim_get_all_mcast_group(&rp_info->group)) {
114 flog_err(EC_LIB_DEVELOPMENT,
115 "Unable to convert all-multicast prefix");
116 list_delete(&pim->rp_list);
117 route_table_finish(pim->rp_table);
118 XFREE(MTYPE_PIM_RP, rp_info);
119 return;
120 }
121 pim_addr_to_prefix(&rp_info->rp.rpf_addr, PIMADDR_ANY);
122
123 listnode_add(pim->rp_list, rp_info);
124
125 rn = route_node_get(pim->rp_table, &rp_info->group);
126 rn->info = rp_info;
127 if (PIM_DEBUG_PIM_TRACE)
128 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
129 rp_info, &rp_info->group,
130 route_node_get_lock_count(rn));
131 }
132
133 void pim_rp_free(struct pim_instance *pim)
134 {
135 if (pim->rp_table)
136 route_table_finish(pim->rp_table);
137 pim->rp_table = NULL;
138
139 if (pim->rp_list)
140 list_delete(&pim->rp_list);
141 }
142
143 /*
144 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
145 */
146 static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
147 pim_addr rp, const char *plist)
148 {
149 struct listnode *node;
150 struct rp_info *rp_info;
151 struct prefix rp_prefix;
152
153 pim_addr_to_prefix(&rp_prefix, rp);
154
155 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
156 if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
157 rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
158 return rp_info;
159 }
160 }
161
162 return NULL;
163 }
164
165 /*
166 * Return true if plist is used by any rp_info
167 */
168 static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
169 {
170 struct listnode *node;
171 struct rp_info *rp_info;
172
173 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
174 if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
175 return 1;
176 }
177 }
178
179 return 0;
180 }
181
182 /*
183 * Given an RP's address, return the RP's rp_info that is an exact match for
184 * 'group'
185 */
186 static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
187 const struct prefix *group)
188 {
189 struct listnode *node;
190 struct rp_info *rp_info;
191 struct prefix rp_prefix;
192
193 pim_addr_to_prefix(&rp_prefix, rp);
194 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
195 if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
196 prefix_same(&rp_info->group, group))
197 return rp_info;
198 }
199
200 return NULL;
201 }
202
203 /*
204 * XXX: long-term issue: we don't actually have a good "ip address-list"
205 * implementation. ("access-list XYZ" is the closest but honestly it's
206 * kinda garbage.)
207 *
208 * So it's using a prefix-list to match an address here, which causes very
209 * unexpected results for the user since prefix-lists by default only match
210 * when the prefix length is an exact match too. i.e. you'd have to add the
211 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
212 *
213 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
214 * list match (this is the only user for that.)
215 *
216 * In the long run, we need to add a "ip address-list", but that's a wholly
217 * separate bag of worms, and existing configs using ip prefix-list would
218 * drop into the UX pitfall.
219 */
220
221 #include "lib/plist_int.h"
222
/*
 * Given a group, return the rp_info for that group
 *
 * Two candidate sets are consulted: prefix-list based RPs (scanned
 * linearly, keeping the one whose matching list entry has the longest
 * prefix) and the static rp_table (longest-prefix match, which always
 * at least hits the all-multicast catch-all installed by pim_rp_init).
 */
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	const struct prefix *bp;
	const struct prefix_list_entry *entry;
	struct route_node *rn;

	bp = NULL;
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);

			/* address_mode=true: match 'group' as a plain
			 * address, ignoring the entry's implicit
			 * prefix-length constraint (see comment above).
			 * NOTE(review): plist may be NULL when the list is
			 * not configured; presumably prefix_list_apply_ext
			 * treats NULL as deny -- confirm.
			 */
			if (prefix_list_apply_ext(plist, &entry, group, true)
			    == PREFIX_DENY || !entry)
				continue;

			if (!best) {
				best = rp_info;
				bp = &entry->prefix;
				continue;
			}

			/* keep the more specific prefix-list match */
			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
				best = rp_info;
				bp = &entry->prefix;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		flog_err(
			EC_LIB_DEVELOPMENT,
			"%s: BUG We should have found default group information",
			__func__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Lookedup: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));

	/* route_node_match() returned a locked node; drop our reference
	 * (the rp_info itself stays alive via the lock taken at insert).
	 */
	route_unlock_node(rn);

	if (!best)
		return rp_info;

	/* NOTE(review): this prefers the rp_table entry when its group is
	 * *less* specific than the best prefix-list match -- looks
	 * inverted relative to the usual longest-prefix-wins rule; confirm
	 * this is intentional before changing it.
	 */
	if (rp_info->group.prefixlen < best->group.prefixlen)
		best = rp_info;

	return best;
}
284
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	/* both MSDP's "I am RP" state and each upstream's RPT-vs-SPT choice
	 * depend on the group-to-RP mapping, so re-evaluate both
	 */
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
297
298 void pim_rp_prefix_list_update(struct pim_instance *pim,
299 struct prefix_list *plist)
300 {
301 struct listnode *node;
302 struct rp_info *rp_info;
303 int refresh_needed = 0;
304
305 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
306 if (rp_info->plist
307 && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
308 refresh_needed = 1;
309 break;
310 }
311 }
312
313 if (refresh_needed)
314 pim_rp_refresh_group_to_rp_mapping(pim);
315 }
316
317 static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
318 struct pim_interface *pim_ifp)
319 {
320 struct listnode *node;
321 struct pim_secondary_addr *sec_addr;
322 pim_addr rpf_addr;
323
324 rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
325
326 if (!pim_addr_cmp(pim_ifp->primary_address, rpf_addr))
327 return 1;
328
329 if (!pim_ifp->sec_addr_list) {
330 return 0;
331 }
332
333 for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
334 if (prefix_same(&sec_addr->addr, &rp_info->rp.rpf_addr)) {
335 return 1;
336 }
337 }
338
339 return 0;
340 }
341
342 static void pim_rp_check_interfaces(struct pim_instance *pim,
343 struct rp_info *rp_info)
344 {
345 struct interface *ifp;
346
347 rp_info->i_am_rp = 0;
348 FOR_ALL_INTERFACES (pim->vrf, ifp) {
349 struct pim_interface *pim_ifp = ifp->info;
350
351 if (!pim_ifp)
352 continue;
353
354 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
355 rp_info->i_am_rp = 1;
356 }
357 }
358 }
359
/*
 * Re-resolve the upstream (RPF) address of 'up' after a change in the
 * group-to-RP mapping: move the Zebra NHT registration from the old
 * address to the new one, recompute the RPF neighbor, and refresh the
 * kernel MFC / zebra state when the RPF interface changed.
 */
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;
	struct prefix nht_p;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	/* nothing to do when the mapping did not actually move this entry */
	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		pim_addr_to_prefix(&nht_p, old_upstream_addr);
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pFX with Zebra NHT",
				__func__, up->sg_str, &nht_p);
		pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	/* remember the previous RPF interface so we can tell zebra what
	 * changed; pim_rpf_update() fills the rest of old_rpf
	 */
	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}
413
/*
 * Install a new RP mapping, either for an explicit 'group' prefix or
 * for the groups matched by prefix-list 'plist' (exactly one of the two
 * is used). rp_src_flag records whether this came from static config or
 * from the BSR.
 *
 * Returns PIM_SUCCESS or one of the PIM_RP_* / PIM_GROUP_* error codes.
 * On success the RP is registered with Zebra NHT and every affected
 * (*,G) upstream is re-homed onto the new RP.
 */
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	struct prefix nht_p;
	struct route_node *rn;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	/* candidate entry; freed again on every early-return path */
	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	pim_addr_to_prefix(&rp_info->rp.rpf_addr, rp_addr);
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (prefix_same(&rp_info->rp.rpf_addr,
					&tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		/* catch-all entry always exists (installed by pim_rp_init) */
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    prefix_same(&rp_info->rp.rpf_addr,
					&tmp_rp_info->rp.rpf_addr)) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			/* reuse the existing catch-all entry in place */
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pFX grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     &nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			/* static config overrides a BSR-learned source */
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					/* same group, different source:
					 * replace the RP address instead
					 */
					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	/* brand-new entry: insert into both the sorted list and the table
	 * (the table node keeps its lock until pim_rp_del)
	 */
	listnode_add_sort(pim->rp_list, rp_info);
	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));

	/* re-home every (*,G) upstream that now maps to this new RP */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
644
645 void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
646 const char *group_range, const char *plist)
647 {
648 struct prefix group;
649 int result;
650
651 if (group_range == NULL)
652 result = pim_get_all_mcast_group(&group);
653 else
654 result = str2prefix(group_range, &group);
655
656 if (!result) {
657 if (PIM_DEBUG_PIM_TRACE)
658 zlog_debug(
659 "%s: String to prefix failed for %pPAs group",
660 __func__, &rp_addr);
661 return;
662 }
663
664 pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
665 }
666
/*
 * Remove the RP mapping for (rp_addr, group) or for prefix-list 'plist'.
 *
 * When a static RP is removed and the BSR has learned an RP for the
 * same group, the BSR RP is promoted in its place. Otherwise all (*,G)
 * upstreams pointing at the deleted RP are re-homed (or cleared when no
 * RP remains) and the entry is freed.
 *
 * Returns PIM_SUCCESS, PIM_RP_NOT_FOUND or PIM_RP_BAD_ADDRESS.
 */
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		/* prefix-list RPs have no rp_table node to clean up below */
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				/* swap in the BSR RP instead of deleting */
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	/* the catch-all entry is never freed: reset it to ANY instead */
	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			/* two unlocks: one for the route_node_get() above,
			 * one for the lock held since insertion in
			 * pim_rp_new()
			 */
			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			/* NOTE(review): trp_info is dereferenced without a
			 * NULL check; presumably the catch-all entry
			 * guarantees a match -- confirm.
			 */
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
822
/*
 * Replace the RP address for an existing group mapping (or create the
 * mapping via pim_rp_new() when none exists yet): deregister the old
 * address from Zebra NHT, rewrite the rp_info in place, re-home all
 * matching (*,G) upstreams and register the new address.
 *
 * Returns PIM_SUCCESS, PIM_RP_NO_PATH, or pim_rp_new()'s result.
 */
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	struct prefix nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	/* route_node_lookup() returns a locked node; every exit below
	 * must route_unlock_node(rn)
	 */
	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			/* only the source changed: record it and stop */
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
		/* NOTE(review): same address AND same source falls through
		 * and re-runs the whole deregister/re-register sequence --
		 * confirm this is intentional.
		 */
	}

	/* NOTE(review): these two fields are overwritten by the struct
	 * assignments to nht_p below; this initialization appears dead.
	 */
	nht_p.family = PIM_AF;
	nht_p.prefixlen = PIM_MAX_BITLEN;

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	/* remove and re-add so the list stays sorted by the new address */
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	pim_addr_to_prefix(&rp_info->rp.rpf_addr, new_rp_addr);
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
919
920 void pim_rp_setup(struct pim_instance *pim)
921 {
922 struct listnode *node;
923 struct rp_info *rp_info;
924 struct prefix nht_p;
925
926 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
927 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
928 continue;
929
930 nht_p = rp_info->rp.rpf_addr;
931
932 pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
933 if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
934 &nht_p, &rp_info->group, 1))
935 if (PIM_DEBUG_PIM_NHT_RP)
936 zlog_debug(
937 "Unable to lookup nexthop for rp specified");
938 }
939 }
940
941 /*
942 * Checks to see if we should elect ourself the actual RP when new if
943 * addresses are added against an interface.
944 */
945 void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
946 {
947 struct listnode *node;
948 struct rp_info *rp_info;
949 bool i_am_rp_changed = false;
950 struct pim_instance *pim = pim_ifp->pim;
951
952 if (pim->rp_list == NULL)
953 return;
954
955 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
956 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
957 continue;
958
959 /* if i_am_rp is already set nothing to be done (adding new
960 * addresses
961 * is not going to make a difference). */
962 if (rp_info->i_am_rp) {
963 continue;
964 }
965
966 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
967 i_am_rp_changed = true;
968 rp_info->i_am_rp = 1;
969 if (PIM_DEBUG_PIM_NHT_RP) {
970 char rp[PREFIX_STRLEN];
971 pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
972 rp, sizeof(rp));
973 zlog_debug("%s: %s: i am rp", __func__, rp);
974 }
975 }
976 }
977
978 if (i_am_rp_changed) {
979 pim_msdp_i_am_rp_changed(pim);
980 pim_upstream_reeval_use_rpt(pim);
981 }
982 }
983
984 /* up-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
985 * are removed. Removing numbers is an uncommon event in an active network
986 * so I have made no attempt to optimize it. */
987 void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
988 {
989 struct listnode *node;
990 struct rp_info *rp_info;
991 bool i_am_rp_changed = false;
992 int old_i_am_rp;
993
994 if (pim->rp_list == NULL)
995 return;
996
997 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
998 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
999 continue;
1000
1001 old_i_am_rp = rp_info->i_am_rp;
1002 pim_rp_check_interfaces(pim, rp_info);
1003
1004 if (old_i_am_rp != rp_info->i_am_rp) {
1005 i_am_rp_changed = true;
1006 if (PIM_DEBUG_PIM_NHT_RP) {
1007 char rp[PREFIX_STRLEN];
1008 pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
1009 rp, sizeof(rp));
1010 if (rp_info->i_am_rp) {
1011 zlog_debug("%s: %s: i am rp", __func__,
1012 rp);
1013 } else {
1014 zlog_debug("%s: %s: i am no longer rp",
1015 __func__, rp);
1016 }
1017 }
1018 }
1019 }
1020
1021 if (i_am_rp_changed) {
1022 pim_msdp_i_am_rp_changed(pim);
1023 pim_upstream_reeval_use_rpt(pim);
1024 }
1025 }
1026
1027 /*
1028 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1029 * this router is the RP for the group.
1030 *
1031 * Since we only have static RP, all groups are part of this RP
1032 */
1033 int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
1034 {
1035 struct prefix g;
1036 struct rp_info *rp_info;
1037
1038 memset(&g, 0, sizeof(g));
1039 pim_addr_to_prefix(&g, group);
1040 rp_info = pim_rp_find_match_group(pim, &g);
1041
1042 if (rp_info)
1043 return rp_info->i_am_rp;
1044 return 0;
1045 }
1046
1047 /*
1048 * RP(G)
1049 *
1050 * Return the RP that the Group belongs too.
1051 */
1052 struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
1053 {
1054 struct prefix g;
1055 struct rp_info *rp_info;
1056
1057 memset(&g, 0, sizeof(g));
1058 pim_addr_to_prefix(&g, group);
1059
1060 rp_info = pim_rp_find_match_group(pim, &g);
1061
1062 if (rp_info) {
1063 struct prefix nht_p;
1064
1065 /* Register addr with Zebra NHT */
1066 nht_p = rp_info->rp.rpf_addr;
1067 if (PIM_DEBUG_PIM_NHT_RP)
1068 zlog_debug(
1069 "%s: NHT Register RP addr %pFX grp %pFX with Zebra",
1070 __func__, &nht_p, &rp_info->group);
1071 pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
1072 pim_rpf_set_refresh_time(pim);
1073 (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
1074 &nht_p, &rp_info->group, 1);
1075 return (&rp_info->rp);
1076 }
1077
1078 // About to Go Down
1079 return NULL;
1080 }
1081
1082 /*
1083 * Set the upstream IP address we want to talk to based upon
1084 * the rp configured and the source address
1085 *
1086 * If we have don't have a RP configured and the source address is *
1087 * then set the upstream addr as INADDR_ANY and return failure.
1088 *
1089 */
1090 int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
1091 pim_addr source, pim_addr group)
1092 {
1093 struct rp_info *rp_info;
1094 struct prefix g;
1095
1096 memset(&g, 0, sizeof(g));
1097
1098 pim_addr_to_prefix(&g, group);
1099
1100 rp_info = pim_rp_find_match_group(pim, &g);
1101
1102 if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
1103 (pim_addr_is_any(source)))) {
1104 if (PIM_DEBUG_PIM_NHT_RP)
1105 zlog_debug("%s: Received a (*,G) with no RP configured",
1106 __func__);
1107 *up = PIMADDR_ANY;
1108 return 0;
1109 }
1110
1111 if (pim_addr_is_any(source))
1112 *up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
1113 else
1114 *up = source;
1115
1116 return 1;
1117 }
1118
1119 int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
1120 const char *spaces)
1121 {
1122 struct listnode *node;
1123 struct rp_info *rp_info;
1124 int count = 0;
1125 pim_addr rp_addr;
1126
1127 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1128 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1129 continue;
1130
1131 if (rp_info->rp_src == RP_SRC_BSR)
1132 continue;
1133
1134 rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
1135 if (rp_info->plist)
1136 vty_out(vty,
1137 "%s" PIM_AF_NAME
1138 " pim rp %pPA prefix-list %s\n",
1139 spaces, &rp_addr, rp_info->plist);
1140 else
1141 vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
1142 spaces, &rp_addr, &rp_info->group);
1143 count++;
1144 }
1145
1146 return count;
1147 }
1148
/*
 * Render the RP table for one PIM instance ("show ... pim rp-info"),
 * either as aligned text columns or as JSON when 'uj' is set.
 *
 * pim   - instance whose rp_list is displayed
 * range - optional group prefix filter; only entries whose group prefix
 *         is identical to 'range' are shown (NULL shows all)
 * vty   - output target
 * uj    - true for JSON output, false for plain text
 *
 * In JSON mode, consecutive rp_list entries that share the same RP
 * address are grouped into one array keyed by that RP address.
 * NOTE(review): this grouping assumes entries for the same RP are
 * adjacent in pim->rp_list — verify the list is kept sorted by RP.
 */
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, bool uj)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	/* Holds "Static" / "BSR" / "None"; 7 bytes fits the longest + NUL. */
	char source[7];

	json_object *json = NULL;
	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (uj)
		json = json_object_new_object();
	else
		vty_out(vty,
			"RP address group/prefix-list OIF I am RP Source Group-Type\n");
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* Skip placeholder entries with no RP address set. */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		/* Apply the optional exact group-prefix filter. */
		if (range && !prefix_same(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (uj) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    prefix_cmp(&prev_rp_info->rp.rpf_addr,
				       &rp_info->rp.rpf_addr)) {
				json_object_object_addf(
					json, json_rp_rows, "%pFXh",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			/* Start a fresh array for the current RP address. */
			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pFXh",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			/* A prefix-list RP has no single group prefix. */
			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			vty_out(vty, "%-15pFXh ", &rp_info->rp.rpf_addr);

			if (rp_info->plist)
				vty_out(vty, "%-18s ", rp_info->plist);
			else
				vty_out(vty, "%-18pFX ", &rp_info->group);

			if (rp_info->rp.source_nexthop.interface)
				vty_out(vty, "%-16s ",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				vty_out(vty, "%-16s ", "(Unknown)");

			if (rp_info->i_am_rp)
				vty_out(vty, "yes");
			else
				vty_out(vty, "no");

			vty_out(vty, "%14s", source);
			vty_out(vty, "%6s\n", group_type);
		}
		prev_rp_info = rp_info;
	}

	if (uj) {
		/* Flush the array accumulated for the final RP. */
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pFXh",
						&prev_rp_info->rp.rpf_addr);

		vty_json(vty, json);
	}
}
1268
1269 void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
1270 {
1271 struct listnode *node = NULL;
1272 struct rp_info *rp_info = NULL;
1273 struct nexthop *nh_node = NULL;
1274 struct prefix nht_p;
1275 struct pim_nexthop_cache pnc;
1276
1277 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1278 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1279 continue;
1280
1281 nht_p = rp_info->rp.rpf_addr;
1282 memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
1283 if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
1284 &pnc))
1285 continue;
1286
1287 for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
1288 #if PIM_IPV == 4
1289 if (!pim_addr_is_any(nh_node->gate.ipv4))
1290 continue;
1291 #else
1292 if (!pim_addr_is_any(nh_node->gate.ipv6))
1293 continue;
1294 #endif
1295
1296 struct interface *ifp1 = if_lookup_by_index(
1297 nh_node->ifindex, pim->vrf->vrf_id);
1298
1299 if (nbr->interface != ifp1)
1300 continue;
1301
1302 #if PIM_IPV == 4
1303 nh_node->gate.ipv4 = nbr->source_addr;
1304 #else
1305 nh_node->gate.ipv6 = nbr->source_addr;
1306 #endif
1307 if (PIM_DEBUG_PIM_NHT_RP)
1308 zlog_debug(
1309 "%s: addr %pFXh new nexthop addr %pPAs interface %s",
1310 __func__, &nht_p, &nbr->source_addr,
1311 ifp1->name);
1312 }
1313 }
1314 }