]> git.proxmox.com Git - mirror_frr.git/blob - pimd/pim_rp.c
Merge pull request #11832 from sigeryang/master
[mirror_frr.git] / pimd / pim_rp.c
1 /*
2 * PIM for Quagga
3 * Copyright (C) 2015 Cumulus Networks, Inc.
4 * Donald Sharp
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20 #include <zebra.h>
21
22 #include "lib/json.h"
23 #include "log.h"
24 #include "network.h"
25 #include "if.h"
26 #include "linklist.h"
27 #include "prefix.h"
28 #include "memory.h"
29 #include "vty.h"
30 #include "vrf.h"
31 #include "plist.h"
32 #include "nexthop.h"
33 #include "table.h"
34 #include "lib_errors.h"
35
36 #include "pimd.h"
37 #include "pim_instance.h"
38 #include "pim_vty.h"
39 #include "pim_str.h"
40 #include "pim_iface.h"
41 #include "pim_rp.h"
42 #include "pim_rpf.h"
43 #include "pim_sock.h"
44 #include "pim_memory.h"
45 #include "pim_neighbor.h"
46 #include "pim_msdp.h"
47 #include "pim_nht.h"
48 #include "pim_mroute.h"
49 #include "pim_oil.h"
50 #include "pim_zebra.h"
51 #include "pim_bsm.h"
52 #include "pim_util.h"
53 #include "pim_ssm.h"
54 #include "termtable.h"
55
56 /* Cleanup pim->rpf_hash each node data */
57 void pim_rp_list_hash_clean(void *data)
58 {
59 struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
60
61 list_delete(&pnc->rp_list);
62
63 hash_clean(pnc->upstream_hash, NULL);
64 hash_free(pnc->upstream_hash);
65 pnc->upstream_hash = NULL;
66 if (pnc->nexthop)
67 nexthops_free(pnc->nexthop);
68
69 XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
70 }
71
72 static void pim_rp_info_free(struct rp_info *rp_info)
73 {
74 XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
75
76 XFREE(MTYPE_PIM_RP, rp_info);
77 }
78
79 int pim_rp_list_cmp(void *v1, void *v2)
80 {
81 struct rp_info *rp1 = (struct rp_info *)v1;
82 struct rp_info *rp2 = (struct rp_info *)v2;
83 int ret;
84
85 /*
86 * Sort by RP IP address
87 */
88 ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
89 if (ret)
90 return ret;
91
92 /*
93 * Sort by group IP address
94 */
95 ret = prefix_cmp(&rp1->group, &rp2->group);
96 if (ret)
97 return ret;
98
99 return 0;
100 }
101
102 void pim_rp_init(struct pim_instance *pim)
103 {
104 struct rp_info *rp_info;
105 struct route_node *rn;
106
107 pim->rp_list = list_new();
108 pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
109 pim->rp_list->cmp = pim_rp_list_cmp;
110
111 pim->rp_table = route_table_init();
112
113 rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
114
115 if (!pim_get_all_mcast_group(&rp_info->group)) {
116 flog_err(EC_LIB_DEVELOPMENT,
117 "Unable to convert all-multicast prefix");
118 list_delete(&pim->rp_list);
119 route_table_finish(pim->rp_table);
120 XFREE(MTYPE_PIM_RP, rp_info);
121 return;
122 }
123 rp_info->rp.rpf_addr = PIMADDR_ANY;
124
125 listnode_add(pim->rp_list, rp_info);
126
127 rn = route_node_get(pim->rp_table, &rp_info->group);
128 rn->info = rp_info;
129 if (PIM_DEBUG_PIM_TRACE)
130 zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
131 rp_info, &rp_info->group,
132 route_node_get_lock_count(rn));
133 }
134
135 void pim_rp_free(struct pim_instance *pim)
136 {
137 if (pim->rp_table)
138 route_table_finish(pim->rp_table);
139 pim->rp_table = NULL;
140
141 if (pim->rp_list)
142 list_delete(&pim->rp_list);
143 }
144
145 /*
146 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
147 */
148 static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
149 pim_addr rp, const char *plist)
150 {
151 struct listnode *node;
152 struct rp_info *rp_info;
153
154 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
155 if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
156 rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
157 return rp_info;
158 }
159 }
160
161 return NULL;
162 }
163
164 /*
165 * Return true if plist is used by any rp_info
166 */
167 static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
168 {
169 struct listnode *node;
170 struct rp_info *rp_info;
171
172 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
173 if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
174 return 1;
175 }
176 }
177
178 return 0;
179 }
180
181 /*
182 * Given an RP's address, return the RP's rp_info that is an exact match for
183 * 'group'
184 */
185 static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
186 const struct prefix *group)
187 {
188 struct listnode *node;
189 struct rp_info *rp_info;
190
191 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
192 if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
193 prefix_same(&rp_info->group, group))
194 return rp_info;
195 }
196
197 return NULL;
198 }
199
200 /*
201 * XXX: long-term issue: we don't actually have a good "ip address-list"
202 * implementation. ("access-list XYZ" is the closest but honestly it's
203 * kinda garbage.)
204 *
205 * So it's using a prefix-list to match an address here, which causes very
206 * unexpected results for the user since prefix-lists by default only match
207 * when the prefix length is an exact match too. i.e. you'd have to add the
208 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
209 *
210 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
211 * list match (this is the only user for that.)
212 *
213 * In the long run, we need to add a "ip address-list", but that's a wholly
214 * separate bag of worms, and existing configs using ip prefix-list would
215 * drop into the UX pitfall.
216 */
217
218 #include "lib/plist_int.h"
219
220 /*
221 * Given a group, return the rp_info for that group
222 */
223 struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
224 const struct prefix *group)
225 {
226 struct listnode *node;
227 struct rp_info *best = NULL;
228 struct rp_info *rp_info;
229 struct prefix_list *plist;
230 const struct prefix *bp;
231 const struct prefix_list_entry *entry;
232 struct route_node *rn;
233
234 bp = NULL;
235 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
236 if (rp_info->plist) {
237 plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
238
239 if (prefix_list_apply_ext(plist, &entry, group, true)
240 == PREFIX_DENY || !entry)
241 continue;
242
243 if (!best) {
244 best = rp_info;
245 bp = &entry->prefix;
246 continue;
247 }
248
249 if (bp && bp->prefixlen < entry->prefix.prefixlen) {
250 best = rp_info;
251 bp = &entry->prefix;
252 }
253 }
254 }
255
256 rn = route_node_match(pim->rp_table, group);
257 if (!rn) {
258 flog_err(
259 EC_LIB_DEVELOPMENT,
260 "%s: BUG We should have found default group information",
261 __func__);
262 return best;
263 }
264
265 rp_info = rn->info;
266 if (PIM_DEBUG_PIM_TRACE) {
267 if (best)
268 zlog_debug(
269 "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
270 group, best->plist, rn, &rp_info->group);
271 else
272 zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
273 rn, &rp_info->group);
274 }
275
276 route_unlock_node(rn);
277
278 /*
279 * rp's with prefix lists have the group as 224.0.0.0/4 which will
280 * match anything. So if we have a rp_info that should match a prefix
281 * list then if we do match then best should be the answer( even
282 * if it is NULL )
283 */
284 if (!rp_info || (rp_info && rp_info->plist))
285 return best;
286
287 /*
288 * So we have a non plist rp_info found in the lookup and no plists
289 * at all to be choosen, return it!
290 */
291 if (!best)
292 return rp_info;
293
294 /*
295 * If we have a matching non prefix list and a matching prefix
296 * list we should return the actual rp_info that has the LPM
297 * If they are equal, use the prefix-list( but let's hope
298 * the end-operator doesn't do this )
299 */
300 if (rp_info->group.prefixlen > bp->prefixlen)
301 best = rp_info;
302
303 return best;
304 }
305
306 /*
307 * When the user makes "ip pim rp" configuration changes or if they change the
308 * prefix-list(s) used by these statements we must tickle the upstream state
309 * for each group to make them re-lookup who their RP should be.
310 *
311 * This is a placeholder function for now.
312 */
/* Re-evaluate group-to-RP mappings after any RP configuration change:
 * lets MSDP react to a possible change of our own RP role and makes
 * each upstream re-check whether it should use the RP tree.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
318
319 void pim_rp_prefix_list_update(struct pim_instance *pim,
320 struct prefix_list *plist)
321 {
322 struct listnode *node;
323 struct rp_info *rp_info;
324 int refresh_needed = 0;
325
326 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
327 if (rp_info->plist
328 && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
329 refresh_needed = 1;
330 break;
331 }
332 }
333
334 if (refresh_needed)
335 pim_rp_refresh_group_to_rp_mapping(pim);
336 }
337
338 static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
339 struct pim_interface *pim_ifp)
340 {
341 struct listnode *node;
342 struct pim_secondary_addr *sec_addr;
343 pim_addr sec_paddr;
344
345 if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
346 return 1;
347
348 if (!pim_ifp->sec_addr_list) {
349 return 0;
350 }
351
352 for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
353 sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
354 /* If an RP-address is self, It should be enough to say
355 * I am RP the prefix-length should not matter here */
356 if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
357 return 1;
358 }
359
360 return 0;
361 }
362
363 static void pim_rp_check_interfaces(struct pim_instance *pim,
364 struct rp_info *rp_info)
365 {
366 struct interface *ifp;
367
368 rp_info->i_am_rp = 0;
369 FOR_ALL_INTERFACES (pim->vrf, ifp) {
370 struct pim_interface *pim_ifp = ifp->info;
371
372 if (!pim_ifp)
373 continue;
374
375 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
376 rp_info->i_am_rp = 1;
377 }
378 }
379 }
380
/* Recompute the upstream (RPF-toward) address of 'up' after an RP change
 * and, if it differs from the current one, move the upstream over: the
 * old address is deregistered from Zebra NHT, the RPF is recomputed, and
 * the kernel MFC / interface state is refreshed as needed.
 */
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	/* Nothing to do when the computed upstream address is unchanged */
	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	/* Remember the old RPF interface so we can tell whether the RPF
	 * actually moved after pim_rpf_update() below.
	 */
	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	/* Propagate the RPF change (or loss of a previously valid RPF)
	 * to joins/prunes and interface state.
	 */
	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}
432
/* Install a new RP configuration for either a prefix-list ('plist' set)
 * or a static group range. Validates against existing configuration
 * (duplicate, overlap, prefix-list reuse), removes superseded entries,
 * re-homes matching (*,G) upstreams onto the new RP, and registers the
 * RP address with Zebra NHT.
 *
 * Returns PIM_SUCCESS or one of the PIM_RP_* / PIM_GROUP_* error codes.
 * Note: ownership of the locally allocated rp_info is either transferred
 * to pim->rp_list or freed on every early-return path.
 */
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		/* rp_all is the catch-all entry created by pim_rp_init(),
		 * so the lookup on the all-multicast prefix cannot miss.
		 */
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			/* A static configuration overrides the recorded
			 * source of an existing (e.g. BSR-learned) entry.
			 */
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					/* Same range but different source:
					 * swap the RP in place.
					 */
					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	/* Common tail: insert the new entry, re-home matching (*,G)
	 * upstreams and register the RP address with Zebra NHT.
	 */
	listnode_add_sort(pim->rp_list, rp_info);

	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
666
667 void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
668 const char *group_range, const char *plist)
669 {
670 struct prefix group;
671 int result;
672
673 if (group_range == NULL)
674 result = pim_get_all_mcast_group(&group);
675 else
676 result = str2prefix(group_range, &group);
677
678 if (!result) {
679 if (PIM_DEBUG_PIM_TRACE)
680 zlog_debug(
681 "%s: String to prefix failed for %pPAs group",
682 __func__, &rp_addr);
683 return;
684 }
685
686 pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
687 }
688
/* Remove an RP configuration (by prefix-list or exact group range).
 * If a static RP is deleted and a BSM-learned RP exists for the same
 * group, the dynamic RP takes over via pim_rp_change(). Deregisters the
 * RP address from Zebra NHT, resets or re-homes affected (*,G)
 * upstreams, and frees the entry (the all-multicast catch-all entry is
 * never freed, only reset to ANY).
 *
 * Returns PIM_SUCCESS or a PIM_RP_* error code.
 */
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	/* Release the prefix-list name now; was_plist also tells the code
	 * below whether a route_table node exists for this entry.
	 */
	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				/* Hand the group over to the BSR-learned RP
				 * instead of deleting the entry.
				 */
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	/* Special case: deleting the catch-all entry itself. It is reset to
	 * ANY rather than freed, and dependent (*,G) upstreams lose their
	 * upstream address.
	 */
	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			/* Two unlocks: one for the route_node_get() above and
			 * one for the lock held since the entry was created.
			 */
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
844
/* Replace the RP address for an already-configured group range in place.
 * Falls back to pim_rp_new() when no entry exists for 'group'. Performs
 * the NHT deregister/re-register dance, re-sorts the rp_list (ordering
 * depends on the RP address), and re-homes matching (*,G) upstreams.
 *
 * Returns PIM_SUCCESS, PIM_RP_NO_PATH, or a pim_rp_new() result.
 */
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		/* Unknown group range: treat as a fresh configuration */
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		/* Node exists but carries no RP entry; drop the lookup lock
		 * before creating a new configuration.
		 */
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
		/* NOTE(review): same address AND same source falls through
		 * to a full re-registration below — presumably intentional
		 * (forces NHT/upstream refresh); confirm against callers.
		 */
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	/* Remove and re-add below: the list is sorted by RP address, which
	 * is about to change.
	 */
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	/* Balance the route_node_lookup() lock taken at the top */
	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
938
939 void pim_rp_setup(struct pim_instance *pim)
940 {
941 struct listnode *node;
942 struct rp_info *rp_info;
943 pim_addr nht_p;
944
945 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
946 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
947 continue;
948
949 nht_p = rp_info->rp.rpf_addr;
950
951 pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
952 if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
953 nht_p, &rp_info->group, 1))
954 if (PIM_DEBUG_PIM_NHT_RP)
955 zlog_debug(
956 "Unable to lookup nexthop for rp specified");
957 }
958 }
959
960 /*
961 * Checks to see if we should elect ourself the actual RP when new if
962 * addresses are added against an interface.
963 */
964 void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
965 {
966 struct listnode *node;
967 struct rp_info *rp_info;
968 bool i_am_rp_changed = false;
969 struct pim_instance *pim = pim_ifp->pim;
970
971 if (pim->rp_list == NULL)
972 return;
973
974 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
975 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
976 continue;
977
978 /* if i_am_rp is already set nothing to be done (adding new
979 * addresses
980 * is not going to make a difference). */
981 if (rp_info->i_am_rp) {
982 continue;
983 }
984
985 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
986 i_am_rp_changed = true;
987 rp_info->i_am_rp = 1;
988 if (PIM_DEBUG_PIM_NHT_RP)
989 zlog_debug("%s: %pPA: i am rp", __func__,
990 &rp_info->rp.rpf_addr);
991 }
992 }
993
994 if (i_am_rp_changed) {
995 pim_msdp_i_am_rp_changed(pim);
996 pim_upstream_reeval_use_rpt(pim);
997 }
998 }
999
1000 /* up-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
1001 * are removed. Removing numbers is an uncommon event in an active network
1002 * so I have made no attempt to optimize it. */
1003 void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
1004 {
1005 struct listnode *node;
1006 struct rp_info *rp_info;
1007 bool i_am_rp_changed = false;
1008 int old_i_am_rp;
1009
1010 if (pim->rp_list == NULL)
1011 return;
1012
1013 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1014 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1015 continue;
1016
1017 old_i_am_rp = rp_info->i_am_rp;
1018 pim_rp_check_interfaces(pim, rp_info);
1019
1020 if (old_i_am_rp != rp_info->i_am_rp) {
1021 i_am_rp_changed = true;
1022 if (PIM_DEBUG_PIM_NHT_RP) {
1023 if (rp_info->i_am_rp)
1024 zlog_debug("%s: %pPA: i am rp",
1025 __func__,
1026 &rp_info->rp.rpf_addr);
1027 else
1028 zlog_debug(
1029 "%s: %pPA: i am no longer rp",
1030 __func__,
1031 &rp_info->rp.rpf_addr);
1032 }
1033 }
1034 }
1035
1036 if (i_am_rp_changed) {
1037 pim_msdp_i_am_rp_changed(pim);
1038 pim_upstream_reeval_use_rpt(pim);
1039 }
1040 }
1041
1042 /*
1043 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1044 * this router is the RP for the group.
1045 *
1046 * Since we only have static RP, all groups are part of this RP
1047 */
1048 int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
1049 {
1050 struct prefix g;
1051 struct rp_info *rp_info;
1052
1053 memset(&g, 0, sizeof(g));
1054 pim_addr_to_prefix(&g, group);
1055 rp_info = pim_rp_find_match_group(pim, &g);
1056
1057 if (rp_info)
1058 return rp_info->i_am_rp;
1059 return 0;
1060 }
1061
1062 /*
1063 * RP(G)
1064 *
1065 * Return the RP that the Group belongs too.
1066 */
1067 struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
1068 {
1069 struct prefix g;
1070 struct rp_info *rp_info;
1071
1072 memset(&g, 0, sizeof(g));
1073 pim_addr_to_prefix(&g, group);
1074
1075 rp_info = pim_rp_find_match_group(pim, &g);
1076
1077 if (rp_info) {
1078 pim_addr nht_p;
1079
1080 /* Register addr with Zebra NHT */
1081 nht_p = rp_info->rp.rpf_addr;
1082 if (PIM_DEBUG_PIM_NHT_RP)
1083 zlog_debug(
1084 "%s: NHT Register RP addr %pPA grp %pFX with Zebra",
1085 __func__, &nht_p, &rp_info->group);
1086 pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
1087 pim_rpf_set_refresh_time(pim);
1088 (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
1089 nht_p, &rp_info->group, 1);
1090 return (&rp_info->rp);
1091 }
1092
1093 // About to Go Down
1094 return NULL;
1095 }
1096
1097 /*
1098 * Set the upstream IP address we want to talk to based upon
1099 * the rp configured and the source address
1100 *
1101 * If we have don't have a RP configured and the source address is *
1102 * then set the upstream addr as INADDR_ANY and return failure.
1103 *
1104 */
1105 int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
1106 pim_addr source, pim_addr group)
1107 {
1108 struct rp_info *rp_info;
1109 struct prefix g;
1110
1111 memset(&g, 0, sizeof(g));
1112
1113 pim_addr_to_prefix(&g, group);
1114
1115 rp_info = pim_rp_find_match_group(pim, &g);
1116
1117 if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
1118 (pim_addr_is_any(source)))) {
1119 if (PIM_DEBUG_PIM_NHT_RP)
1120 zlog_debug("%s: Received a (*,G) with no RP configured",
1121 __func__);
1122 *up = PIMADDR_ANY;
1123 return 0;
1124 }
1125
1126 if (pim_addr_is_any(source))
1127 *up = rp_info->rp.rpf_addr;
1128 else
1129 *up = source;
1130
1131 return 1;
1132 }
1133
1134 int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
1135 const char *spaces)
1136 {
1137 struct listnode *node;
1138 struct rp_info *rp_info;
1139 int count = 0;
1140 pim_addr rp_addr;
1141
1142 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1143 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1144 continue;
1145
1146 if (rp_info->rp_src == RP_SRC_BSR)
1147 continue;
1148
1149 rp_addr = rp_info->rp.rpf_addr;
1150 if (rp_info->plist)
1151 vty_out(vty,
1152 "%s" PIM_AF_NAME
1153 " pim rp %pPA prefix-list %s\n",
1154 spaces, &rp_addr, rp_info->plist);
1155 else
1156 vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
1157 spaces, &rp_addr, &rp_info->group);
1158 count++;
1159 }
1160
1161 return count;
1162 }
1163
/*
 * Render RP information for the "show ... pim rp-info" command, either
 * as a text table (json == NULL, written to vty) or as JSON objects
 * keyed by RP address (json != NULL).
 *
 * range, when non-NULL, restricts output to RPs whose group prefix is
 * covered by the given prefix.
 */
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	struct ttable *tt = NULL;
	char *table = NULL;
	char source[7]; /* big enough for "Static", the longest label */
	char grp[INET6_ADDRSTRLEN];

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* Skip the placeholder any-address entry. */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		/* Honour the optional group-range filter. */
		if (range && !prefix_match(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			prefix2str(&rp_info->group, grp, sizeof(grp));
			ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
				       &rp_info->rp.rpf_addr,
				       rp_info->plist
					       ? rp_info->plist
					       : grp,
				       rp_info->rp.source_nexthop.interface
					       ? rp_info->rp.source_nexthop
							 .interface->name
					       : "Unknown",
				       rp_info->i_am_rp
					       ? "yes"
					       : "no",
				       source, group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Dump the generated table. */
	if (!json) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	} else {
		/* Flush the rows accumulated for the final RP. */
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pPA",
						&prev_rp_info->rp.rpf_addr);
	}
}
1288
1289 void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
1290 {
1291 struct listnode *node = NULL;
1292 struct rp_info *rp_info = NULL;
1293 struct nexthop *nh_node = NULL;
1294 pim_addr nht_p;
1295 struct pim_nexthop_cache pnc;
1296
1297 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1298 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1299 continue;
1300
1301 nht_p = rp_info->rp.rpf_addr;
1302 memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
1303 if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
1304 continue;
1305
1306 for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
1307 #if PIM_IPV == 4
1308 if (!pim_addr_is_any(nh_node->gate.ipv4))
1309 continue;
1310 #else
1311 if (!pim_addr_is_any(nh_node->gate.ipv6))
1312 continue;
1313 #endif
1314
1315 struct interface *ifp1 = if_lookup_by_index(
1316 nh_node->ifindex, pim->vrf->vrf_id);
1317
1318 if (nbr->interface != ifp1)
1319 continue;
1320
1321 #if PIM_IPV == 4
1322 nh_node->gate.ipv4 = nbr->source_addr;
1323 #else
1324 nh_node->gate.ipv6 = nbr->source_addr;
1325 #endif
1326 if (PIM_DEBUG_PIM_NHT_RP)
1327 zlog_debug(
1328 "%s: addr %pPA new nexthop addr %pPAs interface %s",
1329 __func__, &nht_p, &nbr->source_addr,
1330 ifp1->name);
1331 }
1332 }
1333 }