/* Source: FRR (mirror_frr.git) pimd/pim_rp.c */
1 /*
2 * PIM for Quagga
3 * Copyright (C) 2015 Cumulus Networks, Inc.
4 * Donald Sharp
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20 #include <zebra.h>
21
22 #include "lib/json.h"
23 #include "log.h"
24 #include "network.h"
25 #include "if.h"
26 #include "linklist.h"
27 #include "prefix.h"
28 #include "memory.h"
29 #include "vty.h"
30 #include "vrf.h"
31 #include "plist.h"
32 #include "nexthop.h"
33 #include "table.h"
34 #include "lib_errors.h"
35
36 #include "pimd.h"
37 #include "pim_instance.h"
38 #include "pim_vty.h"
39 #include "pim_str.h"
40 #include "pim_iface.h"
41 #include "pim_rp.h"
42 #include "pim_rpf.h"
43 #include "pim_sock.h"
44 #include "pim_memory.h"
45 #include "pim_neighbor.h"
46 #include "pim_msdp.h"
47 #include "pim_nht.h"
48 #include "pim_mroute.h"
49 #include "pim_oil.h"
50 #include "pim_zebra.h"
51 #include "pim_bsm.h"
52 #include "pim_util.h"
53 #include "pim_ssm.h"
54
55 /* Cleanup pim->rpf_hash each node data */
56 void pim_rp_list_hash_clean(void *data)
57 {
58 struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
59
60 list_delete(&pnc->rp_list);
61
62 hash_clean(pnc->upstream_hash, NULL);
63 hash_free(pnc->upstream_hash);
64 pnc->upstream_hash = NULL;
65 if (pnc->nexthop)
66 nexthops_free(pnc->nexthop);
67
68 XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
69 }
70
71 static void pim_rp_info_free(struct rp_info *rp_info)
72 {
73 XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
74
75 XFREE(MTYPE_PIM_RP, rp_info);
76 }
77
78 int pim_rp_list_cmp(void *v1, void *v2)
79 {
80 struct rp_info *rp1 = (struct rp_info *)v1;
81 struct rp_info *rp2 = (struct rp_info *)v2;
82 int ret;
83
84 /*
85 * Sort by RP IP address
86 */
87 ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
88 if (ret)
89 return ret;
90
91 /*
92 * Sort by group IP address
93 */
94 ret = prefix_cmp(&rp1->group, &rp2->group);
95 if (ret)
96 return ret;
97
98 return 0;
99 }
100
/*
 * Initialize per-instance RP state: the sorted rp_list, the rp_table
 * radix tree, and one catch-all rp_info covering the whole multicast
 * range whose RP address starts out unset (PIMADDR_ANY).
 */
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!pim_get_all_mcast_group(&rp_info->group)) {
		/* should never happen; undo everything allocated above */
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	/* PIMADDR_ANY marks "no RP configured yet" on the catch-all */
	rp_info->rp.rpf_addr = PIMADDR_ANY;

	listnode_add(pim->rp_list, rp_info);

	/* the catch-all also anchors the radix tree; the node keeps a lock */
	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));
}
133
134 void pim_rp_free(struct pim_instance *pim)
135 {
136 if (pim->rp_table)
137 route_table_finish(pim->rp_table);
138 pim->rp_table = NULL;
139
140 if (pim->rp_list)
141 list_delete(&pim->rp_list);
142 }
143
144 /*
145 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
146 */
147 static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
148 pim_addr rp, const char *plist)
149 {
150 struct listnode *node;
151 struct rp_info *rp_info;
152
153 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
154 if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
155 rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
156 return rp_info;
157 }
158 }
159
160 return NULL;
161 }
162
163 /*
164 * Return true if plist is used by any rp_info
165 */
166 static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
167 {
168 struct listnode *node;
169 struct rp_info *rp_info;
170
171 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
172 if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
173 return 1;
174 }
175 }
176
177 return 0;
178 }
179
180 /*
181 * Given an RP's address, return the RP's rp_info that is an exact match for
182 * 'group'
183 */
184 static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
185 const struct prefix *group)
186 {
187 struct listnode *node;
188 struct rp_info *rp_info;
189
190 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
191 if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
192 prefix_same(&rp_info->group, group))
193 return rp_info;
194 }
195
196 return NULL;
197 }
198
199 /*
200 * XXX: long-term issue: we don't actually have a good "ip address-list"
201 * implementation. ("access-list XYZ" is the closest but honestly it's
202 * kinda garbage.)
203 *
204 * So it's using a prefix-list to match an address here, which causes very
205 * unexpected results for the user since prefix-lists by default only match
206 * when the prefix length is an exact match too. i.e. you'd have to add the
207 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
208 *
209 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
210 * list match (this is the only user for that.)
211 *
212 * In the long run, we need to add a "ip address-list", but that's a wholly
213 * separate bag of worms, and existing configs using ip prefix-list would
214 * drop into the UX pitfall.
215 */
216
217 #include "lib/plist_int.h"
218
219 /*
220 * Given a group, return the rp_info for that group
221 */
222 struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
223 const struct prefix *group)
224 {
225 struct listnode *node;
226 struct rp_info *best = NULL;
227 struct rp_info *rp_info;
228 struct prefix_list *plist;
229 const struct prefix *bp;
230 const struct prefix_list_entry *entry;
231 struct route_node *rn;
232
233 bp = NULL;
234 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
235 if (rp_info->plist) {
236 plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
237
238 if (prefix_list_apply_ext(plist, &entry, group, true)
239 == PREFIX_DENY || !entry)
240 continue;
241
242 if (!best) {
243 best = rp_info;
244 bp = &entry->prefix;
245 continue;
246 }
247
248 if (bp && bp->prefixlen < entry->prefix.prefixlen) {
249 best = rp_info;
250 bp = &entry->prefix;
251 }
252 }
253 }
254
255 rn = route_node_match(pim->rp_table, group);
256 if (!rn) {
257 flog_err(
258 EC_LIB_DEVELOPMENT,
259 "%s: BUG We should have found default group information",
260 __func__);
261 return best;
262 }
263
264 rp_info = rn->info;
265 if (PIM_DEBUG_PIM_TRACE) {
266 if (best)
267 zlog_debug(
268 "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
269 group, best->plist, rn, &rp_info->group);
270 else
271 zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
272 rn, &rp_info->group);
273 }
274
275 route_unlock_node(rn);
276
277 /*
278 * rp's with prefix lists have the group as 224.0.0.0/4 which will
279 * match anything. So if we have a rp_info that should match a prefix
280 * list then if we do match then best should be the answer( even
281 * if it is NULL )
282 */
283 if (!rp_info || (rp_info && rp_info->plist))
284 return best;
285
286 /*
287 * So we have a non plist rp_info found in the lookup and no plists
288 * at all to be choosen, return it!
289 */
290 if (!best)
291 return rp_info;
292
293 /*
294 * If we have a matching non prefix list and a matching prefix
295 * list we should return the actual rp_info that has the LPM
296 * If they are equal, use the prefix-list( but let's hope
297 * the end-operator doesn't do this )
298 */
299 if (rp_info->group.prefixlen > bp->prefixlen)
300 best = rp_info;
301
302 return best;
303 }
304
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	/* tell MSDP our i-am-RP role may have changed */
	pim_msdp_i_am_rp_changed(pim);
	/* force upstream entries to re-evaluate RPT vs SPT usage */
	pim_upstream_reeval_use_rpt(pim);
}
317
318 void pim_rp_prefix_list_update(struct pim_instance *pim,
319 struct prefix_list *plist)
320 {
321 struct listnode *node;
322 struct rp_info *rp_info;
323 int refresh_needed = 0;
324
325 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
326 if (rp_info->plist
327 && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
328 refresh_needed = 1;
329 break;
330 }
331 }
332
333 if (refresh_needed)
334 pim_rp_refresh_group_to_rp_mapping(pim);
335 }
336
337 static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
338 struct pim_interface *pim_ifp)
339 {
340 struct listnode *node;
341 struct pim_secondary_addr *sec_addr;
342 pim_addr sec_paddr;
343
344 if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
345 return 1;
346
347 if (!pim_ifp->sec_addr_list) {
348 return 0;
349 }
350
351 for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
352 sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
353 /* If an RP-address is self, It should be enough to say
354 * I am RP the prefix-length should not matter here */
355 if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
356 return 1;
357 }
358
359 return 0;
360 }
361
362 static void pim_rp_check_interfaces(struct pim_instance *pim,
363 struct rp_info *rp_info)
364 {
365 struct interface *ifp;
366
367 rp_info->i_am_rp = 0;
368 FOR_ALL_INTERFACES (pim->vrf, ifp) {
369 struct pim_interface *pim_ifp = ifp->info;
370
371 if (!pim_ifp)
372 continue;
373
374 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
375 rp_info->i_am_rp = 1;
376 }
377 }
378 }
379
/*
 * Re-evaluate the upstream (RPF) address of 'up' after an RP change.
 * If the address changed: move Zebra NHT tracking from the old address
 * to the new one, redo the RPF lookup and refresh MFC/OIL state.
 */
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	/* nothing to do when the upstream address did not change */
	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	/* remember old RPF interface so a change can be signalled below */
	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}
431
/*
 * Add a new RP configuration for a group range or a prefix-list of
 * groups.
 *
 * pim          instance the RP belongs to
 * rp_addr      RP address; must not be ANY
 * group        group range covered (used when plist is NULL)
 * plist        optional prefix-list name selecting the groups
 * rp_src_flag  how the RP was learned (e.g. static vs BSR)
 *
 * On success the RP address is registered with Zebra NHT and matching
 * (*,G) upstream entries are re-homed to the new RP.  Returns
 * PIM_SUCCESS or a PIM_* error code.
 */
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			/* reuse the catch-all entry instead of adding one */
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			/* a static config may take over a dynamic entry */
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					/* same range, different source: treat
					 * as an RP replacement
					 */
					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	/* prefix-list RPs are not placed in the group radix tree */
	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	/* re-home any (*,G) upstream now matching the new RP */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
665
666 void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
667 const char *group_range, const char *plist)
668 {
669 struct prefix group;
670 int result;
671
672 if (group_range == NULL)
673 result = pim_get_all_mcast_group(&group);
674 else
675 result = str2prefix(group_range, &group);
676
677 if (!result) {
678 if (PIM_DEBUG_PIM_TRACE)
679 zlog_debug(
680 "%s: String to prefix failed for %pPAs group",
681 __func__, &rp_addr);
682 return;
683 }
684
685 pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
686 }
687
/*
 * Remove an RP configuration.  When a static RP is deleted and a
 * BSR-learned RP exists for the same group range, the BSR RP takes
 * over via pim_rp_change().  Matching (*,G) upstream entries are
 * re-homed to the next best RP or cleared.  Returns PIM_SUCCESS or a
 * PIM_* error code.
 */
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	/* the catch-all entry is never freed, only reset to "no RP" */
	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			/* unlock twice: once for the route_node_get() above,
			 * once for the reference that kept the node in the
			 * table
			 */
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
843
/*
 * Replace the RP address for the exact group range 'group'.
 *
 * If no rp_info exists yet for that range, fall back to creating one
 * via pim_rp_new().  Otherwise: deregister the old address from Zebra
 * NHT, install the new address (keeping rp_list sorted), re-home
 * matching (*,G) upstream entries and register the new address with
 * NHT.  Returns PIM_SUCCESS or a PIM_* error code.
 */
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	/* same address: at most the source (static vs BSR) changes */
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	/* remove and re-add so rp_list stays sorted by the new address */
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	/* re-home any (*,G) upstream that maps to this rp_info */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
937
938 void pim_rp_setup(struct pim_instance *pim)
939 {
940 struct listnode *node;
941 struct rp_info *rp_info;
942 pim_addr nht_p;
943
944 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
945 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
946 continue;
947
948 nht_p = rp_info->rp.rpf_addr;
949
950 pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
951 if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
952 nht_p, &rp_info->group, 1))
953 if (PIM_DEBUG_PIM_NHT_RP)
954 zlog_debug(
955 "Unable to lookup nexthop for rp specified");
956 }
957 }
958
959 /*
960 * Checks to see if we should elect ourself the actual RP when new if
961 * addresses are added against an interface.
962 */
963 void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
964 {
965 struct listnode *node;
966 struct rp_info *rp_info;
967 bool i_am_rp_changed = false;
968 struct pim_instance *pim = pim_ifp->pim;
969
970 if (pim->rp_list == NULL)
971 return;
972
973 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
974 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
975 continue;
976
977 /* if i_am_rp is already set nothing to be done (adding new
978 * addresses
979 * is not going to make a difference). */
980 if (rp_info->i_am_rp) {
981 continue;
982 }
983
984 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
985 i_am_rp_changed = true;
986 rp_info->i_am_rp = 1;
987 if (PIM_DEBUG_PIM_NHT_RP)
988 zlog_debug("%s: %pPA: i am rp", __func__,
989 &rp_info->rp.rpf_addr);
990 }
991 }
992
993 if (i_am_rp_changed) {
994 pim_msdp_i_am_rp_changed(pim);
995 pim_upstream_reeval_use_rpt(pim);
996 }
997 }
998
/* un-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
 * are removed. Removing addresses is an uncommon event in an active network
 * so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* skip the catch-all entry while no RP is configured */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		/* full rescan of this VRF's interfaces for this RP */
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				if (rp_info->i_am_rp)
					zlog_debug("%s: %pPA: i am rp",
						   __func__,
						   &rp_info->rp.rpf_addr);
				else
					zlog_debug(
						"%s: %pPA: i am no longer rp",
						__func__,
						&rp_info->rp.rpf_addr);
			}
		}
	}

	if (i_am_rp_changed) {
		/* propagate to MSDP and re-evaluate RPT vs SPT usage */
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}
1040
1041 /*
1042 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1043 * this router is the RP for the group.
1044 *
1045 * Since we only have static RP, all groups are part of this RP
1046 */
1047 int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
1048 {
1049 struct prefix g;
1050 struct rp_info *rp_info;
1051
1052 memset(&g, 0, sizeof(g));
1053 pim_addr_to_prefix(&g, group);
1054 rp_info = pim_rp_find_match_group(pim, &g);
1055
1056 if (rp_info)
1057 return rp_info->i_am_rp;
1058 return 0;
1059 }
1060
/*
 * RP(G)
 *
 * Return the pim_rpf of the RP that 'group' maps to, or NULL when no
 * mapping exists.  As a side effect the RP address is (re)registered
 * with Zebra NHT and the cached nexthop is refreshed.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		pim_addr nht_p;

		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		/* best effort: a failed lookup keeps previous nexthop state */
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	/* no RP known for this group */
	return NULL;
}
1095
1096 /*
1097 * Set the upstream IP address we want to talk to based upon
1098 * the rp configured and the source address
1099 *
1100 * If we have don't have a RP configured and the source address is *
1101 * then set the upstream addr as INADDR_ANY and return failure.
1102 *
1103 */
1104 int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
1105 pim_addr source, pim_addr group)
1106 {
1107 struct rp_info *rp_info;
1108 struct prefix g;
1109
1110 memset(&g, 0, sizeof(g));
1111
1112 pim_addr_to_prefix(&g, group);
1113
1114 rp_info = pim_rp_find_match_group(pim, &g);
1115
1116 if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
1117 (pim_addr_is_any(source)))) {
1118 if (PIM_DEBUG_PIM_NHT_RP)
1119 zlog_debug("%s: Received a (*,G) with no RP configured",
1120 __func__);
1121 *up = PIMADDR_ANY;
1122 return 0;
1123 }
1124
1125 if (pim_addr_is_any(source))
1126 *up = rp_info->rp.rpf_addr;
1127 else
1128 *up = source;
1129
1130 return 1;
1131 }
1132
1133 int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
1134 const char *spaces)
1135 {
1136 struct listnode *node;
1137 struct rp_info *rp_info;
1138 int count = 0;
1139 pim_addr rp_addr;
1140
1141 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1142 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1143 continue;
1144
1145 if (rp_info->rp_src == RP_SRC_BSR)
1146 continue;
1147
1148 rp_addr = rp_info->rp.rpf_addr;
1149 if (rp_info->plist)
1150 vty_out(vty,
1151 "%s" PIM_AF_NAME
1152 " pim rp %pPA prefix-list %s\n",
1153 spaces, &rp_addr, rp_info->plist);
1154 else
1155 vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
1156 spaces, &rp_addr, &rp_info->group);
1157 count++;
1158 }
1159
1160 return count;
1161 }
1162
/*
 * Display the RP table ("show ... pim rp-info") for this PIM instance.
 *
 * pim   - instance whose rp_list is walked
 * range - optional group prefix filter; NULL shows all entries
 * vty   - target for plain-text output (used when json is NULL)
 * json  - when non-NULL, output is accumulated here instead of on the vty
 *
 * In JSON mode, consecutive rp_list entries sharing the same RP address
 * are grouped into a single array keyed by that RP address.
 */
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	char source[7]; /* longest value is "Static" + NUL */

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json)
		vty_out(vty,
			"RP address group/prefix-list OIF I am RP Source Group-Type\n");
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* Skip the placeholder "any" RP entry. */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		/* Honor the optional group-range filter. */
		if (range && !prefix_match(&rp_info->group, range))
			continue;

		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			/* Start a fresh row array for this RP address. */
			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			/* An entry matches either a prefix-list or a
			 * single group prefix, never both. */
			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			vty_out(vty, "%-15pPA ", &rp_info->rp.rpf_addr);

			if (rp_info->plist)
				vty_out(vty, "%-18s ", rp_info->plist);
			else
				vty_out(vty, "%-18pFX ", &rp_info->group);

			if (rp_info->rp.source_nexthop.interface)
				vty_out(vty, "%-16s ",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				vty_out(vty, "%-16s ", "(Unknown)");

			if (rp_info->i_am_rp)
				vty_out(vty, "yes");
			else
				vty_out(vty, "no");

			vty_out(vty, "%14s", source);
			vty_out(vty, "%6s\n", group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Flush the rows accumulated for the final RP address. */
	if (json) {
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pPA",
						&prev_rp_info->rp.rpf_addr);
	}
}
1277
1278 void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
1279 {
1280 struct listnode *node = NULL;
1281 struct rp_info *rp_info = NULL;
1282 struct nexthop *nh_node = NULL;
1283 pim_addr nht_p;
1284 struct pim_nexthop_cache pnc;
1285
1286 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1287 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1288 continue;
1289
1290 nht_p = rp_info->rp.rpf_addr;
1291 memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
1292 if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
1293 continue;
1294
1295 for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
1296 #if PIM_IPV == 4
1297 if (!pim_addr_is_any(nh_node->gate.ipv4))
1298 continue;
1299 #else
1300 if (!pim_addr_is_any(nh_node->gate.ipv6))
1301 continue;
1302 #endif
1303
1304 struct interface *ifp1 = if_lookup_by_index(
1305 nh_node->ifindex, pim->vrf->vrf_id);
1306
1307 if (nbr->interface != ifp1)
1308 continue;
1309
1310 #if PIM_IPV == 4
1311 nh_node->gate.ipv4 = nbr->source_addr;
1312 #else
1313 nh_node->gate.ipv6 = nbr->source_addr;
1314 #endif
1315 if (PIM_DEBUG_PIM_NHT_RP)
1316 zlog_debug(
1317 "%s: addr %pPA new nexthop addr %pPAs interface %s",
1318 __func__, &nht_p, &nbr->source_addr,
1319 ifp1->name);
1320 }
1321 }
1322 }