]> git.proxmox.com Git - mirror_frr.git/blob - pimd/pim_rp.c
Merge pull request #10629 from leonshaw/fix/mp-evpn-nh
[mirror_frr.git] / pimd / pim_rp.c
1 /*
2 * PIM for Quagga
3 * Copyright (C) 2015 Cumulus Networks, Inc.
4 * Donald Sharp
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20 #include <zebra.h>
21
22 #include "lib/json.h"
23 #include "log.h"
24 #include "network.h"
25 #include "if.h"
26 #include "linklist.h"
27 #include "prefix.h"
28 #include "memory.h"
29 #include "vty.h"
30 #include "vrf.h"
31 #include "plist.h"
32 #include "nexthop.h"
33 #include "table.h"
34 #include "lib_errors.h"
35
36 #include "pimd.h"
37 #include "pim_instance.h"
38 #include "pim_vty.h"
39 #include "pim_str.h"
40 #include "pim_iface.h"
41 #include "pim_rp.h"
42 #include "pim_rpf.h"
43 #include "pim_sock.h"
44 #include "pim_memory.h"
45 #include "pim_neighbor.h"
46 #include "pim_msdp.h"
47 #include "pim_nht.h"
48 #include "pim_mroute.h"
49 #include "pim_oil.h"
50 #include "pim_zebra.h"
51 #include "pim_bsm.h"
52 #include "pim_util.h"
53 #include "pim_ssm.h"
54
55 /* Cleanup pim->rpf_hash each node data */
56 void pim_rp_list_hash_clean(void *data)
57 {
58 struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
59
60 list_delete(&pnc->rp_list);
61
62 hash_clean(pnc->upstream_hash, NULL);
63 hash_free(pnc->upstream_hash);
64 pnc->upstream_hash = NULL;
65 if (pnc->nexthop)
66 nexthops_free(pnc->nexthop);
67
68 XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
69 }
70
/* Free one rp_info entry: the optional prefix-list name first, then the
 * structure itself.  XFREE is NULL-safe, so an absent plist needs no guard.
 */
static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);

	XFREE(MTYPE_PIM_RP, rp_info);
}
77
78 int pim_rp_list_cmp(void *v1, void *v2)
79 {
80 struct rp_info *rp1 = (struct rp_info *)v1;
81 struct rp_info *rp2 = (struct rp_info *)v2;
82 int ret;
83
84 /*
85 * Sort by RP IP address
86 */
87 ret = prefix_cmp(&rp1->rp.rpf_addr, &rp2->rp.rpf_addr);
88 if (ret)
89 return ret;
90
91 /*
92 * Sort by group IP address
93 */
94 ret = prefix_cmp(&rp1->group, &rp2->group);
95 if (ret)
96 return ret;
97
98 return 0;
99 }
100
/* Initialize per-instance RP state: the sorted rp_list, the group->RP
 * route table, and a catch-all rp_info covering the entire multicast
 * range with an unset (ANY) RP address.  Later lookups rely on this
 * default entry always being present in both containers.
 */
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	/* Should never fail; if it does, unwind everything built above
	 * so the instance is left without partially-initialized RP state.
	 */
	if (!pim_get_all_mcast_group(&rp_info->group)) {
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	/* ANY marks "no RP configured yet" for the catch-all entry */
	pim_addr_to_prefix(&rp_info->rp.rpf_addr, PIMADDR_ANY);

	listnode_add(pim->rp_list, rp_info);

	/* The same rp_info is referenced (not copied) by the route table;
	 * the table node holds a lock until pim_rp_free()/pim_rp_del().
	 */
	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));
}
133
134 void pim_rp_free(struct pim_instance *pim)
135 {
136 if (pim->rp_table)
137 route_table_finish(pim->rp_table);
138 pim->rp_table = NULL;
139
140 if (pim->rp_list)
141 list_delete(&pim->rp_list);
142 }
143
144 /*
145 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
146 */
147 static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
148 pim_addr rp, const char *plist)
149 {
150 struct listnode *node;
151 struct rp_info *rp_info;
152 struct prefix rp_prefix;
153
154 pim_addr_to_prefix(&rp_prefix, rp);
155
156 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
157 if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
158 rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
159 return rp_info;
160 }
161 }
162
163 return NULL;
164 }
165
166 /*
167 * Return true if plist is used by any rp_info
168 */
169 static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
170 {
171 struct listnode *node;
172 struct rp_info *rp_info;
173
174 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
175 if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
176 return 1;
177 }
178 }
179
180 return 0;
181 }
182
183 /*
184 * Given an RP's address, return the RP's rp_info that is an exact match for
185 * 'group'
186 */
187 static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
188 const struct prefix *group)
189 {
190 struct listnode *node;
191 struct rp_info *rp_info;
192 struct prefix rp_prefix;
193
194 pim_addr_to_prefix(&rp_prefix, rp);
195 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
196 if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
197 prefix_same(&rp_info->group, group))
198 return rp_info;
199 }
200
201 return NULL;
202 }
203
204 /*
205 * XXX: long-term issue: we don't actually have a good "ip address-list"
206 * implementation. ("access-list XYZ" is the closest but honestly it's
207 * kinda garbage.)
208 *
209 * So it's using a prefix-list to match an address here, which causes very
210 * unexpected results for the user since prefix-lists by default only match
211 * when the prefix length is an exact match too. i.e. you'd have to add the
212 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
213 *
214 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
215 * list match (this is the only user for that.)
216 *
217 * In the long run, we need to add a "ip address-list", but that's a wholly
218 * separate bag of worms, and existing configs using ip prefix-list would
219 * drop into the UX pitfall.
220 */
221
222 #include "lib/plist_int.h"
223
224 /*
225 * Given a group, return the rp_info for that group
226 */
227 struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
228 const struct prefix *group)
229 {
230 struct listnode *node;
231 struct rp_info *best = NULL;
232 struct rp_info *rp_info;
233 struct prefix_list *plist;
234 const struct prefix *bp;
235 const struct prefix_list_entry *entry;
236 struct route_node *rn;
237
238 bp = NULL;
239 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
240 if (rp_info->plist) {
241 plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
242
243 if (prefix_list_apply_ext(plist, &entry, group, true)
244 == PREFIX_DENY || !entry)
245 continue;
246
247 if (!best) {
248 best = rp_info;
249 bp = &entry->prefix;
250 continue;
251 }
252
253 if (bp && bp->prefixlen < entry->prefix.prefixlen) {
254 best = rp_info;
255 bp = &entry->prefix;
256 }
257 }
258 }
259
260 rn = route_node_match(pim->rp_table, group);
261 if (!rn) {
262 flog_err(
263 EC_LIB_DEVELOPMENT,
264 "%s: BUG We should have found default group information",
265 __func__);
266 return best;
267 }
268
269 rp_info = rn->info;
270 if (PIM_DEBUG_PIM_TRACE) {
271 if (best)
272 zlog_debug(
273 "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
274 group, best->plist, rn, &rp_info->group);
275 else
276 zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
277 rn, &rp_info->group);
278 }
279
280 route_unlock_node(rn);
281
282 /*
283 * rp's with prefix lists have the group as 224.0.0.0/4 which will
284 * match anything. So if we have a rp_info that should match a prefix
285 * list then if we do match then best should be the answer( even
286 * if it is NULL )
287 */
288 if (!rp_info || (rp_info && rp_info->plist))
289 return best;
290
291 /*
292 * So we have a non plist rp_info found in the lookup and no plists
293 * at all to be choosen, return it!
294 */
295 if (!best)
296 return rp_info;
297
298 /*
299 * If we have a matching non prefix list and a matching prefix
300 * list we should return the actual rp_info that has the LPM
301 * If they are equal, use the prefix-list( but let's hope
302 * the end-operator doesn't do this )
303 */
304 if (rp_info->group.prefixlen > bp->prefixlen)
305 best = rp_info;
306
307 return best;
308 }
309
/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * Currently this notifies MSDP of a possible i-am-RP change and forces
 * every upstream to re-evaluate whether it should use the RP tree.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}
322
323 void pim_rp_prefix_list_update(struct pim_instance *pim,
324 struct prefix_list *plist)
325 {
326 struct listnode *node;
327 struct rp_info *rp_info;
328 int refresh_needed = 0;
329
330 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
331 if (rp_info->plist
332 && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
333 refresh_needed = 1;
334 break;
335 }
336 }
337
338 if (refresh_needed)
339 pim_rp_refresh_group_to_rp_mapping(pim);
340 }
341
342 static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
343 struct pim_interface *pim_ifp)
344 {
345 struct listnode *node;
346 struct pim_secondary_addr *sec_addr;
347 pim_addr rpf_addr;
348
349 rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
350
351 if (!pim_addr_cmp(pim_ifp->primary_address, rpf_addr))
352 return 1;
353
354 if (!pim_ifp->sec_addr_list) {
355 return 0;
356 }
357
358 for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
359 if (prefix_same(&sec_addr->addr, &rp_info->rp.rpf_addr)) {
360 return 1;
361 }
362 }
363
364 return 0;
365 }
366
367 static void pim_rp_check_interfaces(struct pim_instance *pim,
368 struct rp_info *rp_info)
369 {
370 struct interface *ifp;
371
372 rp_info->i_am_rp = 0;
373 FOR_ALL_INTERFACES (pim->vrf, ifp) {
374 struct pim_interface *pim_ifp = ifp->info;
375
376 if (!pim_ifp)
377 continue;
378
379 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
380 rp_info->i_am_rp = 1;
381 }
382 }
383 }
384
/* Re-evaluate the upstream address for 'up' after an RP change.  If the
 * address changed, migrate the Zebra NHT registration from the old RP's
 * nexthop cache to the new one, recompute the RPF, and refresh the
 * kernel MFC / downstream state as needed.
 */
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;
	struct prefix nht_p;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	/* Nothing to do when the upstream address is unchanged. */
	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		pim_addr_to_prefix(&nht_p, old_upstream_addr);
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pFX with Zebra NHT",
				__func__, up->sg_str, &nht_p);
		pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	/* Remember the old RPF interface so a change can be detected below */
	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	/* Propagate the RPF change (or loss of a previously valid RPF)
	 * to zebra / downstream state.
	 */
	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}
438
/* Install a new RP mapping, either for an explicit group range or via a
 * prefix-list ('plist' non-NULL — the two are mutually exclusive here).
 * Handles duplicates, overlaps with existing static/BSR entries, takeover
 * of the unconfigured catch-all entry, and re-evaluation of all (*,G)
 * upstreams affected by the new mapping.  Returns PIM_SUCCESS or one of
 * the PIM_RP_* / PIM_GROUP_* error codes.
 */
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	struct prefix nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	pim_addr_to_prefix(&rp_info->rp.rpf_addr, rp_addr);
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 * (a prefix-list mapping replaces all group-range mappings)
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (prefix_same(&rp_info->rp.rpf_addr,
					&tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 * (a group-range mapping replaces a prefix-list mapping)
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    prefix_same(&rp_info->rp.rpf_addr,
					&tmp_rp_info->rp.rpf_addr)) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 * (i.e. fill in the catch-all entry instead of adding a new
		 * one), then update any (*,G) upstream still lacking an RP.
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pFX grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     &nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 * (only upgrade the source to static when applicable)
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					/* Same group, different source
					 * (e.g. BSR vs static): treat as an
					 * RP change for that group.
					 */
					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	/* Only non-prefix-list entries live in the group route table */
	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	/* Move any (*,G) upstream now covered by this new mapping */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
672
673 void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
674 const char *group_range, const char *plist)
675 {
676 struct prefix group;
677 int result;
678
679 if (group_range == NULL)
680 result = pim_get_all_mcast_group(&group);
681 else
682 result = str2prefix(group_range, &group);
683
684 if (!result) {
685 if (PIM_DEBUG_PIM_TRACE)
686 zlog_debug(
687 "%s: String to prefix failed for %pPAs group",
688 __func__, &rp_addr);
689 return;
690 }
691
692 pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
693 }
694
/* Remove an RP mapping (matched by prefix-list name or exact group).
 * A static deletion may be replaced by a dynamic BSR-learned RP for the
 * same group; the catch-all entry is reset rather than freed; affected
 * (*,G) upstreams are re-homed or cleared.  Returns PIM_SUCCESS or a
 * PIM_RP_* error code.
 */
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	/* Drop the plist name now; remember it existed so we know whether
	 * a route-table node has to be cleaned up further down.
	 */
	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	/* Deleting the catch-all entry: clear its RP address (back to the
	 * "unconfigured" state set by pim_rp_init) instead of freeing it,
	 * and detach every (*,G) upstream that was using it.
	 */
	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	/* Non-prefix-list entries also hold a node (plus its lock) in the
	 * group route table; release both references.
	 */
	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			/* NOTE(review): trp_info is not NULL-checked here;
			 * this appears to rely on the 224.0.0.0/4 catch-all
			 * always matching — confirm.
			 */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
850
/* Change the RP address for an existing group mapping (creating it via
 * pim_rp_new() when no mapping exists).  Moves the Zebra NHT tracking
 * from the old RP to the new one, re-sorts the rp_list, and re-evaluates
 * all (*,G) upstreams covered by this group.
 */
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	struct prefix nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	/* No existing table entry for this group: treat as a fresh add */
	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	/* Same RP address: only the source flag may need updating.
	 * NOTE(review): when the address AND source are both unchanged,
	 * execution falls through and redoes the full update below —
	 * confirm whether that is intentional.
	 */
	old_rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* NOTE(review): these two fields are overwritten by the struct
	 * assignments to nht_p below; this pre-fill looks redundant.
	 */
	nht_p.family = PIM_AF;
	nht_p.prefixlen = PIM_MAX_BITLEN;

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	pim_addr_to_prefix(&rp_info->rp.rpf_addr, new_rp_addr);
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	/* Re-insert: the list is sorted by RP address, which just changed */
	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
947
948 void pim_rp_setup(struct pim_instance *pim)
949 {
950 struct listnode *node;
951 struct rp_info *rp_info;
952 struct prefix nht_p;
953
954 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
955 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
956 continue;
957
958 nht_p = rp_info->rp.rpf_addr;
959
960 pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
961 if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
962 &nht_p, &rp_info->group, 1))
963 if (PIM_DEBUG_PIM_NHT_RP)
964 zlog_debug(
965 "Unable to lookup nexthop for rp specified");
966 }
967 }
968
969 /*
970 * Checks to see if we should elect ourself the actual RP when new if
971 * addresses are added against an interface.
972 */
973 void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
974 {
975 struct listnode *node;
976 struct rp_info *rp_info;
977 bool i_am_rp_changed = false;
978 struct pim_instance *pim = pim_ifp->pim;
979
980 if (pim->rp_list == NULL)
981 return;
982
983 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
984 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
985 continue;
986
987 /* if i_am_rp is already set nothing to be done (adding new
988 * addresses
989 * is not going to make a difference). */
990 if (rp_info->i_am_rp) {
991 continue;
992 }
993
994 if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
995 i_am_rp_changed = true;
996 rp_info->i_am_rp = 1;
997 if (PIM_DEBUG_PIM_NHT_RP) {
998 char rp[PREFIX_STRLEN];
999 pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
1000 rp, sizeof(rp));
1001 zlog_debug("%s: %s: i am rp", __func__, rp);
1002 }
1003 }
1004 }
1005
1006 if (i_am_rp_changed) {
1007 pim_msdp_i_am_rp_changed(pim);
1008 pim_upstream_reeval_use_rpt(pim);
1009 }
1010 }
1011
1012 /* un-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
1013  * are removed. Removing addresses is an uncommon event in an active network
1014  * so I have made no attempt to optimize it. */
1015 void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
1016 {
1017 struct listnode *node;
1018 struct rp_info *rp_info;
1019 bool i_am_rp_changed = false;
1020 int old_i_am_rp;
1021
1022 if (pim->rp_list == NULL)
1023 return;
1024
1025 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1026 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1027 continue;
1028
1029 old_i_am_rp = rp_info->i_am_rp;
1030 pim_rp_check_interfaces(pim, rp_info);
1031
1032 if (old_i_am_rp != rp_info->i_am_rp) {
1033 i_am_rp_changed = true;
1034 if (PIM_DEBUG_PIM_NHT_RP) {
1035 char rp[PREFIX_STRLEN];
1036 pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
1037 rp, sizeof(rp));
1038 if (rp_info->i_am_rp) {
1039 zlog_debug("%s: %s: i am rp", __func__,
1040 rp);
1041 } else {
1042 zlog_debug("%s: %s: i am no longer rp",
1043 __func__, rp);
1044 }
1045 }
1046 }
1047 }
1048
1049 if (i_am_rp_changed) {
1050 pim_msdp_i_am_rp_changed(pim);
1051 pim_upstream_reeval_use_rpt(pim);
1052 }
1053 }
1054
1055 /*
1056 * I_am_RP(G) is true if the group-to-RP mapping indicates that
1057 * this router is the RP for the group.
1058 *
1059 * Since we only have static RP, all groups are part of this RP
1060 */
1061 int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
1062 {
1063 struct prefix g;
1064 struct rp_info *rp_info;
1065
1066 memset(&g, 0, sizeof(g));
1067 pim_addr_to_prefix(&g, group);
1068 rp_info = pim_rp_find_match_group(pim, &g);
1069
1070 if (rp_info)
1071 return rp_info->i_am_rp;
1072 return 0;
1073 }
1074
/*
 * RP(G)
 *
 * Return the RP that the Group belongs too.
 *
 * Side effects: as part of the lookup, the RP's address is (re)registered
 * with Zebra NHT, the RPF refresh time is updated, and the RP's nexthop
 * is re-resolved.  Returns NULL when no rp_info covers the group.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		struct prefix nht_p;

		/* Register addr with Zebra NHT */
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug(
				"%s: NHT Register RP addr %pFX grp %pFX with Zebra",
				__func__, &nht_p, &rp_info->group);
		pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
		pim_rpf_set_refresh_time(pim);
		/* Lookup failure is tolerated here; caller gets the rpf
		 * regardless of whether a nexthop was resolved.
		 */
		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					      &nht_p, &rp_info->group, 1);
		return (&rp_info->rp);
	}

	// About to Go Down
	return NULL;
}
1109
1110 /*
1111 * Set the upstream IP address we want to talk to based upon
1112 * the rp configured and the source address
1113 *
1114 * If we have don't have a RP configured and the source address is *
1115 * then set the upstream addr as INADDR_ANY and return failure.
1116 *
1117 */
1118 int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
1119 pim_addr source, pim_addr group)
1120 {
1121 struct rp_info *rp_info;
1122 struct prefix g;
1123
1124 memset(&g, 0, sizeof(g));
1125
1126 pim_addr_to_prefix(&g, group);
1127
1128 rp_info = pim_rp_find_match_group(pim, &g);
1129
1130 if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
1131 (pim_addr_is_any(source)))) {
1132 if (PIM_DEBUG_PIM_NHT_RP)
1133 zlog_debug("%s: Received a (*,G) with no RP configured",
1134 __func__);
1135 *up = PIMADDR_ANY;
1136 return 0;
1137 }
1138
1139 if (pim_addr_is_any(source))
1140 *up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
1141 else
1142 *up = source;
1143
1144 return 1;
1145 }
1146
1147 int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
1148 const char *spaces)
1149 {
1150 struct listnode *node;
1151 struct rp_info *rp_info;
1152 int count = 0;
1153 pim_addr rp_addr;
1154
1155 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1156 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1157 continue;
1158
1159 if (rp_info->rp_src == RP_SRC_BSR)
1160 continue;
1161
1162 rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
1163 if (rp_info->plist)
1164 vty_out(vty,
1165 "%s" PIM_AF_NAME
1166 " pim rp %pPA prefix-list %s\n",
1167 spaces, &rp_addr, rp_info->plist);
1168 else
1169 vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
1170 spaces, &rp_addr, &rp_info->group);
1171 count++;
1172 }
1173
1174 return count;
1175 }
1176
/*
 * Display the RP table for "pim" to the vty, either as an aligned text
 * table or, when "json" is non-NULL, as JSON keyed by RP address with an
 * array of rows per RP.  When "range" is given, only entries whose group
 * prefix matches it are shown.  The placeholder any-RP entry is skipped.
 *
 * NOTE(review): the JSON grouping assumes pim->rp_list keeps entries for
 * the same RP adjacent — rows are flushed whenever the RP address changes
 * from the previous iteration.  Confirm the list is kept sorted that way.
 */
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	char source[7]; /* fits the longest value, "Static", plus NUL */

	json_object *json_rp_rows = NULL; /* rows accumulated for current RP */
	json_object *json_row = NULL;

	if (!json)
		vty_out(vty,
			"RP address group/prefix-list OIF I am RP Source Group-Type\n");
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* Skip the unconfigured placeholder entry */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		/* Honor the optional group-range filter */
		if (range && !prefix_match(&rp_info->group, range))
			continue;

		/* How this RP entry was learned */
		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    prefix_cmp(&prev_rp_info->rp.rpf_addr,
				       &rp_info->rp.rpf_addr)) {
				json_object_object_addf(
					json, json_rp_rows, "%pFXh",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pFXh",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			/* A row is matched either by prefix-list or group */
			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			vty_out(vty, "%-15pFXh ", &rp_info->rp.rpf_addr);

			if (rp_info->plist)
				vty_out(vty, "%-18s ", rp_info->plist);
			else
				vty_out(vty, "%-18pFX ", &rp_info->group);

			if (rp_info->rp.source_nexthop.interface)
				vty_out(vty, "%-16s ",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				vty_out(vty, "%-16s ", "(Unknown)");

			if (rp_info->i_am_rp)
				vty_out(vty, "yes");
			else
				vty_out(vty, "no");

			vty_out(vty, "%14s", source);
			vty_out(vty, "%6s\n", group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Flush the rows collected for the final RP */
	if (json) {
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pFXh",
						&prev_rp_info->rp.rpf_addr);
	}
}
1291
1292 void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
1293 {
1294 struct listnode *node = NULL;
1295 struct rp_info *rp_info = NULL;
1296 struct nexthop *nh_node = NULL;
1297 struct prefix nht_p;
1298 struct pim_nexthop_cache pnc;
1299
1300 for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1301 if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1302 continue;
1303
1304 nht_p = rp_info->rp.rpf_addr;
1305 memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
1306 if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
1307 &pnc))
1308 continue;
1309
1310 for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
1311 #if PIM_IPV == 4
1312 if (!pim_addr_is_any(nh_node->gate.ipv4))
1313 continue;
1314 #else
1315 if (!pim_addr_is_any(nh_node->gate.ipv6))
1316 continue;
1317 #endif
1318
1319 struct interface *ifp1 = if_lookup_by_index(
1320 nh_node->ifindex, pim->vrf->vrf_id);
1321
1322 if (nbr->interface != ifp1)
1323 continue;
1324
1325 #if PIM_IPV == 4
1326 nh_node->gate.ipv4 = nbr->source_addr;
1327 #else
1328 nh_node->gate.ipv6 = nbr->source_addr;
1329 #endif
1330 if (PIM_DEBUG_PIM_NHT_RP)
1331 zlog_debug(
1332 "%s: addr %pFXh new nexthop addr %pPAs interface %s",
1333 __func__, &nht_p, &nbr->source_addr,
1334 ifp1->name);
1335 }
1336 }
1337 }