zebra/zebra_nhg.c
1 /* Zebra Nexthop Group Code.
2 * Copyright (C) 2019 Cumulus Networks, Inc.
3 * Donald Sharp
4 * Stephen Worley
5 *
6 * This file is part of FRR.
7 *
8 * FRR is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * FRR is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with FRR; see the file COPYING. If not, write to the Free
20 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
21 * 02111-1307, USA.
22 */
23 #include <zebra.h>
24
25 #include "lib/nexthop.h"
26 #include "lib/nexthop_group_private.h"
27 #include "lib/routemap.h"
28 #include "lib/mpls.h"
29 #include "lib/jhash.h"
30 #include "lib/debug.h"
31 #include "lib/lib_errors.h"
32
33 #include "zebra/connected.h"
34 #include "zebra/debug.h"
35 #include "zebra/zebra_router.h"
36 #include "zebra/zebra_nhg_private.h"
37 #include "zebra/zebra_rnh.h"
38 #include "zebra/zebra_routemap.h"
39 #include "zebra/zebra_srte.h"
40 #include "zebra/zserv.h"
41 #include "zebra/rt.h"
42 #include "zebra_errors.h"
43 #include "zebra_dplane.h"
44 #include "zebra/interface.h"
45 #include "zebra/zapi_msg.h"
46
47 DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");
48 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected");
49 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CTX, "Nexthop Group Context");
50
51 /* Map backup nexthop indices between two nhes */
52 struct backup_nh_map_s {
53 int map_count;
54
55 struct {
56 uint8_t orig_idx;
57 uint8_t new_idx;
58 } map[MULTIPATH_NUM];
59 };
60
61 /* ID counter to keep in sync with the kernel */
62 uint32_t id_counter;
63
64 /* Controlled through the UI */
65 static bool g_nexthops_enabled = true;
66 static bool proto_nexthops_only;
67 static bool use_recursive_backups = true;
68
69 static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi,
70 int type, bool from_dplane);
71 static void depends_add(struct nhg_connected_tree_head *head,
72 struct nhg_hash_entry *depend);
73 static struct nhg_hash_entry *
74 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
75 afi_t afi, int type, bool from_dplane);
76 static struct nhg_hash_entry *
77 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id);
78 static void depends_decrement_free(struct nhg_connected_tree_head *head);
79
80 static struct nhg_backup_info *
81 nhg_backup_copy(const struct nhg_backup_info *orig);
82
83 /* Helper function for getting the next allocatable ID */
84 static uint32_t nhg_get_next_id(void)
85 {
86 while (1) {
87 id_counter++;
88
89 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
90 zlog_debug("%s: ID %u checking", __func__, id_counter);
91
92 if (id_counter == ZEBRA_NHG_PROTO_LOWER) {
93 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
94 zlog_debug("%s: ID counter wrapped", __func__);
95
96 id_counter = 0;
97 continue;
98 }
99
100 if (zebra_nhg_lookup_id(id_counter)) {
101 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
102 zlog_debug("%s: ID already exists", __func__);
103
104 continue;
105 }
106
107 break;
108 }
109
110 return id_counter;
111 }
112
113 static void nhg_connected_free(struct nhg_connected *dep)
114 {
115 XFREE(MTYPE_NHG_CONNECTED, dep);
116 }
117
118 static struct nhg_connected *nhg_connected_new(struct nhg_hash_entry *nhe)
119 {
120 struct nhg_connected *new = NULL;
121
122 new = XCALLOC(MTYPE_NHG_CONNECTED, sizeof(struct nhg_connected));
123 new->nhe = nhe;
124
125 return new;
126 }
127
128 void nhg_connected_tree_free(struct nhg_connected_tree_head *head)
129 {
130 struct nhg_connected *rb_node_dep = NULL;
131
132 if (!nhg_connected_tree_is_empty(head)) {
133 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
134 nhg_connected_tree_del(head, rb_node_dep);
135 nhg_connected_free(rb_node_dep);
136 }
137 }
138 }
139
140 bool nhg_connected_tree_is_empty(const struct nhg_connected_tree_head *head)
141 {
142 return nhg_connected_tree_count(head) ? false : true;
143 }
144
145 struct nhg_connected *
146 nhg_connected_tree_root(struct nhg_connected_tree_head *head)
147 {
148 return nhg_connected_tree_first(head);
149 }
150
151 struct nhg_hash_entry *
152 nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head,
153 struct nhg_hash_entry *depend)
154 {
155 struct nhg_connected lookup = {};
156 struct nhg_connected *remove = NULL;
157 struct nhg_hash_entry *removed_nhe;
158
159 lookup.nhe = depend;
160
161 /* Lookup to find the element, then remove it */
162 remove = nhg_connected_tree_find(head, &lookup);
163 if (remove)
164 		/* Re-returning here just in case this API changes...
165 		 * the _del list APIs are a bit undefined at the moment.
166 		 *
167 		 * So hopefully returning here will make it fail if the API
168 		 * changes to something different than currently expected.
169 		 */
170 remove = nhg_connected_tree_del(head, remove);
171
172 	/* If the entry was successfully removed, free the 'connected' struct */
173 if (remove) {
174 removed_nhe = remove->nhe;
175 nhg_connected_free(remove);
176 return removed_nhe;
177 }
178
179 return NULL;
180 }
181
182 /* Assuming UNIQUE RB tree. If this changes, assumptions here about
183 * insertion need to change.
184 */
185 struct nhg_hash_entry *
186 nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head,
187 struct nhg_hash_entry *depend)
188 {
189 struct nhg_connected *new = NULL;
190
191 new = nhg_connected_new(depend);
192
193 /* On success, NULL will be returned from the
194 * RB code.
195 */
196 if (new && (nhg_connected_tree_add(head, new) == NULL))
197 return NULL;
198
199 /* If it wasn't successful, it must be a duplicate. We enforce the
200 * unique property for the `nhg_connected` tree.
201 */
202 nhg_connected_free(new);
203
204 return depend;
205 }
206
207 static void
208 nhg_connected_tree_decrement_ref(struct nhg_connected_tree_head *head)
209 {
210 struct nhg_connected *rb_node_dep = NULL;
211
212 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
213 zebra_nhg_decrement_ref(rb_node_dep->nhe);
214 }
215 }
216
217 static void
218 nhg_connected_tree_increment_ref(struct nhg_connected_tree_head *head)
219 {
220 struct nhg_connected *rb_node_dep = NULL;
221
222 frr_each(nhg_connected_tree, head, rb_node_dep) {
223 zebra_nhg_increment_ref(rb_node_dep->nhe);
224 }
225 }
226
227 struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe)
228 {
229 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE)
230 && !zebra_nhg_depends_is_empty(nhe)) {
231 nhe = nhg_connected_tree_root(&nhe->nhg_depends)->nhe;
232 return zebra_nhg_resolve(nhe);
233 }
234
235 return nhe;
236 }
237
238 unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe)
239 {
240 return nhg_connected_tree_count(&nhe->nhg_depends);
241 }
242
243 bool zebra_nhg_depends_is_empty(const struct nhg_hash_entry *nhe)
244 {
245 return nhg_connected_tree_is_empty(&nhe->nhg_depends);
246 }
247
248 static void zebra_nhg_depends_del(struct nhg_hash_entry *from,
249 struct nhg_hash_entry *depend)
250 {
251 nhg_connected_tree_del_nhe(&from->nhg_depends, depend);
252 }
253
254 static void zebra_nhg_depends_init(struct nhg_hash_entry *nhe)
255 {
256 nhg_connected_tree_init(&nhe->nhg_depends);
257 }
258
259 unsigned int zebra_nhg_dependents_count(const struct nhg_hash_entry *nhe)
260 {
261 return nhg_connected_tree_count(&nhe->nhg_dependents);
262 }
263
264
265 bool zebra_nhg_dependents_is_empty(const struct nhg_hash_entry *nhe)
266 {
267 return nhg_connected_tree_is_empty(&nhe->nhg_dependents);
268 }
269
270 static void zebra_nhg_dependents_del(struct nhg_hash_entry *from,
271 struct nhg_hash_entry *dependent)
272 {
273 nhg_connected_tree_del_nhe(&from->nhg_dependents, dependent);
274 }
275
276 static void zebra_nhg_dependents_add(struct nhg_hash_entry *to,
277 struct nhg_hash_entry *dependent)
278 {
279 nhg_connected_tree_add_nhe(&to->nhg_dependents, dependent);
280 }
281
282 static void zebra_nhg_dependents_init(struct nhg_hash_entry *nhe)
283 {
284 nhg_connected_tree_init(&nhe->nhg_dependents);
285 }
286
287 /* Release this nhe from anything depending on it */
288 static void zebra_nhg_dependents_release(struct nhg_hash_entry *nhe)
289 {
290 struct nhg_connected *rb_node_dep = NULL;
291
292 frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
293 zebra_nhg_depends_del(rb_node_dep->nhe, nhe);
294 /* recheck validity of the dependent */
295 zebra_nhg_check_valid(rb_node_dep->nhe);
296 }
297 }
298
299 /* Release this nhe from anything that it depends on */
300 static void zebra_nhg_depends_release(struct nhg_hash_entry *nhe)
301 {
302 if (!zebra_nhg_depends_is_empty(nhe)) {
303 struct nhg_connected *rb_node_dep = NULL;
304
305 frr_each_safe(nhg_connected_tree, &nhe->nhg_depends,
306 rb_node_dep) {
307 zebra_nhg_dependents_del(rb_node_dep->nhe, nhe);
308 }
309 }
310 }
311
312
313 struct nhg_hash_entry *zebra_nhg_lookup_id(uint32_t id)
314 {
315 struct nhg_hash_entry lookup = {};
316
317 lookup.id = id;
318 return hash_lookup(zrouter.nhgs_id, &lookup);
319 }
320
321 static int zebra_nhg_insert_id(struct nhg_hash_entry *nhe)
322 {
323 if (hash_lookup(zrouter.nhgs_id, nhe)) {
324 flog_err(
325 EC_ZEBRA_NHG_TABLE_INSERT_FAILED,
326 "Failed inserting NHG id=%u into the ID hash table, entry already exists",
327 nhe->id);
328 return -1;
329 }
330
331 hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern);
332
333 return 0;
334 }
335
336 static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
337 {
338 nhe->ifp = ifp;
339 if_nhg_dependents_add(ifp, nhe);
340 }
341
342 static void
343 zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
344 struct nhg_connected_tree_head *nhg_depends)
345 {
346 struct nhg_connected *rb_node_dep = NULL;
347
348 	/* This has been allocated higher up in the stack. We could probably
349 	 * re-allocate and free the old memory, but we just reuse it for now.
350 	 * Otherwise, there might be a time trade-off for repeated
351 	 * alloc/frees at startup.
352 	 */
353 nhe->nhg_depends = *nhg_depends;
354
355 /* Attach backpointer to anything that it depends on */
356 zebra_nhg_dependents_init(nhe);
357 if (!zebra_nhg_depends_is_empty(nhe)) {
358 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
359 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
360 zlog_debug("%s: nhe %p (%u), dep %p (%u)",
361 __func__, nhe, nhe->id,
362 rb_node_dep->nhe,
363 rb_node_dep->nhe->id);
364
365 zebra_nhg_dependents_add(rb_node_dep->nhe, nhe);
366 }
367 }
368 }
369
370 /* Init an nhe, for use in a hash lookup for example */
371 void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
372 const struct nexthop *nh)
373 {
374 memset(nhe, 0, sizeof(struct nhg_hash_entry));
375 nhe->vrf_id = VRF_DEFAULT;
376 nhe->type = ZEBRA_ROUTE_NHG;
377 nhe->afi = AFI_UNSPEC;
378
379 /* There are some special rules that apply to groups representing
380 * a single nexthop.
381 */
382 if (nh && (nh->next == NULL)) {
383 switch (nh->type) {
384 case NEXTHOP_TYPE_IFINDEX:
385 case NEXTHOP_TYPE_BLACKHOLE:
386 /*
387 			 * This switch case handles setting the afi differently
388 			 * for ipv4/v6 routes. Ifindex/blackhole nexthop
389 			 * objects cannot be ambiguous; they must be Address
390 			 * Family specific. If we get here, we will either use
391 			 * the AF of the route, or the one we got passed from
392 			 * the kernel.
393 */
394 nhe->afi = afi;
395 break;
396 case NEXTHOP_TYPE_IPV4_IFINDEX:
397 case NEXTHOP_TYPE_IPV4:
398 nhe->afi = AFI_IP;
399 break;
400 case NEXTHOP_TYPE_IPV6_IFINDEX:
401 case NEXTHOP_TYPE_IPV6:
402 nhe->afi = AFI_IP6;
403 break;
404 }
405 }
406 }
407
408 struct nhg_hash_entry *zebra_nhg_alloc(void)
409 {
410 struct nhg_hash_entry *nhe;
411
412 nhe = XCALLOC(MTYPE_NHG, sizeof(struct nhg_hash_entry));
413
414 return nhe;
415 }
416
417 /*
418 * Allocate new nhe and make shallow copy of 'orig'; no
419 * recursive info is copied.
420 */
421 struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig,
422 uint32_t id)
423 {
424 struct nhg_hash_entry *nhe;
425
426 nhe = zebra_nhg_alloc();
427
428 nhe->id = id;
429
430 nexthop_group_copy(&(nhe->nhg), &(orig->nhg));
431
432 nhe->vrf_id = orig->vrf_id;
433 nhe->afi = orig->afi;
434 nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG;
435 nhe->refcnt = 0;
436 nhe->dplane_ref = zebra_router_get_next_sequence();
437
438 /* Copy backup info also, if present */
439 if (orig->backup_info)
440 nhe->backup_info = nhg_backup_copy(orig->backup_info);
441
442 return nhe;
443 }
444
445 /* Allocation via hash handler */
446 static void *zebra_nhg_hash_alloc(void *arg)
447 {
448 struct nhg_hash_entry *nhe = NULL;
449 struct nhg_hash_entry *copy = arg;
450
451 nhe = zebra_nhe_copy(copy, copy->id);
452
453 /* Mark duplicate nexthops in a group at creation time. */
454 nexthop_group_mark_duplicates(&(nhe->nhg));
455
456 /*
457 * Add the ifp now if it's not a group or recursive and has ifindex.
458 *
459 * A proto-owned ID is always a group.
460 */
461 if (!PROTO_OWNED(nhe) && nhe->nhg.nexthop && !nhe->nhg.nexthop->next
462 && !nhe->nhg.nexthop->resolved && nhe->nhg.nexthop->ifindex) {
463 struct interface *ifp = NULL;
464
465 ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
466 nhe->nhg.nexthop->vrf_id);
467 if (ifp)
468 zebra_nhg_set_if(nhe, ifp);
469 else {
470 if (IS_ZEBRA_DEBUG_NHG)
471 zlog_debug(
472 "Failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
473 nhe->nhg.nexthop->ifindex,
474 nhe->nhg.nexthop->vrf_id, nhe->id);
475 }
476 }
477
478 return nhe;
479 }
480
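/* Hash key for an nhe: mix the primary nexthop group, any backup group,
 * and the entry's type, vrf and afi.
 */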
481 uint32_t zebra_nhg_hash_key(const void *arg)
482 {
483 const struct nhg_hash_entry *nhe = arg;
484 uint32_t key = 0x5a351234;
485 uint32_t primary = 0;
486 uint32_t backup = 0;
487
488 primary = nexthop_group_hash(&(nhe->nhg));
489 if (nhe->backup_info)
490 backup = nexthop_group_hash(&(nhe->backup_info->nhe->nhg));
491
492 key = jhash_3words(primary, backup, nhe->type, key);
493
494 key = jhash_2words(nhe->vrf_id, nhe->afi, key);
495
496 return key;
497 }
498
499 uint32_t zebra_nhg_id_key(const void *arg)
500 {
501 const struct nhg_hash_entry *nhe = arg;
502
503 return nhe->id;
504 }
505
506 /* Helper with common nhg/nhe nexthop comparison logic */
507 static bool nhg_compare_nexthops(const struct nexthop *nh1,
508 const struct nexthop *nh2)
509 {
510 assert(nh1 != NULL && nh2 != NULL);
511
512 /*
513 * We have to check the active flag of each individual one,
514 * not just the overall active_num. This solves the special case
515 * issue of a route with a nexthop group with one nexthop
516 * resolving to itself and thus marking it inactive. If we
517 * have two different routes each wanting to mark a different
518 * nexthop inactive, they need to hash to two different groups.
519 *
520 * If we just hashed on num_active, they would hash the same
521 * which is incorrect.
522 *
523 * ex)
524 * 1.1.1.0/24
525 * -> 1.1.1.1 dummy1 (inactive)
526 * -> 1.1.2.1 dummy2
527 *
528 * 1.1.2.0/24
529 * -> 1.1.1.1 dummy1
530 * -> 1.1.2.1 dummy2 (inactive)
531 *
532 * Without checking each individual one, they would hash to
533 * the same group and both have 1.1.1.1 dummy1 marked inactive.
534 *
535 */
536 if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE)
537 != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE))
538 return false;
539
540 if (!nexthop_same(nh1, nh2))
541 return false;
542
543 return true;
544 }
545
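/* Content equality test for two hash entries: entries with the same
 * nonzero ID are considered equal; otherwise compare type, vrf, afi,
 * the ordered nexthop lists, and any backup nexthop lists.
 */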
546 bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
547 {
548 const struct nhg_hash_entry *nhe1 = arg1;
549 const struct nhg_hash_entry *nhe2 = arg2;
550 struct nexthop *nexthop1;
551 struct nexthop *nexthop2;
552
553 	/* No matter what, if they have equal IDs, assume they are equal */
554 if (nhe1->id && nhe2->id && (nhe1->id == nhe2->id))
555 return true;
556
557 if (nhe1->type != nhe2->type)
558 return false;
559
560 if (nhe1->vrf_id != nhe2->vrf_id)
561 return false;
562
563 if (nhe1->afi != nhe2->afi)
564 return false;
565
566 /* Nexthops should be in-order, so we simply compare them in-place */
567 for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
568 nexthop1 && nexthop2;
569 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
570
571 if (!nhg_compare_nexthops(nexthop1, nexthop2))
572 return false;
573 }
574
575 /* Check for unequal list lengths */
576 if (nexthop1 || nexthop2)
577 return false;
578
579 /* If there's no backup info, comparison is done. */
580 if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL))
581 return true;
582
583 /* Compare backup info also - test the easy things first */
584 if (nhe1->backup_info && (nhe2->backup_info == NULL))
585 return false;
586 if (nhe2->backup_info && (nhe1->backup_info == NULL))
587 return false;
588
589 /* Compare number of backups before actually comparing any */
590 for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
591 nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
592 nexthop1 && nexthop2;
593 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
594 ;
595 }
596
597 /* Did we find the end of one list before the other? */
598 if (nexthop1 || nexthop2)
599 return false;
600
601 /* Have to compare the backup nexthops */
602 for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
603 nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
604 nexthop1 && nexthop2;
605 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
606
607 if (!nhg_compare_nexthops(nexthop1, nexthop2))
608 return false;
609 }
610
611 return true;
612 }
613
614 bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2)
615 {
616 const struct nhg_hash_entry *nhe1 = arg1;
617 const struct nhg_hash_entry *nhe2 = arg2;
618
619 return nhe1->id == nhe2->id;
620 }
621
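/* Build a nexthop group (and its depends tree) from an array of
 * kernel nexthop-group member IDs; fails if any member ID is unknown.
 */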
622 static int zebra_nhg_process_grp(struct nexthop_group *nhg,
623 struct nhg_connected_tree_head *depends,
624 struct nh_grp *grp, uint8_t count)
625 {
626 nhg_connected_tree_init(depends);
627
628 for (int i = 0; i < count; i++) {
629 struct nhg_hash_entry *depend = NULL;
630 /* We do not care about nexthop_grp.weight at
631 * this time. But we should figure out
632 * how to adapt this to our code in
633 * the future.
634 */
635 depend = depends_find_id_add(depends, grp[i].id);
636
637 if (!depend) {
638 flog_err(
639 EC_ZEBRA_NHG_SYNC,
640 "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table",
641 grp[i].id);
642 return -1;
643 }
644
645 /*
646 * If this is a nexthop with its own group
647 		 * dependencies, add them as well. Not sure it's
648 * even possible to have a group within a group
649 * in the kernel.
650 */
651
652 copy_nexthops(&nhg->nexthop, depend->nhg.nexthop, NULL);
653 }
654
655 return 0;
656 }
657
658 static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends,
659 struct nexthop *nh, afi_t afi, int type)
660 {
661 struct nhg_hash_entry *depend = NULL;
662 struct nexthop_group resolved_ng = {};
663
664 resolved_ng.nexthop = nh;
665
666 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
667 zlog_debug("%s: head %p, nh %pNHv",
668 __func__, nhg_depends, nh);
669
670 depend = zebra_nhg_rib_find(0, &resolved_ng, afi, type);
671
672 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
673 zlog_debug("%s: nh %pNHv => %p (%u)",
674 __func__, nh, depend,
675 depend ? depend->id : 0);
676
677 if (depend)
678 depends_add(nhg_depends, depend);
679 }
680
681 /*
682 * Lookup an nhe in the global hash, using data from another nhe. If 'lookup'
683 * has an id value, that's used. Create a new global/shared nhe if not found.
684 */
685 static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */
686 struct nhg_hash_entry *lookup,
687 struct nhg_connected_tree_head *nhg_depends,
688 afi_t afi, bool from_dplane)
689 {
690 bool created = false;
691 bool recursive = false;
692 struct nhg_hash_entry *newnhe, *backup_nhe;
693 struct nexthop *nh = NULL;
694
695 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
696 zlog_debug(
697 "%s: id %u, lookup %p, vrf %d, type %d, depends %p%s",
698 __func__, lookup->id, lookup, lookup->vrf_id,
699 lookup->type, nhg_depends,
700 (from_dplane ? " (from dplane)" : ""));
701
702 if (lookup->id)
703 (*nhe) = zebra_nhg_lookup_id(lookup->id);
704 else
705 (*nhe) = hash_lookup(zrouter.nhgs, lookup);
706
707 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
708 zlog_debug("%s: lookup => %p (%u)",
709 __func__, (*nhe),
710 (*nhe) ? (*nhe)->id : 0);
711
712 /* If we found an existing object, we're done */
713 if (*nhe)
714 goto done;
715
716 /* We're going to create/insert a new nhe:
717 * assign the next global id value if necessary.
718 */
719 if (lookup->id == 0)
720 lookup->id = nhg_get_next_id();
721
722 if (!from_dplane && lookup->id < ZEBRA_NHG_PROTO_LOWER) {
723 /*
724 * This is a zebra hashed/owned NHG.
725 *
726 * It goes in HASH and ID table.
727 */
728 newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc);
729 zebra_nhg_insert_id(newnhe);
730 } else {
731 /*
732 		 * This is an upper-level protocol owned NHG, or one we read in
733 		 * from the dataplane, and it should not go into the content hash.
734 		 *
735 		 * It goes in the ID table only.
736 */
737 newnhe =
738 hash_get(zrouter.nhgs_id, lookup, zebra_nhg_hash_alloc);
739 }
740
741 created = true;
742
743 /* Mail back the new object */
744 *nhe = newnhe;
745
746 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
747 zlog_debug("%s: => created %p (%u)", __func__, newnhe,
748 newnhe->id);
749
750 /* Only hash/lookup the depends if the first lookup
751 * fails to find something. This should hopefully save a
752 * lot of cycles for larger ecmp sizes.
753 */
754 if (nhg_depends) {
755 /* If you don't want to hash on each nexthop in the
756 * nexthop group struct you can pass the depends
757 * directly. Kernel-side we do this since it just looks
758 * them up via IDs.
759 */
760 zebra_nhg_connect_depends(newnhe, nhg_depends);
761 goto done;
762 }
763
764 /* Prepare dependency relationships if this is not a
765 * singleton nexthop. There are two cases: a single
766 * recursive nexthop, where we need a relationship to the
767 * resolving nexthop; or a group of nexthops, where we need
768 * relationships with the corresponding singletons.
769 */
770 zebra_nhg_depends_init(newnhe);
771
772 nh = newnhe->nhg.nexthop;
773
774 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
775 SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID);
776
777 if (nh->next == NULL && newnhe->id < ZEBRA_NHG_PROTO_LOWER) {
778 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
779 /* Single recursive nexthop */
780 handle_recursive_depend(&newnhe->nhg_depends,
781 nh->resolved, afi,
782 newnhe->type);
783 recursive = true;
784 }
785 } else {
786 /* Proto-owned are groups by default */
787 /* List of nexthops */
788 for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) {
789 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
790 zlog_debug("%s: depends NH %pNHv %s",
791 __func__, nh,
792 CHECK_FLAG(nh->flags,
793 NEXTHOP_FLAG_RECURSIVE) ?
794 "(R)" : "");
795
796 depends_find_add(&newnhe->nhg_depends, nh, afi,
797 newnhe->type, from_dplane);
798 }
799 }
800
801 if (recursive)
802 SET_FLAG(newnhe->flags, NEXTHOP_GROUP_RECURSIVE);
803
804 /* Attach dependent backpointers to singletons */
805 zebra_nhg_connect_depends(newnhe, &newnhe->nhg_depends);
806
807 /**
808 * Backup Nexthops
809 */
810
811 if (zebra_nhg_get_backup_nhg(newnhe) == NULL ||
812 zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL)
813 goto done;
814
815 /* If there are backup nexthops, add them to the backup
816 * depends tree. The rules here are a little different.
817 */
818 recursive = false;
819 backup_nhe = newnhe->backup_info->nhe;
820
821 nh = backup_nhe->nhg.nexthop;
822
823 /* Singleton recursive NH */
824 if (nh->next == NULL &&
825 CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
826 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
827 zlog_debug("%s: backup depend NH %pNHv (R)",
828 __func__, nh);
829
830 /* Single recursive nexthop */
831 handle_recursive_depend(&backup_nhe->nhg_depends, nh->resolved,
832 afi, backup_nhe->type);
833 recursive = true;
834 } else {
835 /* One or more backup NHs */
836 for (; nh; nh = nh->next) {
837 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
838 zlog_debug("%s: backup depend NH %pNHv %s",
839 __func__, nh,
840 CHECK_FLAG(nh->flags,
841 NEXTHOP_FLAG_RECURSIVE) ?
842 "(R)" : "");
843
844 depends_find_add(&backup_nhe->nhg_depends, nh, afi,
845 backup_nhe->type, from_dplane);
846 }
847 }
848
849 if (recursive)
850 SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE);
851
852 done:
853 /* Reset time since last update */
854 (*nhe)->uptime = monotime(NULL);
855
856 return created;
857 }
858
859 /*
860 * Lookup or create an nhe, based on an nhg or an nhe id.
861 */
862 static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
863 struct nexthop_group *nhg,
864 struct nhg_connected_tree_head *nhg_depends,
865 vrf_id_t vrf_id, afi_t afi, int type,
866 bool from_dplane)
867 {
868 struct nhg_hash_entry lookup = {};
869 bool created = false;
870
871 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
872 zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p",
873 __func__, id, nhg, vrf_id, type,
874 nhg_depends);
875
876 /* Use a temporary nhe and call into the superset/common code */
877 lookup.id = id;
878 lookup.type = type ? type : ZEBRA_ROUTE_NHG;
879 lookup.nhg = *nhg;
880
881 lookup.vrf_id = vrf_id;
882 if (nhg_depends || lookup.nhg.nexthop->next) {
883 		/* Groups can have all vrfs and AFs in them */
884 lookup.afi = AFI_UNSPEC;
885 } else {
886 switch (lookup.nhg.nexthop->type) {
887 case (NEXTHOP_TYPE_IFINDEX):
888 case (NEXTHOP_TYPE_BLACKHOLE):
889 /*
890 			 * This switch case handles setting the afi differently
891 			 * for ipv4/v6 routes. Ifindex/blackhole nexthop
892 			 * objects cannot be ambiguous; they must be Address
893 			 * Family specific. If we get here, we will either use
894 			 * the AF of the route, or the one we got passed from
895 			 * the kernel.
896 */
897 lookup.afi = afi;
898 break;
899 case (NEXTHOP_TYPE_IPV4_IFINDEX):
900 case (NEXTHOP_TYPE_IPV4):
901 lookup.afi = AFI_IP;
902 break;
903 case (NEXTHOP_TYPE_IPV6_IFINDEX):
904 case (NEXTHOP_TYPE_IPV6):
905 lookup.afi = AFI_IP6;
906 break;
907 }
908 }
909
910 created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi, from_dplane);
911
912 return created;
913 }
914
915 /* Find/create a single nexthop */
916 static struct nhg_hash_entry *zebra_nhg_find_nexthop(uint32_t id,
917 struct nexthop *nh,
918 afi_t afi, int type,
919 bool from_dplane)
920 {
921 struct nhg_hash_entry *nhe = NULL;
922 struct nexthop_group nhg = {};
923 vrf_id_t vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nh->vrf_id;
924
925 nexthop_group_add_sorted(&nhg, nh);
926
927 zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type, from_dplane);
928
929 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
930 zlog_debug("%s: nh %pNHv => %p (%u)",
931 __func__, nh, nhe, nhe ? nhe->id : 0);
932
933 return nhe;
934 }
935
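/* Simple accessors for fields of the nhg_ctx structure */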
936 static uint32_t nhg_ctx_get_id(const struct nhg_ctx *ctx)
937 {
938 return ctx->id;
939 }
940
941 static void nhg_ctx_set_status(struct nhg_ctx *ctx, enum nhg_ctx_status status)
942 {
943 ctx->status = status;
944 }
945
946 static enum nhg_ctx_status nhg_ctx_get_status(const struct nhg_ctx *ctx)
947 {
948 return ctx->status;
949 }
950
951 static void nhg_ctx_set_op(struct nhg_ctx *ctx, enum nhg_ctx_op_e op)
952 {
953 ctx->op = op;
954 }
955
956 static enum nhg_ctx_op_e nhg_ctx_get_op(const struct nhg_ctx *ctx)
957 {
958 return ctx->op;
959 }
960
961 static vrf_id_t nhg_ctx_get_vrf_id(const struct nhg_ctx *ctx)
962 {
963 return ctx->vrf_id;
964 }
965
966 static int nhg_ctx_get_type(const struct nhg_ctx *ctx)
967 {
968 return ctx->type;
969 }
970
971 static int nhg_ctx_get_afi(const struct nhg_ctx *ctx)
972 {
973 return ctx->afi;
974 }
975
976 static struct nexthop *nhg_ctx_get_nh(struct nhg_ctx *ctx)
977 {
978 return &ctx->u.nh;
979 }
980
981 static uint8_t nhg_ctx_get_count(const struct nhg_ctx *ctx)
982 {
983 return ctx->count;
984 }
985
986 static struct nh_grp *nhg_ctx_get_grp(struct nhg_ctx *ctx)
987 {
988 return ctx->u.grp;
989 }
990
991 static struct nhg_ctx *nhg_ctx_new(void)
992 {
993 struct nhg_ctx *new;
994
995 new = XCALLOC(MTYPE_NHG_CTX, sizeof(struct nhg_ctx));
996
997 return new;
998 }
999
1000 void nhg_ctx_free(struct nhg_ctx **ctx)
1001 {
1002 struct nexthop *nh;
1003
1004 if (ctx == NULL)
1005 return;
1006
1007 assert((*ctx) != NULL);
1008
1009 if (nhg_ctx_get_count(*ctx))
1010 goto done;
1011
1012 nh = nhg_ctx_get_nh(*ctx);
1013
1014 nexthop_del_labels(nh);
1015 nexthop_del_srv6_seg6local(nh);
1016 nexthop_del_srv6_seg6(nh);
1017
1018 done:
1019 XFREE(MTYPE_NHG_CTX, *ctx);
1020 }
1021
1022 static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh,
1023 struct nh_grp *grp, vrf_id_t vrf_id,
1024 afi_t afi, int type, uint8_t count)
1025 {
1026 struct nhg_ctx *ctx = NULL;
1027
1028 ctx = nhg_ctx_new();
1029
1030 ctx->id = id;
1031 ctx->vrf_id = vrf_id;
1032 ctx->afi = afi;
1033 ctx->type = type;
1034 ctx->count = count;
1035
1036 if (count)
1037 /* Copy over the array */
1038 memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp));
1039 else if (nh)
1040 ctx->u.nh = *nh;
1041
1042 return ctx;
1043 }
1044
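/* Mark an nhe valid and propagate validity up to its dependents */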
1045 static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe)
1046 {
1047 struct nhg_connected *rb_node_dep;
1048
1049 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1050
1051 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
1052 zebra_nhg_set_valid(rb_node_dep->nhe);
1053 }
1054
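/* Mark an nhe invalid and re-evaluate the validity of its dependents */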
1055 static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
1056 {
1057 struct nhg_connected *rb_node_dep;
1058
1059 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1060
1061 /* Update validity of nexthops depending on it */
1062 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
1063 zebra_nhg_check_valid(rb_node_dep->nhe);
1064 }
1065
1066 void zebra_nhg_check_valid(struct nhg_hash_entry *nhe)
1067 {
1068 struct nhg_connected *rb_node_dep = NULL;
1069 bool valid = false;
1070
1071 	/* If anything else in the group is valid, the group is valid */
1072 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
1073 if (CHECK_FLAG(rb_node_dep->nhe->flags, NEXTHOP_GROUP_VALID)) {
1074 valid = true;
1075 goto done;
1076 }
1077 }
1078
1079 done:
1080 if (valid)
1081 zebra_nhg_set_valid(nhe);
1082 else
1083 zebra_nhg_set_invalid(nhe);
1084 }
1085
1086 static void zebra_nhg_release_all_deps(struct nhg_hash_entry *nhe)
1087 {
1088 /* Remove it from any lists it may be on */
1089 zebra_nhg_depends_release(nhe);
1090 zebra_nhg_dependents_release(nhe);
1091 if (nhe->ifp)
1092 if_nhg_dependents_del(nhe->ifp, nhe);
1093 }
1094
1095 static void zebra_nhg_release(struct nhg_hash_entry *nhe)
1096 {
1097 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1098 zlog_debug("%s: nhe %p (%u)", __func__, nhe, nhe->id);
1099
1100 zebra_nhg_release_all_deps(nhe);
1101
1102 /*
1103 	 * If it's not zebra owned, we didn't store it here and have to be
1104 	 * sure we don't clear one that's actually being used.
1105 */
1106 if (nhe->id < ZEBRA_NHG_PROTO_LOWER)
1107 hash_release(zrouter.nhgs, nhe);
1108
1109 hash_release(zrouter.nhgs_id, nhe);
1110 }
1111
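/* Release an nhe from the hash and ID tables and free it */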
1112 static void zebra_nhg_handle_uninstall(struct nhg_hash_entry *nhe)
1113 {
1114 zebra_nhg_release(nhe);
1115 zebra_nhg_free(nhe);
1116 }
1117
1118 static void zebra_nhg_handle_install(struct nhg_hash_entry *nhe)
1119 {
1120 /* Update validity of groups depending on it */
1121 struct nhg_connected *rb_node_dep;
1122
1123 frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
1124 zebra_nhg_set_valid(rb_node_dep->nhe);
1125 }
1126
1127 /*
1128 * The kernel/other program has changed the state of a nexthop object we are
1129 * using.
1130 */
1131 static void zebra_nhg_handle_kernel_state_change(struct nhg_hash_entry *nhe,
1132 bool is_delete)
1133 {
1134 if (nhe->refcnt) {
1135 flog_err(
1136 EC_ZEBRA_NHG_SYNC,
1137 "Kernel %s a nexthop group with ID (%u) that we are still using for a route, sending it back down",
1138 (is_delete ? "deleted" : "updated"), nhe->id);
1139
1140 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1141 zebra_nhg_install_kernel(nhe);
1142 } else
1143 zebra_nhg_handle_uninstall(nhe);
1144 }
1145
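/* Process a new nexthop/group context from the kernel: find or create
 * the matching nhe and mark it valid and installed.
 */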
1146 static int nhg_ctx_process_new(struct nhg_ctx *ctx)
1147 {
1148 struct nexthop_group *nhg = NULL;
1149 struct nhg_connected_tree_head nhg_depends = {};
1150 struct nhg_hash_entry *lookup = NULL;
1151 struct nhg_hash_entry *nhe = NULL;
1152
1153 uint32_t id = nhg_ctx_get_id(ctx);
1154 uint8_t count = nhg_ctx_get_count(ctx);
1155 vrf_id_t vrf_id = nhg_ctx_get_vrf_id(ctx);
1156 int type = nhg_ctx_get_type(ctx);
1157 afi_t afi = nhg_ctx_get_afi(ctx);
1158
1159 lookup = zebra_nhg_lookup_id(id);
1160
1161 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1162 zlog_debug("%s: id %u, count %d, lookup => %p",
1163 __func__, id, count, lookup);
1164
1165 if (lookup) {
1166 /* This is already present in our table, hence an update
1167 		 * that we did not initiate.
1168 */
1169 zebra_nhg_handle_kernel_state_change(lookup, false);
1170 return 0;
1171 }
1172
1173 if (nhg_ctx_get_count(ctx)) {
1174 nhg = nexthop_group_new();
1175 if (zebra_nhg_process_grp(nhg, &nhg_depends,
1176 nhg_ctx_get_grp(ctx), count)) {
1177 depends_decrement_free(&nhg_depends);
1178 nexthop_group_delete(&nhg);
1179 return -ENOENT;
1180 }
1181
1182 if (!zebra_nhg_find(&nhe, id, nhg, &nhg_depends, vrf_id, afi,
1183 type, true))
1184 depends_decrement_free(&nhg_depends);
1185
1186 		/* These got copied over in zebra_nhg_hash_alloc() */
1187 nexthop_group_delete(&nhg);
1188 } else
1189 nhe = zebra_nhg_find_nexthop(id, nhg_ctx_get_nh(ctx), afi, type,
1190 true);
1191
1192 if (!nhe) {
1193 flog_err(
1194 EC_ZEBRA_TABLE_LOOKUP_FAILED,
1195 "Zebra failed to find or create a nexthop hash entry for ID (%u)",
1196 id);
1197 return -1;
1198 }
1199
1200 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1201 zlog_debug("%s: nhe %p (%u) is new", __func__, nhe, nhe->id);
1202
1203 /*
1204 	 * If this is a daemon's nhg read in from the kernel, add a refcnt
1205 	 * here to indicate the daemon owns it.
1206 */
1207 if (PROTO_OWNED(nhe))
1208 zebra_nhg_increment_ref(nhe);
1209
1210 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1211 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1212
1213 return 0;
1214 }
1215
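/* Process a kernel delete for a nexthop group ID */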
1216 static int nhg_ctx_process_del(struct nhg_ctx *ctx)
1217 {
1218 struct nhg_hash_entry *nhe = NULL;
1219 uint32_t id = nhg_ctx_get_id(ctx);
1220
1221 nhe = zebra_nhg_lookup_id(id);
1222
1223 if (!nhe) {
1224 flog_warn(
1225 EC_ZEBRA_BAD_NHG_MESSAGE,
1226 "Kernel delete message received for nexthop group ID (%u) that we do not have in our ID table",
1227 id);
1228 return -1;
1229 }
1230
1231 zebra_nhg_handle_kernel_state_change(nhe, true);
1232
1233 return 0;
1234 }
1235
1236 static void nhg_ctx_fini(struct nhg_ctx **ctx)
1237 {
1238 /*
1239 * Just freeing for now, maybe do something more in the future
1240 * based on flag.
1241 */
1242
1243 nhg_ctx_free(ctx);
1244 }
1245
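/* Queue a context for later, asynchronous processing via the rib module */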
1246 static int queue_add(struct nhg_ctx *ctx)
1247 {
1248 	/* If it's queued or already processed, do nothing */
1249 if (nhg_ctx_get_status(ctx) == NHG_CTX_QUEUED)
1250 return 0;
1251
1252 if (rib_queue_nhg_ctx_add(ctx)) {
1253 nhg_ctx_set_status(ctx, NHG_CTX_FAILURE);
1254 return -1;
1255 }
1256
1257 nhg_ctx_set_status(ctx, NHG_CTX_QUEUED);
1258
1259 return 0;
1260 }
1261
1262 int nhg_ctx_process(struct nhg_ctx *ctx)
1263 {
1264 int ret = 0;
1265
1266 switch (nhg_ctx_get_op(ctx)) {
1267 case NHG_CTX_OP_NEW:
1268 ret = nhg_ctx_process_new(ctx);
1269 if (nhg_ctx_get_count(ctx) && ret == -ENOENT
1270 && nhg_ctx_get_status(ctx) != NHG_CTX_REQUEUED) {
1271 /**
1272 * We have entered a situation where we are
1273 * processing a group from the kernel
1274 * that has a contained nexthop which
1275 * we have not yet processed.
1276 *
1277 * Re-enqueue this ctx to be handled exactly one
1278 * more time (indicated by the flag).
1279 *
1280 * By the time we get back to it, we
1281 * should have processed its depends.
1282 */
1283 nhg_ctx_set_status(ctx, NHG_CTX_NONE);
1284 if (queue_add(ctx) == 0) {
1285 nhg_ctx_set_status(ctx, NHG_CTX_REQUEUED);
1286 return 0;
1287 }
1288 }
1289 break;
1290 case NHG_CTX_OP_DEL:
1291 ret = nhg_ctx_process_del(ctx);
1292 case NHG_CTX_OP_NONE:
1293 break;
1294 }
1295
1296 nhg_ctx_set_status(ctx, (ret ? NHG_CTX_FAILURE : NHG_CTX_SUCCESS));
1297
1298 nhg_ctx_fini(&ctx);
1299
1300 return ret;
1301 }
1302
1303 /* Kernel-side, you either get a single new nexthop or an array of IDs */
1304 int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
1305 uint8_t count, vrf_id_t vrf_id, afi_t afi, int type,
1306 int startup)
1307 {
1308 struct nhg_ctx *ctx = NULL;
1309
1310 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1311 zlog_debug("%s: nh %pNHv, id %u, count %d",
1312 __func__, nh, id, (int)count);
1313
1314 if (id > id_counter && id < ZEBRA_NHG_PROTO_LOWER)
1315 /* Increase our counter so we don't try to create
1316 * an ID that already exists
1317 */
1318 id_counter = id;
1319
1320 ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count);
1321 nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);
1322
1323 	/* Under startup conditions, we need to handle them immediately
1324 * like we do for routes. Otherwise, we are going to get a route
1325 * with a nhe_id that we have not handled.
1326 */
1327 if (startup)
1328 return nhg_ctx_process(ctx);
1329
1330 if (queue_add(ctx)) {
1331 nhg_ctx_fini(&ctx);
1332 return -1;
1333 }
1334
1335 return 0;
1336 }
1337
1338 /* Kernel-side, received delete message */
1339 int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id)
1340 {
1341 struct nhg_ctx *ctx = NULL;
1342
1343 ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0);
1344
1345 nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL);
1346
1347 if (queue_add(ctx)) {
1348 nhg_ctx_fini(&ctx);
1349 return -1;
1350 }
1351
1352 return 0;
1353 }
1354
1355 /* Some dependency helper functions */
1356 static struct nhg_hash_entry *depends_find_recursive(const struct nexthop *nh,
1357 afi_t afi, int type)
1358 {
1359 struct nhg_hash_entry *nhe;
1360 struct nexthop *lookup = NULL;
1361
1362 lookup = nexthop_dup(nh, NULL);
1363
1364 nhe = zebra_nhg_find_nexthop(0, lookup, afi, type, false);
1365
1366 nexthops_free(lookup);
1367
1368 return nhe;
1369 }
1370
1371 static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
1372 afi_t afi, int type,
1373 bool from_dplane)
1374 {
1375 struct nhg_hash_entry *nhe;
1376 struct nexthop lookup = {};
1377
1378 /* Capture a snapshot of this single nh; it might be part of a list,
1379 * so we need to make a standalone copy.
1380 */
1381 nexthop_copy_no_recurse(&lookup, nh, NULL);
1382
1383 nhe = zebra_nhg_find_nexthop(0, &lookup, afi, type, from_dplane);
1384
1385 /* The copy may have allocated labels; free them if necessary. */
1386 nexthop_del_labels(&lookup);
1387 nexthop_del_srv6_seg6local(&lookup);
1388 nexthop_del_srv6_seg6(&lookup);
1389
1390 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1391 zlog_debug("%s: nh %pNHv => %p (%u)",
1392 __func__, nh, nhe, nhe ? nhe->id : 0);
1393
1394 return nhe;
1395 }
1396
1397 static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi,
1398 int type, bool from_dplane)
1399 {
1400 struct nhg_hash_entry *nhe = NULL;
1401
1402 if (!nh)
1403 goto done;
1404
1405 /* We are separating these functions out to increase handling speed
1406 * in the non-recursive case (by not alloc/freeing)
1407 */
1408 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
1409 nhe = depends_find_recursive(nh, afi, type);
1410 else
1411 nhe = depends_find_singleton(nh, afi, type, from_dplane);
1412
1413
1414 if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
1415 zlog_debug("%s: nh %pNHv %s => %p (%u)", __func__, nh,
1416 CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE) ? "(R)"
1417 : "",
1418 nhe, nhe ? nhe->id : 0);
1419 }
1420
1421 done:
1422 return nhe;
1423 }
1424
1425 static void depends_add(struct nhg_connected_tree_head *head,
1426 struct nhg_hash_entry *depend)
1427 {
1428 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1429 zlog_debug("%s: head %p nh %pNHv",
1430 __func__, head, depend->nhg.nexthop);
1431
1432 /* If NULL is returned, it was successfully added and
1433 * needs to have its refcnt incremented.
1434 *
1435 * Else the NHE is already present in the tree and doesn't
1436 * need to increment the refcnt.
1437 */
1438 if (nhg_connected_tree_add_nhe(head, depend) == NULL)
1439 zebra_nhg_increment_ref(depend);
1440 }
1441
1442 static struct nhg_hash_entry *
1443 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
1444 afi_t afi, int type, bool from_dplane)
1445 {
1446 struct nhg_hash_entry *depend = NULL;
1447
1448 depend = depends_find(nh, afi, type, from_dplane);
1449
1450 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1451 zlog_debug("%s: nh %pNHv => %p",
1452 __func__, nh, depend);
1453
1454 if (depend)
1455 depends_add(head, depend);
1456
1457 return depend;
1458 }
1459
1460 static struct nhg_hash_entry *
1461 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id)
1462 {
1463 struct nhg_hash_entry *depend = NULL;
1464
1465 depend = zebra_nhg_lookup_id(id);
1466
1467 if (depend)
1468 depends_add(head, depend);
1469
1470 return depend;
1471 }
1472
1473 static void depends_decrement_free(struct nhg_connected_tree_head *head)
1474 {
1475 nhg_connected_tree_decrement_ref(head);
1476 nhg_connected_tree_free(head);
1477 }
1478
1479 /* Find an nhe based on a list of nexthops */
1480 struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
1481 struct nexthop_group *nhg,
1482 afi_t rt_afi, int type)
1483 {
1484 struct nhg_hash_entry *nhe = NULL;
1485 vrf_id_t vrf_id;
1486
1487 /*
1488 * CLANG SA is complaining that nexthop may be NULL
1489 * Make it happy but this is ridonc
1490 */
1491 assert(nhg->nexthop);
1492 vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nhg->nexthop->vrf_id;
1493
1494 zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, type, false);
1495
1496 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1497 zlog_debug("%s: => nhe %p (%u)",
1498 __func__, nhe, nhe ? nhe->id : 0);
1499
1500 return nhe;
1501 }
1502
1503 /* Find an nhe based on a route's nhe */
1504 struct nhg_hash_entry *
1505 zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi)
1506 {
1507 struct nhg_hash_entry *nhe = NULL;
1508
1509 if (!(rt_nhe && rt_nhe->nhg.nexthop)) {
1510 flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
1511 "No nexthop passed to %s", __func__);
1512 return NULL;
1513 }
1514
1515 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1516 zlog_debug("%s: rt_nhe %p (%u)", __func__, rt_nhe, rt_nhe->id);
1517
1518 zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi, false);
1519
1520 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1521 zlog_debug("%s: => nhe %p (%u)",
1522 __func__, nhe, nhe ? nhe->id : 0);
1523
1524 return nhe;
1525 }
1526
1527 /*
1528 * Allocate backup nexthop info object. Typically these are embedded in
1529 * nhg_hash_entry objects.
1530 */
1531 struct nhg_backup_info *zebra_nhg_backup_alloc(void)
1532 {
1533 struct nhg_backup_info *p;
1534
1535 p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info));
1536
1537 p->nhe = zebra_nhg_alloc();
1538
1539 /* Identify the embedded group used to hold the list of backups */
1540 SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP);
1541
1542 return p;
1543 }
1544
1545 /*
1546 * Free backup nexthop info object, deal with any embedded allocations
1547 */
1548 void zebra_nhg_backup_free(struct nhg_backup_info **p)
1549 {
1550 if (p && *p) {
1551 if ((*p)->nhe)
1552 zebra_nhg_free((*p)->nhe);
1553
1554 XFREE(MTYPE_NHG, (*p));
1555 }
1556 }
1557
1558 /* Accessor for backup nexthop group */
1559 struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe)
1560 {
1561 struct nexthop_group *p = NULL;
1562
1563 if (nhe) {
1564 if (nhe->backup_info && nhe->backup_info->nhe)
1565 p = &(nhe->backup_info->nhe->nhg);
1566 }
1567
1568 return p;
1569 }
1570
1571 /*
1572 * Helper to return a copy of a backup_info - note that this is a shallow
1573 * copy, meant to be used when creating a new nhe from info passed in with
1574 	 * a route, for example.
1575 */
1576 static struct nhg_backup_info *
1577 nhg_backup_copy(const struct nhg_backup_info *orig)
1578 {
1579 struct nhg_backup_info *b;
1580
1581 b = zebra_nhg_backup_alloc();
1582
1583 /* Copy list of nexthops */
1584 nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg));
1585
1586 return b;
1587 }
1588
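/* Free an nhe's owned resources: its nexthops, backup info, and
 * connected depends/dependents trees.
 */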
1589 static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
1590 {
1591 nexthops_free(nhe->nhg.nexthop);
1592
1593 zebra_nhg_backup_free(&nhe->backup_info);
1594
1595 /* Decrement to remove connection ref */
1596 nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
1597 nhg_connected_tree_free(&nhe->nhg_depends);
1598 nhg_connected_tree_free(&nhe->nhg_dependents);
1599 }
1600
1601 void zebra_nhg_free(struct nhg_hash_entry *nhe)
1602 {
1603 if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
1604 /* Group or singleton? */
1605 if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
1606 zlog_debug("%s: nhe %p (%u), refcnt %d",
1607 __func__, nhe, nhe->id, nhe->refcnt);
1608 else
1609 zlog_debug("%s: nhe %p (%u), refcnt %d, NH %pNHv",
1610 __func__, nhe, nhe->id, nhe->refcnt,
1611 nhe->nhg.nexthop);
1612 }
1613
1614 if (nhe->refcnt)
1615 zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt);
1616
1617 zebra_nhg_free_members(nhe);
1618
1619 XFREE(MTYPE_NHG, nhe);
1620 }
1621
1622 void zebra_nhg_hash_free(void *p)
1623 {
1624 zebra_nhg_release_all_deps((struct nhg_hash_entry *)p);
1625 zebra_nhg_free((struct nhg_hash_entry *)p);
1626 }
1627
1628 void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
1629 {
1630 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1631 zlog_debug("%s: nhe %p (%u) %d => %d",
1632 __func__, nhe, nhe->id, nhe->refcnt,
1633 nhe->refcnt - 1);
1634
1635 nhe->refcnt--;
1636
1637 if (!zebra_nhg_depends_is_empty(nhe))
1638 nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
1639
1640 if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0)
1641 zebra_nhg_uninstall_kernel(nhe);
1642 }
1643
1644 void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
1645 {
1646 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1647 zlog_debug("%s: nhe %p (%u) %d => %d",
1648 __func__, nhe, nhe->id, nhe->refcnt,
1649 nhe->refcnt + 1);
1650
1651 nhe->refcnt++;
1652
1653 if (!zebra_nhg_depends_is_empty(nhe))
1654 nhg_connected_tree_increment_ref(&nhe->nhg_depends);
1655 }
1656
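/* Attach a new resolved nexthop, based on resolving nexthop 'newhop',
 * underneath the recursive nexthop 'nexthop'; copies gateway/ifindex
 * info and merges label and SRv6 info as needed.
 */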
1657 static struct nexthop *nexthop_set_resolved(afi_t afi,
1658 const struct nexthop *newhop,
1659 struct nexthop *nexthop,
1660 struct zebra_sr_policy *policy)
1661 {
1662 struct nexthop *resolved_hop;
1663 uint8_t num_labels = 0;
1664 mpls_label_t labels[MPLS_MAX_LABELS];
1665 enum lsp_types_t label_type = ZEBRA_LSP_NONE;
1666 int i = 0;
1667
1668 resolved_hop = nexthop_new();
1669 SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
1670
1671 resolved_hop->vrf_id = nexthop->vrf_id;
1672 switch (newhop->type) {
1673 case NEXTHOP_TYPE_IPV4:
1674 case NEXTHOP_TYPE_IPV4_IFINDEX:
1675 /* If the resolving route specifies a gateway, use it */
1676 resolved_hop->type = newhop->type;
1677 resolved_hop->gate.ipv4 = newhop->gate.ipv4;
1678
1679 if (newhop->ifindex) {
1680 resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
1681 resolved_hop->ifindex = newhop->ifindex;
1682 }
1683 break;
1684 case NEXTHOP_TYPE_IPV6:
1685 case NEXTHOP_TYPE_IPV6_IFINDEX:
1686 resolved_hop->type = newhop->type;
1687 resolved_hop->gate.ipv6 = newhop->gate.ipv6;
1688
1689 if (newhop->ifindex) {
1690 resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
1691 resolved_hop->ifindex = newhop->ifindex;
1692 }
1693 break;
1694 case NEXTHOP_TYPE_IFINDEX:
1695 /* If the resolving route is an interface route,
1696 * it means the gateway we are looking up is connected
1697 * to that interface. (The actual network is _not_ onlink).
1698 * Therefore, the resolved route should have the original
1699 * gateway as nexthop as it is directly connected.
1700 *
1701 * On Linux, we have to set the onlink netlink flag because
1702 * otherwise, the kernel won't accept the route.
1703 */
1704 resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
1705 if (afi == AFI_IP) {
1706 resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
1707 resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
1708 } else if (afi == AFI_IP6) {
1709 resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
1710 resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
1711 }
1712 resolved_hop->ifindex = newhop->ifindex;
1713 break;
1714 case NEXTHOP_TYPE_BLACKHOLE:
1715 resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
1716 resolved_hop->bh_type = newhop->bh_type;
1717 break;
1718 }
1719
1720 if (newhop->flags & NEXTHOP_FLAG_ONLINK)
1721 resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
1722
1723 /* Copy labels of the resolved route and the parent resolving to it */
1724 if (policy) {
1725 int i = 0;
1726
1727 /*
1728 * Don't push the first SID if the corresponding action in the
1729 * LFIB is POP.
1730 */
1731 if (!newhop->nh_label || !newhop->nh_label->num_labels
1732 || newhop->nh_label->label[0] == MPLS_LABEL_IMPLICIT_NULL)
1733 i = 1;
1734
1735 for (; i < policy->segment_list.label_num; i++)
1736 labels[num_labels++] = policy->segment_list.labels[i];
1737 label_type = policy->segment_list.type;
1738 } else if (newhop->nh_label) {
1739 for (i = 0; i < newhop->nh_label->num_labels; i++) {
1740 /* Be a bit picky about overrunning the local array */
1741 if (num_labels >= MPLS_MAX_LABELS) {
1742 if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
1743 zlog_debug("%s: too many labels in newhop %pNHv",
1744 __func__, newhop);
1745 break;
1746 }
1747 labels[num_labels++] = newhop->nh_label->label[i];
1748 }
1749 /* Use the "outer" type */
1750 label_type = newhop->nh_label_type;
1751 }
1752
1753 if (nexthop->nh_label) {
1754 for (i = 0; i < nexthop->nh_label->num_labels; i++) {
1755 /* Be a bit picky about overrunning the local array */
1756 if (num_labels >= MPLS_MAX_LABELS) {
1757 if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
1758 zlog_debug("%s: too many labels in nexthop %pNHv",
1759 __func__, nexthop);
1760 break;
1761 }
1762 labels[num_labels++] = nexthop->nh_label->label[i];
1763 }
1764
1765 /* If the parent has labels, use its type if
1766 * we don't already have one.
1767 */
1768 if (label_type == ZEBRA_LSP_NONE)
1769 label_type = nexthop->nh_label_type;
1770 }
1771
1772 if (num_labels)
1773 nexthop_add_labels(resolved_hop, label_type, num_labels,
1774 labels);
1775
1776 if (nexthop->nh_srv6) {
1777 nexthop_add_srv6_seg6local(resolved_hop,
1778 nexthop->nh_srv6->seg6local_action,
1779 &nexthop->nh_srv6->seg6local_ctx);
1780 nexthop_add_srv6_seg6(resolved_hop,
1781 &nexthop->nh_srv6->seg6_segs);
1782 }
1783
1784 resolved_hop->rparent = nexthop;
1785 _nexthop_add(&nexthop->resolved, resolved_hop);
1786
1787 return resolved_hop;
1788 }
1789
1790 /* Checks if nexthop we are trying to resolve to is valid */
1791 static bool nexthop_valid_resolve(const struct nexthop *nexthop,
1792 const struct nexthop *resolved)
1793 {
1794 /* Can't resolve to a recursive nexthop */
1795 if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE))
1796 return false;
1797
1798 /* Must be ACTIVE */
1799 if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE))
1800 return false;
1801
1802 /* Must not be duplicate */
1803 if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_DUPLICATE))
1804 return false;
1805
1806 switch (nexthop->type) {
1807 case NEXTHOP_TYPE_IPV4_IFINDEX:
1808 case NEXTHOP_TYPE_IPV6_IFINDEX:
1809 /* If the nexthop we are resolving to does not match the
1810 		 * ifindex for the nexthop the route wanted, it's not valid.
1811 */
1812 if (nexthop->ifindex != resolved->ifindex)
1813 return false;
1814 break;
1815 case NEXTHOP_TYPE_IPV4:
1816 case NEXTHOP_TYPE_IPV6:
1817 case NEXTHOP_TYPE_IFINDEX:
1818 case NEXTHOP_TYPE_BLACKHOLE:
1819 break;
1820 }
1821
1822 return true;
1823 }
1824
1825 /*
1826 * When resolving a recursive nexthop, capture backup nexthop(s) also
1827 * so they can be conveyed through the dataplane to the FIB. We'll look
1828 * at the backups in the resolving nh 'nexthop' and its nhe, and copy them
1829 * into the route's resolved nh 'resolved' and its nhe 'nhe'.
1830 */
1831 static int resolve_backup_nexthops(const struct nexthop *nexthop,
1832 const struct nhg_hash_entry *nhe,
1833 struct nexthop *resolved,
1834 struct nhg_hash_entry *resolve_nhe,
1835 struct backup_nh_map_s *map)
1836 {
1837 int i, j, idx;
1838 const struct nexthop *bnh;
1839 struct nexthop *nh, *newnh;
1840 mpls_label_t labels[MPLS_MAX_LABELS];
1841 uint8_t num_labels;
1842
1843 assert(nexthop->backup_num <= NEXTHOP_MAX_BACKUPS);
1844
1845 /* Locate backups from the original nexthop's backup index and nhe */
1846 for (i = 0; i < nexthop->backup_num; i++) {
1847 idx = nexthop->backup_idx[i];
1848
1849 /* Do we already know about this particular backup? */
1850 for (j = 0; j < map->map_count; j++) {
1851 if (map->map[j].orig_idx == idx)
1852 break;
1853 }
1854
1855 if (j < map->map_count) {
1856 resolved->backup_idx[resolved->backup_num] =
1857 map->map[j].new_idx;
1858 resolved->backup_num++;
1859
1860 SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);
1861
1862 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1863 zlog_debug("%s: found map idx orig %d, new %d",
1864 __func__, map->map[j].orig_idx,
1865 map->map[j].new_idx);
1866
1867 continue;
1868 }
1869
1870 /* We can't handle any new map entries at this point. */
1871 if (map->map_count == MULTIPATH_NUM)
1872 break;
1873
1874 /* Need to create/copy a new backup */
1875 bnh = nhe->backup_info->nhe->nhg.nexthop;
1876 for (j = 0; j < idx; j++) {
1877 if (bnh == NULL)
1878 break;
1879 bnh = bnh->next;
1880 }
1881
1882 /* Whoops - bad index in the nexthop? */
1883 if (bnh == NULL)
1884 continue;
1885
1886 if (resolve_nhe->backup_info == NULL)
1887 resolve_nhe->backup_info = zebra_nhg_backup_alloc();
1888
1889 /* Update backup info in the resolving nexthop and its nhe */
1890 newnh = nexthop_dup_no_recurse(bnh, NULL);
1891
1892 /* We may need some special handling for mpls labels: the new
1893 * backup needs to carry the recursive nexthop's labels,
1894 		 * if any; they may be vrf labels, for example.
1895 * The original/inner labels are in the stack of 'resolve_nhe',
1896 * if that is longer than the stack in 'nexthop'.
1897 */
1898 if (newnh->nh_label && resolved->nh_label &&
1899 nexthop->nh_label) {
1900 if (resolved->nh_label->num_labels >
1901 nexthop->nh_label->num_labels) {
1902 /* Prepare new label stack */
1903 num_labels = 0;
1904 for (j = 0; j < newnh->nh_label->num_labels;
1905 j++) {
1906 labels[j] = newnh->nh_label->label[j];
1907 num_labels++;
1908 }
1909
1910 /* Include inner labels */
1911 for (j = nexthop->nh_label->num_labels;
1912 j < resolved->nh_label->num_labels;
1913 j++) {
1914 labels[num_labels] =
1915 resolved->nh_label->label[j];
1916 num_labels++;
1917 }
1918
1919 /* Replace existing label stack in the backup */
1920 nexthop_del_labels(newnh);
1921 nexthop_add_labels(newnh, bnh->nh_label_type,
1922 num_labels, labels);
1923 }
1924 }
1925
1926 /* Need to compute the new backup index in the new
1927 * backup list, and add to map struct.
1928 */
1929 j = 0;
1930 nh = resolve_nhe->backup_info->nhe->nhg.nexthop;
1931 if (nh) {
1932 while (nh->next) {
1933 nh = nh->next;
1934 j++;
1935 }
1936
1937 nh->next = newnh;
1938 j++;
1939
1940 } else /* First one */
1941 resolve_nhe->backup_info->nhe->nhg.nexthop = newnh;
1942
1943 /* Capture index */
1944 resolved->backup_idx[resolved->backup_num] = j;
1945 resolved->backup_num++;
1946
1947 SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);
1948
1949 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1950 zlog_debug("%s: added idx orig %d, new %d",
1951 __func__, idx, j);
1952
1953 /* Update map/cache */
1954 map->map[map->map_count].orig_idx = idx;
1955 map->map[map->map_count].new_idx = j;
1956 map->map_count++;
1957 }
1958
1959 return 0;
1960 }
1961
1962 /*
1963 * Given a nexthop we need to properly recursively resolve,
1964 * do a table lookup to find and match if at all possible.
1965 * Set the nexthop->ifindex and resolution info as appropriate.
1966 */
1967 static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
1968 const struct prefix *top, int type, uint32_t flags,
1969 uint32_t *pmtu, vrf_id_t vrf_id)
1970 {
1971 struct prefix p;
1972 struct route_table *table;
1973 struct route_node *rn;
1974 struct route_entry *match = NULL;
1975 int resolved;
1976 struct zebra_nhlfe *nhlfe;
1977 struct nexthop *newhop;
1978 struct interface *ifp;
1979 rib_dest_t *dest;
1980 struct zebra_vrf *zvrf;
1981 struct in_addr local_ipv4;
1982 struct in_addr *ipv4;
1983 afi_t afi = AFI_IP;
1984
1985 /* Reset some nexthop attributes that we'll recompute if necessary */
1986 if ((nexthop->type == NEXTHOP_TYPE_IPV4)
1987 || (nexthop->type == NEXTHOP_TYPE_IPV6))
1988 nexthop->ifindex = 0;
1989
1990 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
1991 nexthops_free(nexthop->resolved);
1992 nexthop->resolved = NULL;
1993
1994 /*
1995 * Set afi based on nexthop type.
1996 * Some nexthop types get special handling, possibly skipping
1997 * the normal processing.
1998 */
1999 switch (nexthop->type) {
2000 case NEXTHOP_TYPE_IFINDEX:
2001
2002 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
2003 /*
2004 * If the interface exists and it's operative, or it's a kernel
2005 * route and the interface is up, it's active. We trust kernel routes
2006 * to be good.
2007 */
2008 if (ifp
2009 && (if_is_operative(ifp)
2010 || (if_is_up(ifp)
2011 && (type == ZEBRA_ROUTE_KERNEL
2012 || type == ZEBRA_ROUTE_SYSTEM))))
2013 return 1;
2014 else
2015 return 0;
2016 break;
2017
2018 case NEXTHOP_TYPE_IPV6_IFINDEX:
2019 afi = AFI_IP6;
2020
2021 if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
2022 ifp = if_lookup_by_index(nexthop->ifindex,
2023 nexthop->vrf_id);
2024 if (ifp && if_is_operative(ifp))
2025 return 1;
2026 else
2027 return 0;
2028 }
2029 break;
2030
2031 case NEXTHOP_TYPE_IPV4:
2032 case NEXTHOP_TYPE_IPV4_IFINDEX:
2033 afi = AFI_IP;
2034 break;
2035 case NEXTHOP_TYPE_IPV6:
2036 afi = AFI_IP6;
2037 break;
2038
2039 case NEXTHOP_TYPE_BLACKHOLE:
2040 return 1;
2041 }
2042
2043 /*
2044 * If the nexthop has been marked as 'onlink' we just need to make
2045 * sure the nexthop's interface is known and is operational.
2046 */
2047 if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
2048 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
2049 if (!ifp) {
2050 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2051 zlog_debug("nexthop %pNHv marked onlink but nhif %u doesn't exist",
2052 nexthop, nexthop->ifindex);
2053 return 0;
2054 }
2055 if (!if_is_operative(ifp)) {
2056 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2057 zlog_debug("nexthop %pNHv marked onlink but nhif %s is not operational",
2058 nexthop, ifp->name);
2059 return 0;
2060 }
2061 return 1;
2062 }
2063
2064 if (top &&
2065 ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN &&
2066 nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr) ||
2067 (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN &&
2068 memcmp(&nexthop->gate.ipv6, &top->u.prefix6, IPV6_MAX_BYTELEN) ==
2069 0)) &&
2070 nexthop->vrf_id == vrf_id) {
2071 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2072 zlog_debug(
2073 " %s: Attempting to install a max prefix length route through itself",
2074 __func__);
2075 return 0;
2076 }
2077
2078 /* Validation for ipv4 mapped ipv6 nexthop. */
2079 if (IS_MAPPED_IPV6(&nexthop->gate.ipv6)) {
2080 afi = AFI_IP;
2081 ipv4 = &local_ipv4;
2082 ipv4_mapped_ipv6_to_ipv4(&nexthop->gate.ipv6, ipv4);
2083 } else {
2084 ipv4 = &nexthop->gate.ipv4;
2085 }
2086
2087 /* Processing for nexthops with SR 'color' attribute, using
2088 * the corresponding SR policy object.
2089 */
2090 if (nexthop->srte_color) {
2091 struct ipaddr endpoint = {0};
2092 struct zebra_sr_policy *policy;
2093
2094 switch (afi) {
2095 case AFI_IP:
2096 endpoint.ipa_type = IPADDR_V4;
2097 endpoint.ipaddr_v4 = *ipv4;
2098 break;
2099 case AFI_IP6:
2100 endpoint.ipa_type = IPADDR_V6;
2101 endpoint.ipaddr_v6 = nexthop->gate.ipv6;
2102 break;
2103 default:
2104 flog_err(EC_LIB_DEVELOPMENT,
2105 "%s: unknown address-family: %u", __func__,
2106 afi);
2107 exit(1);
2108 }
2109
2110 policy = zebra_sr_policy_find(nexthop->srte_color, &endpoint);
2111 if (policy && policy->status == ZEBRA_SR_POLICY_UP) {
2112 resolved = 0;
2113 frr_each_safe (nhlfe_list, &policy->lsp->nhlfe_list,
2114 nhlfe) {
2115 if (!CHECK_FLAG(nhlfe->flags,
2116 NHLFE_FLAG_SELECTED)
2117 || CHECK_FLAG(nhlfe->flags,
2118 NHLFE_FLAG_DELETED))
2119 continue;
2120 SET_FLAG(nexthop->flags,
2121 NEXTHOP_FLAG_RECURSIVE);
2122 nexthop_set_resolved(afi, nhlfe->nexthop,
2123 nexthop, policy);
2124 resolved = 1;
2125 }
2126 if (resolved)
2127 return 1;
2128 }
2129 }
2130
2131 /* Make lookup prefix. */
2132 memset(&p, 0, sizeof(struct prefix));
2133 switch (afi) {
2134 case AFI_IP:
2135 p.family = AF_INET;
2136 p.prefixlen = IPV4_MAX_BITLEN;
2137 p.u.prefix4 = *ipv4;
2138 break;
2139 case AFI_IP6:
2140 p.family = AF_INET6;
2141 p.prefixlen = IPV6_MAX_BITLEN;
2142 p.u.prefix6 = nexthop->gate.ipv6;
2143 break;
2144 default:
2145 assert(afi != AFI_IP && afi != AFI_IP6);
2146 break;
2147 }
2148 /* Lookup table. */
2149 table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
2150 /* get zvrf */
2151 zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
2152 if (!table || !zvrf) {
2153 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2154 zlog_debug(" %s: Table not found", __func__);
2155 return 0;
2156 }
2157
2158 rn = route_node_match(table, (struct prefix *)&p);
2159 while (rn) {
2160 route_unlock_node(rn);
2161
2162 /* Lookup should halt if we've matched against ourselves ('top',
2163 * if specified) - i.e., we cannot have a nexthop NH1 that is
2164 * resolved by a route via NH1. The exception is if the route is a
2165 * host route.
2166 */
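/* E.g. (illustrative prefixes): a nexthop belonging to a route for
 * 203.0.113.0/24 must not be resolved by that same 203.0.113.0/24
 * entry, but a /32 (or /128) host-route match against 'top' is
 * allowed to resolve it.
 */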
2167 if (prefix_same(&rn->p, top))
2168 if (((afi == AFI_IP)
2169 && (rn->p.prefixlen != IPV4_MAX_BITLEN))
2170 || ((afi == AFI_IP6)
2171 && (rn->p.prefixlen != IPV6_MAX_BITLEN))) {
2172 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2173 zlog_debug(
2174 " %s: Matched against ourself and prefix length is not max bit length",
2175 __func__);
2176 return 0;
2177 }
2178
2179 /* Pick up selected route. */
2180 /* However, do not resolve over default route unless explicitly
2181 * allowed.
2182 */
2183 if (is_default_prefix(&rn->p)
2184 && !rnh_resolve_via_default(zvrf, p.family)) {
2185 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2186 zlog_debug(
2187 " %s: Resolved against default route",
2188 __func__);
2189 return 0;
2190 }
2191
2192 dest = rib_dest_from_rnode(rn);
2193 if (dest && dest->selected_fib
2194 && !CHECK_FLAG(dest->selected_fib->status,
2195 ROUTE_ENTRY_REMOVED)
2196 && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
2197 match = dest->selected_fib;
2198
2199 /* If there is no selected route, or the matched route is EGP, go
2200 * up the tree.
2201 */
2202 if (!match) {
2203 do {
2204 rn = rn->parent;
2205 } while (rn && rn->info == NULL);
2206 if (rn)
2207 route_lock_node(rn);
2208
2209 continue;
2210 }
2211
2212 if (match->type == ZEBRA_ROUTE_CONNECT) {
2213 /* Directly point to the connected route. */
2214 newhop = match->nhe->nhg.nexthop;
2215 if (newhop) {
2216 if (nexthop->type == NEXTHOP_TYPE_IPV4
2217 || nexthop->type == NEXTHOP_TYPE_IPV6)
2218 nexthop->ifindex = newhop->ifindex;
2219 else if (nexthop->ifindex != newhop->ifindex) {
2220 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2221 zlog_debug(
2222 "%s: %pNHv given ifindex does not match nexthop's ifindex found: %pNHv",
2223 __func__, nexthop,
2224 newhop);
2225 /*
2226 * NEXTHOP_TYPE_*_IFINDEX but ifindex
2227 * doesn't match what we found.
2228 */
2229 return 0;
2230 }
2231 }
2232
2233 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2234 zlog_debug("%s: CONNECT match %p (%u), newhop %pNHv",
2235 __func__, match,
2236 match->nhe->id, newhop);
2237
2238 return 1;
2239 } else if (CHECK_FLAG(flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
2240 struct nexthop_group *nhg;
2241 struct nexthop *resolver;
2242 struct backup_nh_map_s map = {};
2243
2244 resolved = 0;
2245
2246 /* Only useful if installed */
2247 if (!CHECK_FLAG(match->status, ROUTE_ENTRY_INSTALLED)) {
2248 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2249 zlog_debug("%s: match %p (%u) not installed",
2250 __func__, match,
2251 match->nhe->id);
2252
2253 goto done_with_match;
2254 }
2255
2256 /* Examine installed nexthops; note that there
2257 * may not be any installed primary nexthops if
2258 * only backups are installed.
2259 */
2260 nhg = rib_get_fib_nhg(match);
2261 for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
2262 if (!nexthop_valid_resolve(nexthop, newhop))
2263 continue;
2264
2265 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2266 zlog_debug("%s: RECURSIVE match %p (%u), newhop %pNHv",
2267 __func__, match,
2268 match->nhe->id, newhop);
2269
2270 SET_FLAG(nexthop->flags,
2271 NEXTHOP_FLAG_RECURSIVE);
2272 resolver = nexthop_set_resolved(afi, newhop,
2273 nexthop, NULL);
2274 resolved = 1;
2275
2276 /* If there are backup nexthops, capture
2277 * that info with the resolving nexthop.
2278 */
2279 if (resolver && newhop->backup_num > 0) {
2280 resolve_backup_nexthops(newhop,
2281 match->nhe,
2282 resolver, nhe,
2283 &map);
2284 }
2285 }
2286
2287 /* Examine installed backup nexthops, if any. There
2288 * are only installed backups *if* there is a
2289 * dedicated fib list. The UI can also control use
2290 * of backups for resolution.
2291 */
2292 nhg = rib_get_fib_backup_nhg(match);
2293 if (!use_recursive_backups ||
2294 nhg == NULL || nhg->nexthop == NULL)
2295 goto done_with_match;
2296
2297 for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
2298 if (!nexthop_valid_resolve(nexthop, newhop))
2299 continue;
2300
2301 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2302 zlog_debug("%s: RECURSIVE match backup %p (%u), newhop %pNHv",
2303 __func__, match,
2304 match->nhe->id, newhop);
2305
2306 SET_FLAG(nexthop->flags,
2307 NEXTHOP_FLAG_RECURSIVE);
2308 nexthop_set_resolved(afi, newhop, nexthop,
2309 NULL);
2310 resolved = 1;
2311 }
2312
2313 done_with_match:
2314 /* Capture resolving mtu */
2315 if (resolved) {
2316 if (pmtu)
2317 *pmtu = match->mtu;
2318
2319 } else if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2320 zlog_debug(
2321 " %s: Recursion failed to find a usable nexthop",
2322 __func__);
2323
2324 return resolved;
2325 } else {
2326 if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
2327 zlog_debug(
2328 " %s: Route Type %s has not turned on recursion",
2329 __func__, zebra_route_string(type));
2330 if (type == ZEBRA_ROUTE_BGP
2331 && !CHECK_FLAG(flags, ZEBRA_FLAG_IBGP))
2332 zlog_debug(
2333 " EBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
2334 }
2335 return 0;
2336 }
2337 }
2338 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2339 zlog_debug(" %s: Nexthop lookup in the table failed",
2340 __func__);
2341 return 0;
2342 }
2343
2344 /* This function verifies reachability of one given nexthop, which can be
2345 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
2346 * in the nexthop->flags field. The nexthop->ifindex will be updated
2347 * appropriately as well.
2348 *
2349 * An existing route map can turn an otherwise active nexthop inactive,
2350 * but not vice versa.
2351 *
2352 * The return value is the final value of the 'ACTIVE' flag.
2353 */
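/* For example, a zebra protocol route-map (consulted via
 * zebra_route_map_check() below) that denies the route/nexthop pair
 * clears NEXTHOP_FLAG_ACTIVE even though the nexthop resolved; a
 * permit clause can never set ACTIVE on a nexthop that failed
 * resolution.
 */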
2354 static unsigned nexthop_active_check(struct route_node *rn,
2355 struct route_entry *re,
2356 struct nexthop *nexthop,
2357 struct nhg_hash_entry *nhe)
2358 {
2359 route_map_result_t ret = RMAP_PERMITMATCH;
2360 afi_t family;
2361 const struct prefix *p, *src_p;
2362 struct zebra_vrf *zvrf;
2363 uint32_t mtu = 0;
2364 vrf_id_t vrf_id;
2365
2366 srcdest_rnode_prefixes(rn, &p, &src_p);
2367
2368 if (rn->p.family == AF_INET)
2369 family = AFI_IP;
2370 else if (rn->p.family == AF_INET6)
2371 family = AFI_IP6;
2372 else
2373 family = AFI_UNSPEC;
2374
2375 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2376 zlog_debug("%s: re %p, nexthop %pNHv", __func__, re, nexthop);
2377
2378 /*
2379 * If the kernel has sent us a NEW route, then
2380 * by golly gee whiz it's a good route.
2381 *
2382 * If it's an already INSTALLED route we have already handled, then the
2383 * kernel route's nexthop might have become unreachable
2384 * and we have to handle that.
2385 */
2386 if (!CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) &&
2387 (re->type == ZEBRA_ROUTE_KERNEL ||
2388 re->type == ZEBRA_ROUTE_SYSTEM)) {
2389 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2390 goto skip_check;
2391 }
2392
2393
2394 vrf_id = zvrf_id(rib_dest_vrf(rib_dest_from_rnode(rn)));
2395 switch (nexthop->type) {
2396 case NEXTHOP_TYPE_IFINDEX:
2397 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2398 &mtu, vrf_id))
2399 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2400 else
2401 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2402 break;
2403 case NEXTHOP_TYPE_IPV4:
2404 case NEXTHOP_TYPE_IPV4_IFINDEX:
2405 family = AFI_IP;
2406 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2407 &mtu, vrf_id))
2408 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2409 else
2410 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2411 break;
2412 case NEXTHOP_TYPE_IPV6:
2413 family = AFI_IP6;
2414 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2415 &mtu, vrf_id))
2416 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2417 else
2418 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2419 break;
2420 case NEXTHOP_TYPE_IPV6_IFINDEX:
2421 /* RFC 5549, v4 prefix with v6 NH */
2422 if (rn->p.family != AF_INET)
2423 family = AFI_IP6;
2424
2425 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2426 &mtu, vrf_id))
2427 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2428 else
2429 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2430 break;
2431 case NEXTHOP_TYPE_BLACKHOLE:
2432 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2433 break;
2434 default:
2435 break;
2436 }
2437
2438 skip_check:
2439
2440 if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
2441 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2442 zlog_debug(" %s: Unable to find active nexthop",
2443 __func__);
2444 return 0;
2445 }
2446
2447 /* Capture recursive nexthop mtu.
2448 * TODO -- the code used to just reset the re's value to zero
2449 * for each nexthop, and then jam any resolving route's mtu value in,
2450 * whether or not that was zero or was lt/gt any existing value. The
2451 * way this is used appears to be as a floor value, so let's try
2452 * using it that way here.
2453 */
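/* Illustrative values: if one recursive nexthop resolves via a route
 * reporting mtu 9000 and another via a route reporting mtu 1500,
 * re->nexthop_mtu ends up as 1500 - the smallest non-zero value seen.
 */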
2454 if (mtu > 0) {
2455 if (re->nexthop_mtu == 0 || re->nexthop_mtu > mtu)
2456 re->nexthop_mtu = mtu;
2457 }
2458
2459 /* XXX: What exactly do those checks do? Do we support
2460 * e.g. IPv4 routes with IPv6 nexthops or vice versa?
2461 */
2462 if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
2463 || (family == AFI_IP6 && p->family != AF_INET6))
2464 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2465
2466 /* The original code didn't determine the family correctly
2467 * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
2468 * from the rib_table_info in those cases.
2469 * Possibly it may be better to use only the rib_table_info
2470 * in every case.
2471 */
2472 if (family == 0) {
2473 struct rib_table_info *info;
2474
2475 info = srcdest_rnode_table_info(rn);
2476 family = info->afi;
2477 }
2478
2479 memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));
2480
2481 zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
2482 if (!zvrf) {
2483 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2484 zlog_debug(" %s: zvrf is NULL", __func__);
2485 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2486 }
2487
2488 /* It'll get set if required inside */
2489 ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
2490 zvrf, re->tag);
2491 if (ret == RMAP_DENYMATCH) {
2492 if (IS_ZEBRA_DEBUG_RIB) {
2493 zlog_debug(
2494 "%u:%pRN: Filtering out with NH out %s due to route map",
2495 re->vrf_id, rn,
2496 ifindex2ifname(nexthop->ifindex,
2497 nexthop->vrf_id));
2498 }
2499 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2500 }
2501 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2502 }
2503
2504 /* Helper function called after resolution to walk nhg rb trees
2505 * and toggle the NEXTHOP_GROUP_VALID flag if the nexthop
2506 * is active on singleton NHEs.
2507 */
2508 static bool zebra_nhg_set_valid_if_active(struct nhg_hash_entry *nhe)
2509 {
2510 struct nhg_connected *rb_node_dep = NULL;
2511 bool valid = false;
2512
2513 if (!zebra_nhg_depends_is_empty(nhe)) {
2514 /* Is at least one depend valid? */
2515 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
2516 if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe))
2517 valid = true;
2518 }
2519
2520 goto done;
2521 }
2522
2523 /* Should be a fully resolved singleton at this point */
2524 if (CHECK_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE))
2525 valid = true;
2526
2527 done:
2528 if (valid)
2529 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
2530
2531 return valid;
2532 }
2533
2534 /*
2535 * Process a list of nexthops, given an nhe, determining
2536 * whether each one is ACTIVE/installable at this time.
2537 */
2538 static uint32_t nexthop_list_active_update(struct route_node *rn,
2539 struct route_entry *re,
2540 struct nhg_hash_entry *nhe,
2541 bool is_backup)
2542 {
2543 union g_addr prev_src;
2544 unsigned int prev_active, new_active;
2545 ifindex_t prev_index;
2546 uint32_t counter = 0;
2547 struct nexthop *nexthop;
2548 struct nexthop_group *nhg = &nhe->nhg;
2549
2550 nexthop = nhg->nexthop;
2551
2552 /* Init recursive nh mtu */
2553 re->nexthop_mtu = 0;
2554
2555 /* Process nexthops one-by-one */
2556 for ( ; nexthop; nexthop = nexthop->next) {
2557
2558 /* No protocol daemon provides the src, so we're skipping
2559 * tracking it
2560 */
2561 prev_src = nexthop->rmap_src;
2562 prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2563 prev_index = nexthop->ifindex;
2564
2565 /* Include the containing nhe for primary nexthops: if there's
2566 * recursive resolution, we capture the backup info also.
2567 */
2568 new_active =
2569 nexthop_active_check(rn, re, nexthop,
2570 (is_backup ? NULL : nhe));
2571
2572 /*
2573 * We need to respect the multipath_num here,
2574 * as what we should be able to install from
2575 * a multipath perspective should not be a data
2576 * plane decision point.
2577 */
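/* E.g. with zrouter.multipath_num == 2 and four resolvable nexthops,
 * only the first two stay ACTIVE; the remaining ones (and their
 * resolved chains) are cleared below and not counted.
 */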
2578 if (new_active && counter >= zrouter.multipath_num) {
2579 struct nexthop *nh;
2580
2581 /* Set it and its resolved nexthop as inactive. */
2582 for (nh = nexthop; nh; nh = nh->resolved)
2583 UNSET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);
2584
2585 new_active = 0;
2586 }
2587
2588 if (new_active)
2589 counter++;
2590
2591 /* Check for changes to the nexthop - set ROUTE_ENTRY_CHANGED */
2592 if (prev_active != new_active || prev_index != nexthop->ifindex
2593 || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
2594 && nexthop->type < NEXTHOP_TYPE_IPV6)
2595 && prev_src.ipv4.s_addr
2596 != nexthop->rmap_src.ipv4.s_addr)
2597 || ((nexthop->type >= NEXTHOP_TYPE_IPV6
2598 && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
2599 && !(IPV6_ADDR_SAME(&prev_src.ipv6,
2600 &nexthop->rmap_src.ipv6)))
2601 || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED))
2602 SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
2603 }
2604
2605 return counter;
2606 }
2607
2608
2609 static uint32_t proto_nhg_nexthop_active_update(struct nexthop_group *nhg)
2610 {
2611 struct nexthop *nh;
2612 uint32_t curr_active = 0;
2613
2614 /* Assume all active for now */
2615
2616 for (nh = nhg->nexthop; nh; nh = nh->next) {
2617 SET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);
2618 curr_active++;
2619 }
2620
2621 return curr_active;
2622 }
2623
2624 /*
2625 * Iterate over all nexthops of the given RIB entry and refresh their
2626 * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
2627 * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
2628 *
2629 * Return value is the new number of active nexthops.
2630 */
2631 int nexthop_active_update(struct route_node *rn, struct route_entry *re)
2632 {
2633 struct nhg_hash_entry *curr_nhe;
2634 uint32_t curr_active = 0, backup_active = 0;
2635
2636 if (PROTO_OWNED(re->nhe))
2637 return proto_nhg_nexthop_active_update(&re->nhe->nhg);
2638
2639 afi_t rt_afi = family2afi(rn->p.family);
2640
2641 UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
2642
2643 /* Make a local copy of the existing nhe, so we don't work on/modify
2644 * the shared nhe.
2645 */
2646 curr_nhe = zebra_nhe_copy(re->nhe, re->nhe->id);
2647
2648 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2649 zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p",
2650 __func__, re, re->nhe, re->nhe->id,
2651 curr_nhe);
2652
2653 /* Clear the existing id, if any: this will avoid any confusion
2654 * if the id exists, and will also force the creation
2655 * of a new nhe reflecting the changes we may make in this local copy.
2656 */
2657 curr_nhe->id = 0;
2658
2659 /* Process nexthops */
2660 curr_active = nexthop_list_active_update(rn, re, curr_nhe, false);
2661
2662 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2663 zlog_debug("%s: re %p curr_active %u", __func__, re,
2664 curr_active);
2665
2666 /* If there are no backup nexthops, we are done */
2667 if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL)
2668 goto backups_done;
2669
2670 backup_active = nexthop_list_active_update(
2671 rn, re, curr_nhe->backup_info->nhe, true /*is_backup*/);
2672
2673 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2674 zlog_debug("%s: re %p backup_active %u", __func__, re,
2675 backup_active);
2676
2677 backups_done:
2678
2679 /*
2680 * Ref or create an nhe that matches the current state of the
2681 * nexthop(s).
2682 */
2683 if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
2684 struct nhg_hash_entry *new_nhe = NULL;
2685
2686 new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);
2687
2688 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2689 zlog_debug("%s: re %p CHANGED: nhe %p (%u) => new_nhe %p (%u)",
2690 __func__, re, re->nhe,
2691 re->nhe->id, new_nhe, new_nhe->id);
2692
2693 route_entry_update_nhe(re, new_nhe);
2694 }
2695
2696
2697 /* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID
2698 * flag where appropriate.
2699 */
2700 if (curr_active)
2701 zebra_nhg_set_valid_if_active(re->nhe);
2702
2703 /*
2704 * Do not need the old / copied nhe anymore since it
2705 * was either copied over into a new nhe or not
2706 * used at all.
2707 */
2708 zebra_nhg_free(curr_nhe);
2709 return curr_active;
2710 }
2711
2712 /* Recursively construct a grp array of fully resolved IDs.
2713 *
2714 * This function allows us to account for groups within groups,
2715 * by converting them into a flat array of IDs.
2716 *
2717 * The grp array is modified at every level of recursion to append
2718 * to it the next unique, fully resolved ID from the entire tree.
2719 *
2720 *
2721 * Note:
2722 * I'm pretty sure we only allow ONE level of group within group currently.
2723 * But making this recursive just in case that ever changes.
2724 */
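/* Illustrative flattening (hypothetical IDs): a group whose depends
 * are singleton 10 and group 20 = {30, 40} produces the array
 * [10, 30, 40]; duplicate IDs and entries that are not valid or not
 * installed/queued are skipped along the way.
 */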
2725 static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp,
2726 uint8_t curr_index,
2727 struct nhg_hash_entry *nhe,
2728 int max_num)
2729 {
2730 struct nhg_connected *rb_node_dep = NULL;
2731 struct nhg_hash_entry *depend = NULL;
2732 uint8_t i = curr_index;
2733
2734 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
2735 bool duplicate = false;
2736
2737 if (i >= max_num)
2738 goto done;
2739
2740 depend = rb_node_dep->nhe;
2741
2742 /*
2743 * If it's recursive, use its resolved nhe in the group
2744 */
2745 if (CHECK_FLAG(depend->flags, NEXTHOP_GROUP_RECURSIVE)) {
2746 depend = zebra_nhg_resolve(depend);
2747 if (!depend) {
2748 flog_err(
2749 EC_ZEBRA_NHG_FIB_UPDATE,
2750 "Failed to recursively resolve Nexthop Hash Entry in the group id=%u",
2751 nhe->id);
2752 continue;
2753 }
2754 }
2755
2756 if (!zebra_nhg_depends_is_empty(depend)) {
2757 /* This is a group within a group */
2758 i = zebra_nhg_nhe2grp_internal(grp, i, depend, max_num);
2759 } else {
2760 if (!CHECK_FLAG(depend->flags, NEXTHOP_GROUP_VALID)) {
2761 if (IS_ZEBRA_DEBUG_RIB_DETAILED
2762 || IS_ZEBRA_DEBUG_NHG)
2763 zlog_debug(
2764 "%s: Nexthop ID (%u) not valid, not appending to dataplane install group",
2765 __func__, depend->id);
2766 continue;
2767 }
2768
2769 /* If the nexthop is not installed/queued for install, don't
2770 * put it in the ID array.
2771 */
2772 if (!(CHECK_FLAG(depend->flags, NEXTHOP_GROUP_INSTALLED)
2773 || CHECK_FLAG(depend->flags,
2774 NEXTHOP_GROUP_QUEUED))) {
2775 if (IS_ZEBRA_DEBUG_RIB_DETAILED
2776 || IS_ZEBRA_DEBUG_NHG)
2777 zlog_debug(
2778 "%s: Nexthop ID (%u) not installed or queued for install, not appending to dataplane install group",
2779 __func__, depend->id);
2780 continue;
2781 }
2782
2783 /* Check for duplicate IDs, ignore if found. */
2784 for (int j = 0; j < i; j++) {
2785 if (depend->id == grp[j].id) {
2786 duplicate = true;
2787 break;
2788 }
2789 }
2790
2791 if (duplicate) {
2792 if (IS_ZEBRA_DEBUG_RIB_DETAILED
2793 || IS_ZEBRA_DEBUG_NHG)
2794 zlog_debug(
2795 "%s: Nexthop ID (%u) is duplicate, not appending to dataplane install group",
2796 __func__, depend->id);
2797 continue;
2798 }
2799
2800 grp[i].id = depend->id;
2801 grp[i].weight = depend->nhg.nexthop->weight;
2802 i++;
2803 }
2804 }
2805
2806 if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL)
2807 goto done;
2808
2809 /* TODO -- For now, we are not trying to use or install any
2810 * backup info in this nexthop-id path: we aren't prepared
2811 * to use the backups here yet. We're just debugging what we find.
2812 */
2813 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2814 zlog_debug("%s: skipping backup nhe", __func__);
2815
2816 done:
2817 return i;
2818 }
2819
2820 /* Convert a nhe into a group array */
2821 uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe,
2822 int max_num)
2823 {
2824 /* Call into the recursive function */
2825 return zebra_nhg_nhe2grp_internal(grp, 0, nhe, max_num);
2826 }
2827
2828 void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe)
2829 {
2830 struct nhg_connected *rb_node_dep = NULL;
2831
2832 /* Resolve it first */
2833 nhe = zebra_nhg_resolve(nhe);
2834
2835 /* Make sure all depends are installed/queued */
2836 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
2837 zebra_nhg_install_kernel(rb_node_dep->nhe);
2838 }
2839
2840 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)
2841 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2842 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
2843 /* Change its type to us since we are installing it */
2844 if (!ZEBRA_NHG_CREATED(nhe))
2845 nhe->type = ZEBRA_ROUTE_NHG;
2846
2847 int ret = dplane_nexthop_add(nhe);
2848
2849 switch (ret) {
2850 case ZEBRA_DPLANE_REQUEST_QUEUED:
2851 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
2852 break;
2853 case ZEBRA_DPLANE_REQUEST_FAILURE:
2854 flog_err(
2855 EC_ZEBRA_DP_INSTALL_FAIL,
2856 "Failed to install Nexthop ID (%u) into the kernel",
2857 nhe->id);
2858 break;
2859 case ZEBRA_DPLANE_REQUEST_SUCCESS:
2860 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
2861 zebra_nhg_handle_install(nhe);
2862 break;
2863 }
2864 }
2865 }
2866
2867 void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe)
2868 {
2869 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) {
2870 int ret = dplane_nexthop_delete(nhe);
2871
2872 switch (ret) {
2873 case ZEBRA_DPLANE_REQUEST_QUEUED:
2874 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
2875 break;
2876 case ZEBRA_DPLANE_REQUEST_FAILURE:
2877 flog_err(
2878 EC_ZEBRA_DP_DELETE_FAIL,
2879 "Failed to uninstall Nexthop ID (%u) from the kernel",
2880 nhe->id);
2881 break;
2882 case ZEBRA_DPLANE_REQUEST_SUCCESS:
2883 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
2884 break;
2885 }
2886 }
2887
2888 zebra_nhg_handle_uninstall(nhe);
2889 }
2890
2891 void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
2892 {
2893 enum dplane_op_e op;
2894 enum zebra_dplane_result status;
2895 uint32_t id = 0;
2896 struct nhg_hash_entry *nhe = NULL;
2897
2898 op = dplane_ctx_get_op(ctx);
2899 status = dplane_ctx_get_status(ctx);
2900
2901 id = dplane_ctx_get_nhe_id(ctx);
2902
2903 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL)
2904 zlog_debug(
2905 "Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s",
2906 ctx, dplane_op2str(op), id, dplane_res2str(status));
2907
2908 switch (op) {
2909 case DPLANE_OP_NH_DELETE:
2910 if (status != ZEBRA_DPLANE_REQUEST_SUCCESS)
2911 flog_err(
2912 EC_ZEBRA_DP_DELETE_FAIL,
2913 "Failed to uninstall Nexthop ID (%u) from the kernel",
2914 id);
2915
2916 /* We already freed the data, nothing to do */
2917 break;
2918 case DPLANE_OP_NH_INSTALL:
2919 case DPLANE_OP_NH_UPDATE:
2920 nhe = zebra_nhg_lookup_id(id);
2921
2922 if (!nhe) {
2923 if (IS_ZEBRA_DEBUG_NHG)
2924 zlog_debug(
2925 "%s operation performed on Nexthop ID (%u) in the kernel that we no longer have in our table",
2926 dplane_op2str(op), id);
2927
2928 break;
2929 }
2930
2931 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
2932 if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) {
2933 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
2934 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
2935 zebra_nhg_handle_install(nhe);
2936
2937 /* If daemon nhg, send it an update */
2938 if (PROTO_OWNED(nhe))
2939 zsend_nhg_notify(nhe->type, nhe->zapi_instance,
2940 nhe->zapi_session, nhe->id,
2941 ZAPI_NHG_INSTALLED);
2942 } else {
2943 /* If daemon nhg, send it an update */
2944 if (PROTO_OWNED(nhe))
2945 zsend_nhg_notify(nhe->type, nhe->zapi_instance,
2946 nhe->zapi_session, nhe->id,
2947 ZAPI_NHG_FAIL_INSTALL);
2948
2949 flog_err(
2950 EC_ZEBRA_DP_INSTALL_FAIL,
2951 "Failed to install Nexthop ID (%u) into the kernel",
2952 nhe->id);
2953 }
2954 break;
2955
2956 case DPLANE_OP_ROUTE_INSTALL:
2957 case DPLANE_OP_ROUTE_UPDATE:
2958 case DPLANE_OP_ROUTE_DELETE:
2959 case DPLANE_OP_ROUTE_NOTIFY:
2960 case DPLANE_OP_LSP_INSTALL:
2961 case DPLANE_OP_LSP_UPDATE:
2962 case DPLANE_OP_LSP_DELETE:
2963 case DPLANE_OP_LSP_NOTIFY:
2964 case DPLANE_OP_PW_INSTALL:
2965 case DPLANE_OP_PW_UNINSTALL:
2966 case DPLANE_OP_SYS_ROUTE_ADD:
2967 case DPLANE_OP_SYS_ROUTE_DELETE:
2968 case DPLANE_OP_ADDR_INSTALL:
2969 case DPLANE_OP_ADDR_UNINSTALL:
2970 case DPLANE_OP_MAC_INSTALL:
2971 case DPLANE_OP_MAC_DELETE:
2972 case DPLANE_OP_NEIGH_INSTALL:
2973 case DPLANE_OP_NEIGH_UPDATE:
2974 case DPLANE_OP_NEIGH_DELETE:
2975 case DPLANE_OP_NEIGH_IP_INSTALL:
2976 case DPLANE_OP_NEIGH_IP_DELETE:
2977 case DPLANE_OP_VTEP_ADD:
2978 case DPLANE_OP_VTEP_DELETE:
2979 case DPLANE_OP_RULE_ADD:
2980 case DPLANE_OP_RULE_DELETE:
2981 case DPLANE_OP_RULE_UPDATE:
2982 case DPLANE_OP_NEIGH_DISCOVER:
2983 case DPLANE_OP_BR_PORT_UPDATE:
2984 case DPLANE_OP_NONE:
2985 case DPLANE_OP_IPTABLE_ADD:
2986 case DPLANE_OP_IPTABLE_DELETE:
2987 case DPLANE_OP_IPSET_ADD:
2988 case DPLANE_OP_IPSET_DELETE:
2989 case DPLANE_OP_IPSET_ENTRY_ADD:
2990 case DPLANE_OP_IPSET_ENTRY_DELETE:
2991 case DPLANE_OP_NEIGH_TABLE_UPDATE:
2992 case DPLANE_OP_GRE_SET:
2993 case DPLANE_OP_INTF_ADDR_ADD:
2994 case DPLANE_OP_INTF_ADDR_DEL:
2995 case DPLANE_OP_INTF_NETCONFIG:
2996 break;
2997 }
2998
2999 dplane_ctx_fini(&ctx);
3000 }
3001
3002 static int zebra_nhg_sweep_entry(struct hash_bucket *bucket, void *arg)
3003 {
3004 struct nhg_hash_entry *nhe = NULL;
3005
3006 nhe = (struct nhg_hash_entry *)bucket->data;
3007
3008 /*
3009 * Same logic as with routes.
3010 *
3011 * If older than startup time, we know we read them in from the
3012 * kernel and have not gotten an update for them since startup
3013 * from an upper level proto.
3014 */
3015 if (zrouter.startup_time < nhe->uptime)
3016 return HASHWALK_CONTINUE;
3017
3018 /*
3019 * If it's proto-owned and not being used by a route, remove it since
3020 * we haven't gotten an update about it from the proto since startup.
3021 * This means that either the config for it was removed or the daemon
3022 * didn't get started. This handles graceful restart & retain scenario.
3023 */
3024 if (PROTO_OWNED(nhe) && nhe->refcnt == 1) {
3025 zebra_nhg_decrement_ref(nhe);
3026 return HASHWALK_ABORT;
3027 }
3028
3029 /*
3030 * If it's being ref'd by routes, just let it be uninstalled via a route
3031 * removal.
3032 */
3033 if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0) {
3034 zebra_nhg_uninstall_kernel(nhe);
3035 return HASHWALK_ABORT;
3036 }
3037
3038 return HASHWALK_CONTINUE;
3039 }
3040
3041 void zebra_nhg_sweep_table(struct hash *hash)
3042 {
3043 uint32_t count;
3044
3045 /*
3046 * Yes, this is extremely odd. Effectively, nhg's have
3047 * other nexthop groups that depend on them, and when you
3048 * remove them, you can have other entries blown up.
3049 * Our hash code does not work with deleting multiple
3050 * entries at a time and will possibly cause crashes.
3051 * So what to do? Whenever zebra_nhg_sweep_entry
3052 * deletes an entry it will return HASHWALK_ABORT,
3053 * because that deletion might have triggered more.
3054 * Then we can just keep sweeping this table
3055 * until nothing more is found to do.
3056 */
3057 do {
3058 count = hashcount(hash);
3059 hash_walk(hash, zebra_nhg_sweep_entry, NULL);
3060 } while (count != hashcount(hash));
3061 }
3062
3063 static void zebra_nhg_mark_keep_entry(struct hash_bucket *bucket, void *arg)
3064 {
3065 struct nhg_hash_entry *nhe = bucket->data;
3066
3067 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
3068 }
3069
3070 /*
3071 * When we are shutting down and we have retain mode enabled
3072 * in zebra, the process is to mark each vrf that its
3073 * routes should not be deleted. The problem with that
3074 * is that shutdown actually frees up memory, which
3075 * causes the nexthop groups' ref counts to go to zero.
3076 * We need a way to subtly tell the system not to remove
3077 * the nexthop groups from the kernel at the same time.
3078 * The easiest approach is simply to not mark
3079 * the nhg's as installed any more; when the ref count
3080 * goes to zero we'll attempt to delete and do nothing.
3081 */
3082 void zebra_nhg_mark_keep(void)
3083 {
3084 hash_iterate(zrouter.nhgs_id, zebra_nhg_mark_keep_entry, NULL);
3085 }
3086
3087 /* Global control to disable use of kernel nexthops, if available. We can't
3088 * force the kernel to support nexthop ids, of course, but we can disable
3089 * zebra's use of them, e.g. for testing. By default, if the kernel supports
3090 * nexthop ids, zebra uses them.
3091 */
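/* A minimal usage sketch (any caller shown here is hypothetical): a
 * vty/config handler can call zebra_nhg_enable_kernel_nexthops(false)
 * so zebra stops creating kernel nexthop objects and instead installs
 * routes carrying their full nexthop information.
 */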
3092 void zebra_nhg_enable_kernel_nexthops(bool set)
3093 {
3094 g_nexthops_enabled = set;
3095 }
3096
3097 bool zebra_nhg_kernel_nexthops_enabled(void)
3098 {
3099 return g_nexthops_enabled;
3100 }
3101
3102 /* Global control for use of activated backups for recursive resolution. */
3103 void zebra_nhg_set_recursive_use_backups(bool set)
3104 {
3105 use_recursive_backups = set;
3106 }
3107
3108 bool zebra_nhg_recursive_use_backups(void)
3109 {
3110 return use_recursive_backups;
3111 }
3112
3113 /*
3114 * Global control to only use kernel nexthops for protocol created NHGs.
3115 * There are some use cases where you may not want zebra to implicitly
3116 * create kernel nexthops for all routes and only create them for NHGs
3117 * passed down by upper level protos.
3118 *
3119 * Default is off.
3120 */
3121 void zebra_nhg_set_proto_nexthops_only(bool set)
3122 {
3123 proto_nexthops_only = set;
3124 }
3125
3126 bool zebra_nhg_proto_nexthops_only(void)
3127 {
3128 return proto_nexthops_only;
3129 }
3130
3131 /* Add NHE from upper level proto */
3132 struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
3133 uint16_t instance, uint32_t session,
3134 struct nexthop_group *nhg, afi_t afi)
3135 {
3136 struct nhg_hash_entry lookup;
3137 struct nhg_hash_entry *new, *old;
3138 struct nhg_connected *rb_node_dep = NULL;
3139 struct nexthop *newhop;
3140 bool replace = false;
3141
3142 if (!nhg->nexthop) {
3143 if (IS_ZEBRA_DEBUG_NHG)
3144 zlog_debug("%s: id %u, no nexthops passed to add",
3145 __func__, id);
3146 return NULL;
3147 }
3148
3149
3150 /* Set the nexthop list as active, since it won't go through rib
3151 * processing.
3152 *
3153 * Assuming valid/onlink for now.
3154 *
3155 * Once resolution is figured out, we won't need this!
3156 */
3157 for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
3158 if (CHECK_FLAG(newhop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
3159 if (IS_ZEBRA_DEBUG_NHG)
3160 zlog_debug(
3161 "%s: id %u, backup nexthops not supported",
3162 __func__, id);
3163 return NULL;
3164 }
3165
3166 if (newhop->type == NEXTHOP_TYPE_BLACKHOLE) {
3167 if (IS_ZEBRA_DEBUG_NHG)
3168 zlog_debug(
3169 "%s: id %u, blackhole nexthop not supported",
3170 __func__, id);
3171 return NULL;
3172 }
3173
3174 if (newhop->type == NEXTHOP_TYPE_IFINDEX) {
3175 if (IS_ZEBRA_DEBUG_NHG)
3176 zlog_debug(
3177 "%s: id %u, nexthop without gateway not supported",
3178 __func__, id);
3179 return NULL;
3180 }
3181
3182 if (!newhop->ifindex) {
3183 if (IS_ZEBRA_DEBUG_NHG)
3184 zlog_debug(
3185 "%s: id %u, nexthop without ifindex is not supported",
3186 __func__, id);
3187 return NULL;
3188 }
3189 SET_FLAG(newhop->flags, NEXTHOP_FLAG_ACTIVE);
3190 }
3191
3192 zebra_nhe_init(&lookup, afi, nhg->nexthop);
3193 lookup.nhg.nexthop = nhg->nexthop;
3194 lookup.id = id;
3195 lookup.type = type;
3196
3197 old = zebra_nhg_lookup_id(id);
3198
3199 if (old) {
3200 /*
3201 * This is a replace; just release the NHE from its ID for now. The
3202 * depends/dependents may still be used in the replacement, so
3203 * we don't touch them other than to remove their refs to their
3204 * old parent.
3205 */
3206 replace = true;
3207 hash_release(zrouter.nhgs_id, old);
3208
3209 /* Free all the things */
3210 zebra_nhg_release_all_deps(old);
3211 }
3212
3213 new = zebra_nhg_rib_find_nhe(&lookup, afi);
3214
3215 zebra_nhg_increment_ref(new);
3216
3217 /* Capture zapi client info */
3218 new->zapi_instance = instance;
3219 new->zapi_session = session;
3220
3221 zebra_nhg_set_valid_if_active(new);
3222
3223 zebra_nhg_install_kernel(new);
3224
3225 if (old) {
3226 /*
3227 * Check to handle receiving a DEL while routes are still in use,
3228 * then a replace.
3229 *
3230 * In this case we would have decremented the refcnt already
3231 * but set the FLAG here. Go ahead and increment once to fix
3232 * the misordering we have been sent.
3233 */
3234 if (CHECK_FLAG(old->flags, NEXTHOP_GROUP_PROTO_RELEASED))
3235 zebra_nhg_increment_ref(old);
3236
3237 rib_handle_nhg_replace(old, new);
3238
3239 /* if this != 1 at this point, we have a bug */
3240 assert(old->refcnt == 1);
3241
3242 /* We have to decrement its singletons
3243 * because some might not exist in NEW.
3244 */
3245 if (!zebra_nhg_depends_is_empty(old)) {
3246 frr_each (nhg_connected_tree, &old->nhg_depends,
3247 rb_node_dep)
3248 zebra_nhg_decrement_ref(rb_node_dep->nhe);
3249 }
3250
3251 /* Don't call the dec API, we don't want to uninstall the ID */
3252 old->refcnt = 0;
3253 zebra_nhg_free(old);
3254 old = NULL;
3255 }
3256
3257 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
3258 zlog_debug("%s: %s nhe %p (%u), vrf %d, type %s", __func__,
3259 (replace ? "replaced" : "added"), new, new->id,
3260 new->vrf_id, zebra_route_string(new->type));
3261
3262 return new;
3263 }
3264
3265 /* Delete NHE from upper level proto, caller must decrement ref */
3266 struct nhg_hash_entry *zebra_nhg_proto_del(uint32_t id, int type)
3267 {
3268 struct nhg_hash_entry *nhe;
3269
3270 nhe = zebra_nhg_lookup_id(id);
3271
3272 if (!nhe) {
3273 if (IS_ZEBRA_DEBUG_NHG)
3274 zlog_debug("%s: id %u, lookup failed", __func__, id);
3275
3276 return NULL;
3277 }
3278
3279 if (type != nhe->type) {
3280 if (IS_ZEBRA_DEBUG_NHG)
3281 zlog_debug(
3282 "%s: id %u, type %s mismatch, sent by %s, ignoring",
3283 __func__, id, zebra_route_string(nhe->type),
3284 zebra_route_string(type));
3285 return NULL;
3286 }
3287
3288 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED)) {
3289 if (IS_ZEBRA_DEBUG_NHG)
3290 zlog_debug("%s: id %u, already released", __func__, id);
3291
3292 return NULL;
3293 }
3294
3295 SET_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED);
3296
3297 if (nhe->refcnt > 1) {
3298 if (IS_ZEBRA_DEBUG_NHG)
3299 zlog_debug(
3300 "%s: id %u, still being used by routes refcnt %u",
3301 __func__, nhe->id, nhe->refcnt);
3302 return nhe;
3303 }
3304
3305 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
3306 zlog_debug("%s: deleted nhe %p (%u), vrf %d, type %s", __func__,
3307 nhe, nhe->id, nhe->vrf_id,
3308 zebra_route_string(nhe->type));
3309
3310 return nhe;
3311 }
3312
3313 struct nhg_score_proto_iter {
3314 int type;
3315 struct list *found;
3316 };
3317
3318 static void zebra_nhg_score_proto_entry(struct hash_bucket *bucket, void *arg)
3319 {
3320 struct nhg_hash_entry *nhe;
3321 struct nhg_score_proto_iter *iter;
3322
3323 nhe = (struct nhg_hash_entry *)bucket->data;
3324 iter = arg;
3325
3326 /* Needs to match the type and be outside the zebra ID space */
3327 if (nhe->type == iter->type && PROTO_OWNED(nhe)) {
3328 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
3329 zlog_debug(
3330 "%s: found nhe %p (%u), vrf %d, type %s after client disconnect",
3331 __func__, nhe, nhe->id, nhe->vrf_id,
3332 zebra_route_string(nhe->type));
3333
3334 /* Add to removal list */
3335 listnode_add(iter->found, nhe);
3336 }
3337 }
3338
3339 /* Remove NHGs owned by a specific proto */
3340 unsigned long zebra_nhg_score_proto(int type)
3341 {
3342 struct nhg_hash_entry *nhe;
3343 struct nhg_score_proto_iter iter = {};
3344 struct listnode *ln;
3345 unsigned long count;
3346
3347 iter.type = type;
3348 iter.found = list_new();
3349
3350 /* Find matching entries to remove */
3351 hash_iterate(zrouter.nhgs_id, zebra_nhg_score_proto_entry, &iter);
3352
3353 /* Now remove them */
3354 for (ALL_LIST_ELEMENTS_RO(iter.found, ln, nhe)) {
3355 /*
3356 * This should be the last ref if we remove client routes too,
3357 * and thus should remove and free them.
3358 */
3359 zebra_nhg_decrement_ref(nhe);
3360 }
3361
3362 count = iter.found->count;
3363 list_delete(&iter.found);
3364
3365 return count;
3366 }