]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_nhg.c
Merge pull request #12686 from opensourcerouting/debian-sync-20230124
[mirror_frr.git] / zebra / zebra_nhg.c
1 /* Zebra Nexthop Group Code.
2 * Copyright (C) 2019 Cumulus Networks, Inc.
3 * Donald Sharp
4 * Stephen Worley
5 *
6 * This file is part of FRR.
7 *
8 * FRR is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * FRR is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with FRR; see the file COPYING. If not, write to the Free
20 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
21 * 02111-1307, USA.
22 */
23 #include <zebra.h>
24
25 #include "lib/nexthop.h"
26 #include "lib/nexthop_group_private.h"
27 #include "lib/routemap.h"
28 #include "lib/mpls.h"
29 #include "lib/jhash.h"
30 #include "lib/debug.h"
31 #include "lib/lib_errors.h"
32
33 #include "zebra/connected.h"
34 #include "zebra/debug.h"
35 #include "zebra/zebra_router.h"
36 #include "zebra/zebra_nhg_private.h"
37 #include "zebra/zebra_rnh.h"
38 #include "zebra/zebra_routemap.h"
39 #include "zebra/zebra_srte.h"
40 #include "zebra/zserv.h"
41 #include "zebra/rt.h"
42 #include "zebra_errors.h"
43 #include "zebra_dplane.h"
44 #include "zebra/interface.h"
45 #include "zebra/zapi_msg.h"
46 #include "zebra/rib.h"
47
48 DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");
49 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected");
50 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CTX, "Nexthop Group Context");
51
52 /* Map backup nexthop indices between two nhes */
53 struct backup_nh_map_s {
54 int map_count;
55
56 struct {
57 uint8_t orig_idx;
58 uint8_t new_idx;
59 } map[MULTIPATH_NUM];
60 };
61
/* Running NHG id counter, kept in sync with the kernel's id space */
uint32_t id_counter;
64
/* Knobs controlled through the UI (vtysh) */
static bool g_nexthops_enabled = true;	   /* kernel nexthop objects on/off */
static bool proto_nexthops_only;	   /* only install proto-owned NHGs */
static bool use_recursive_backups = true;  /* allow recursion for backups */
69
70 static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi,
71 int type, bool from_dplane);
72 static void depends_add(struct nhg_connected_tree_head *head,
73 struct nhg_hash_entry *depend);
74 static struct nhg_hash_entry *
75 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
76 afi_t afi, int type, bool from_dplane);
77 static struct nhg_hash_entry *
78 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id);
79 static void depends_decrement_free(struct nhg_connected_tree_head *head);
80
81 static struct nhg_backup_info *
82 nhg_backup_copy(const struct nhg_backup_info *orig);
83
84 /* Helper function for getting the next allocatable ID */
85 static uint32_t nhg_get_next_id(void)
86 {
87 while (1) {
88 id_counter++;
89
90 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
91 zlog_debug("%s: ID %u checking", __func__, id_counter);
92
93 if (id_counter == ZEBRA_NHG_PROTO_LOWER) {
94 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
95 zlog_debug("%s: ID counter wrapped", __func__);
96
97 id_counter = 0;
98 continue;
99 }
100
101 if (zebra_nhg_lookup_id(id_counter)) {
102 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
103 zlog_debug("%s: ID already exists", __func__);
104
105 continue;
106 }
107
108 break;
109 }
110
111 return id_counter;
112 }
113
114 static void nhg_connected_free(struct nhg_connected *dep)
115 {
116 XFREE(MTYPE_NHG_CONNECTED, dep);
117 }
118
119 static struct nhg_connected *nhg_connected_new(struct nhg_hash_entry *nhe)
120 {
121 struct nhg_connected *new = NULL;
122
123 new = XCALLOC(MTYPE_NHG_CONNECTED, sizeof(struct nhg_connected));
124 new->nhe = nhe;
125
126 return new;
127 }
128
129 void nhg_connected_tree_free(struct nhg_connected_tree_head *head)
130 {
131 struct nhg_connected *rb_node_dep = NULL;
132
133 if (!nhg_connected_tree_is_empty(head)) {
134 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
135 nhg_connected_tree_del(head, rb_node_dep);
136 nhg_connected_free(rb_node_dep);
137 }
138 }
139 }
140
/* True when 'head' holds no entries.
 *
 * Idiom fix: return the comparison directly instead of the redundant
 * `count ? false : true` ternary.
 */
bool nhg_connected_tree_is_empty(const struct nhg_connected_tree_head *head)
{
	return nhg_connected_tree_count(head) == 0;
}
145
/* Return the first (root) entry of the connected tree, or NULL if empty. */
struct nhg_connected *
nhg_connected_tree_root(struct nhg_connected_tree_head *head)
{
	return nhg_connected_tree_first(head);
}
151
152 struct nhg_hash_entry *
153 nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head,
154 struct nhg_hash_entry *depend)
155 {
156 struct nhg_connected lookup = {};
157 struct nhg_connected *remove = NULL;
158 struct nhg_hash_entry *removed_nhe;
159
160 lookup.nhe = depend;
161
162 /* Lookup to find the element, then remove it */
163 remove = nhg_connected_tree_find(head, &lookup);
164 if (remove)
165 /* Re-returning here just in case this API changes..
166 * the _del list api's are a bit undefined at the moment.
167 *
168 * So hopefully returning here will make it fail if the api
169 * changes to something different than currently expected.
170 */
171 remove = nhg_connected_tree_del(head, remove);
172
173 /* If the entry was sucessfully removed, free the 'connected` struct */
174 if (remove) {
175 removed_nhe = remove->nhe;
176 nhg_connected_free(remove);
177 return removed_nhe;
178 }
179
180 return NULL;
181 }
182
183 /* Assuming UNIQUE RB tree. If this changes, assumptions here about
184 * insertion need to change.
185 */
186 struct nhg_hash_entry *
187 nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head,
188 struct nhg_hash_entry *depend)
189 {
190 struct nhg_connected *new = NULL;
191
192 new = nhg_connected_new(depend);
193
194 /* On success, NULL will be returned from the
195 * RB code.
196 */
197 if (new && (nhg_connected_tree_add(head, new) == NULL))
198 return NULL;
199
200 /* If it wasn't successful, it must be a duplicate. We enforce the
201 * unique property for the `nhg_connected` tree.
202 */
203 nhg_connected_free(new);
204
205 return depend;
206 }
207
208 static void
209 nhg_connected_tree_decrement_ref(struct nhg_connected_tree_head *head)
210 {
211 struct nhg_connected *rb_node_dep = NULL;
212
213 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
214 zebra_nhg_decrement_ref(rb_node_dep->nhe);
215 }
216 }
217
218 static void
219 nhg_connected_tree_increment_ref(struct nhg_connected_tree_head *head)
220 {
221 struct nhg_connected *rb_node_dep = NULL;
222
223 frr_each(nhg_connected_tree, head, rb_node_dep) {
224 zebra_nhg_increment_ref(rb_node_dep->nhe);
225 }
226 }
227
228 struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe)
229 {
230 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE)
231 && !zebra_nhg_depends_is_empty(nhe)) {
232 nhe = nhg_connected_tree_root(&nhe->nhg_depends)->nhe;
233 return zebra_nhg_resolve(nhe);
234 }
235
236 return nhe;
237 }
238
239 unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe)
240 {
241 return nhg_connected_tree_count(&nhe->nhg_depends);
242 }
243
244 bool zebra_nhg_depends_is_empty(const struct nhg_hash_entry *nhe)
245 {
246 return nhg_connected_tree_is_empty(&nhe->nhg_depends);
247 }
248
249 static void zebra_nhg_depends_del(struct nhg_hash_entry *from,
250 struct nhg_hash_entry *depend)
251 {
252 nhg_connected_tree_del_nhe(&from->nhg_depends, depend);
253 }
254
255 static void zebra_nhg_depends_init(struct nhg_hash_entry *nhe)
256 {
257 nhg_connected_tree_init(&nhe->nhg_depends);
258 }
259
260 unsigned int zebra_nhg_dependents_count(const struct nhg_hash_entry *nhe)
261 {
262 return nhg_connected_tree_count(&nhe->nhg_dependents);
263 }
264
265
266 bool zebra_nhg_dependents_is_empty(const struct nhg_hash_entry *nhe)
267 {
268 return nhg_connected_tree_is_empty(&nhe->nhg_dependents);
269 }
270
271 static void zebra_nhg_dependents_del(struct nhg_hash_entry *from,
272 struct nhg_hash_entry *dependent)
273 {
274 nhg_connected_tree_del_nhe(&from->nhg_dependents, dependent);
275 }
276
277 static void zebra_nhg_dependents_add(struct nhg_hash_entry *to,
278 struct nhg_hash_entry *dependent)
279 {
280 nhg_connected_tree_add_nhe(&to->nhg_dependents, dependent);
281 }
282
283 static void zebra_nhg_dependents_init(struct nhg_hash_entry *nhe)
284 {
285 nhg_connected_tree_init(&nhe->nhg_dependents);
286 }
287
288 /* Release this nhe from anything depending on it */
289 static void zebra_nhg_dependents_release(struct nhg_hash_entry *nhe)
290 {
291 struct nhg_connected *rb_node_dep = NULL;
292
293 frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
294 zebra_nhg_depends_del(rb_node_dep->nhe, nhe);
295 /* recheck validity of the dependent */
296 zebra_nhg_check_valid(rb_node_dep->nhe);
297 }
298 }
299
300 /* Release this nhe from anything that it depends on */
301 static void zebra_nhg_depends_release(struct nhg_hash_entry *nhe)
302 {
303 if (!zebra_nhg_depends_is_empty(nhe)) {
304 struct nhg_connected *rb_node_dep = NULL;
305
306 frr_each_safe(nhg_connected_tree, &nhe->nhg_depends,
307 rb_node_dep) {
308 zebra_nhg_dependents_del(rb_node_dep->nhe, nhe);
309 }
310 }
311 }
312
313
314 struct nhg_hash_entry *zebra_nhg_lookup_id(uint32_t id)
315 {
316 struct nhg_hash_entry lookup = {};
317
318 lookup.id = id;
319 return hash_lookup(zrouter.nhgs_id, &lookup);
320 }
321
322 static int zebra_nhg_insert_id(struct nhg_hash_entry *nhe)
323 {
324 if (hash_lookup(zrouter.nhgs_id, nhe)) {
325 flog_err(
326 EC_ZEBRA_NHG_TABLE_INSERT_FAILED,
327 "Failed inserting NHG %pNG into the ID hash table, entry already exists",
328 nhe);
329 return -1;
330 }
331
332 (void)hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern);
333
334 return 0;
335 }
336
337 static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
338 {
339 nhe->ifp = ifp;
340 if_nhg_dependents_add(ifp, nhe);
341 }
342
343 static void
344 zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
345 struct nhg_connected_tree_head *nhg_depends)
346 {
347 struct nhg_connected *rb_node_dep = NULL;
348
349 /* This has been allocated higher above in the stack. Could probably
350 * re-allocate and free the old stuff but just using the same memory
351 * for now. Otherwise, their might be a time trade-off for repeated
352 * alloc/frees as startup.
353 */
354 nhe->nhg_depends = *nhg_depends;
355
356 /* Attach backpointer to anything that it depends on */
357 zebra_nhg_dependents_init(nhe);
358 if (!zebra_nhg_depends_is_empty(nhe)) {
359 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
360 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
361 zlog_debug("%s: nhe %p (%pNG), dep %p (%pNG)",
362 __func__, nhe, nhe, rb_node_dep->nhe,
363 rb_node_dep->nhe);
364
365 zebra_nhg_dependents_add(rb_node_dep->nhe, nhe);
366 }
367 }
368 }
369
370 /* Init an nhe, for use in a hash lookup for example */
371 void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
372 const struct nexthop *nh)
373 {
374 memset(nhe, 0, sizeof(struct nhg_hash_entry));
375 nhe->vrf_id = VRF_DEFAULT;
376 nhe->type = ZEBRA_ROUTE_NHG;
377 nhe->afi = AFI_UNSPEC;
378
379 /* There are some special rules that apply to groups representing
380 * a single nexthop.
381 */
382 if (nh && (nh->next == NULL)) {
383 switch (nh->type) {
384 case NEXTHOP_TYPE_IFINDEX:
385 case NEXTHOP_TYPE_BLACKHOLE:
386 /*
387 * This switch case handles setting the afi different
388 * for ipv4/v6 routes. Ifindex/blackhole nexthop
389 * objects cannot be ambiguous, they must be Address
390 * Family specific. If we get here, we will either use
391 * the AF of the route, or the one we got passed from
392 * here from the kernel.
393 */
394 nhe->afi = afi;
395 break;
396 case NEXTHOP_TYPE_IPV4_IFINDEX:
397 case NEXTHOP_TYPE_IPV4:
398 nhe->afi = AFI_IP;
399 break;
400 case NEXTHOP_TYPE_IPV6_IFINDEX:
401 case NEXTHOP_TYPE_IPV6:
402 nhe->afi = AFI_IP6;
403 break;
404 }
405 }
406 }
407
408 struct nhg_hash_entry *zebra_nhg_alloc(void)
409 {
410 struct nhg_hash_entry *nhe;
411
412 nhe = XCALLOC(MTYPE_NHG, sizeof(struct nhg_hash_entry));
413
414 return nhe;
415 }
416
417 /*
418 * Allocate new nhe and make shallow copy of 'orig'; no
419 * recursive info is copied.
420 */
421 struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig,
422 uint32_t id)
423 {
424 struct nhg_hash_entry *nhe;
425
426 nhe = zebra_nhg_alloc();
427
428 nhe->id = id;
429
430 nexthop_group_copy(&(nhe->nhg), &(orig->nhg));
431
432 nhe->vrf_id = orig->vrf_id;
433 nhe->afi = orig->afi;
434 nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG;
435 nhe->refcnt = 0;
436 nhe->dplane_ref = zebra_router_get_next_sequence();
437
438 /* Copy backup info also, if present */
439 if (orig->backup_info)
440 nhe->backup_info = nhg_backup_copy(orig->backup_info);
441
442 return nhe;
443 }
444
445 /* Allocation via hash handler */
446 static void *zebra_nhg_hash_alloc(void *arg)
447 {
448 struct nhg_hash_entry *nhe = NULL;
449 struct nhg_hash_entry *copy = arg;
450
451 nhe = zebra_nhe_copy(copy, copy->id);
452
453 /* Mark duplicate nexthops in a group at creation time. */
454 nexthop_group_mark_duplicates(&(nhe->nhg));
455
456 /*
457 * Add the ifp now if it's not a group or recursive and has ifindex.
458 *
459 * A proto-owned ID is always a group.
460 */
461 if (!PROTO_OWNED(nhe) && nhe->nhg.nexthop && !nhe->nhg.nexthop->next
462 && !nhe->nhg.nexthop->resolved && nhe->nhg.nexthop->ifindex) {
463 struct interface *ifp = NULL;
464
465 ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
466 nhe->nhg.nexthop->vrf_id);
467 if (ifp)
468 zebra_nhg_set_if(nhe, ifp);
469 else {
470 if (IS_ZEBRA_DEBUG_NHG)
471 zlog_debug(
472 "Failed to lookup an interface with ifindex=%d in vrf=%u for NHE %pNG",
473 nhe->nhg.nexthop->ifindex,
474 nhe->nhg.nexthop->vrf_id, nhe);
475 }
476 }
477
478 return nhe;
479 }
480
481 uint32_t zebra_nhg_hash_key(const void *arg)
482 {
483 const struct nhg_hash_entry *nhe = arg;
484 uint32_t key = 0x5a351234;
485 uint32_t primary = 0;
486 uint32_t backup = 0;
487
488 primary = nexthop_group_hash(&(nhe->nhg));
489 if (nhe->backup_info)
490 backup = nexthop_group_hash(&(nhe->backup_info->nhe->nhg));
491
492 key = jhash_3words(primary, backup, nhe->type, key);
493
494 key = jhash_2words(nhe->vrf_id, nhe->afi, key);
495
496 return key;
497 }
498
499 uint32_t zebra_nhg_id_key(const void *arg)
500 {
501 const struct nhg_hash_entry *nhe = arg;
502
503 return nhe->id;
504 }
505
506 /* Helper with common nhg/nhe nexthop comparison logic */
507 static bool nhg_compare_nexthops(const struct nexthop *nh1,
508 const struct nexthop *nh2)
509 {
510 assert(nh1 != NULL && nh2 != NULL);
511
512 /*
513 * We have to check the active flag of each individual one,
514 * not just the overall active_num. This solves the special case
515 * issue of a route with a nexthop group with one nexthop
516 * resolving to itself and thus marking it inactive. If we
517 * have two different routes each wanting to mark a different
518 * nexthop inactive, they need to hash to two different groups.
519 *
520 * If we just hashed on num_active, they would hash the same
521 * which is incorrect.
522 *
523 * ex)
524 * 1.1.1.0/24
525 * -> 1.1.1.1 dummy1 (inactive)
526 * -> 1.1.2.1 dummy2
527 *
528 * 1.1.2.0/24
529 * -> 1.1.1.1 dummy1
530 * -> 1.1.2.1 dummy2 (inactive)
531 *
532 * Without checking each individual one, they would hash to
533 * the same group and both have 1.1.1.1 dummy1 marked inactive.
534 *
535 */
536 if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE)
537 != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE))
538 return false;
539
540 if (!nexthop_same(nh1, nh2))
541 return false;
542
543 return true;
544 }
545
546 bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
547 {
548 const struct nhg_hash_entry *nhe1 = arg1;
549 const struct nhg_hash_entry *nhe2 = arg2;
550 struct nexthop *nexthop1;
551 struct nexthop *nexthop2;
552
553 /* No matter what if they equal IDs, assume equal */
554 if (nhe1->id && nhe2->id && (nhe1->id == nhe2->id))
555 return true;
556
557 if (nhe1->type != nhe2->type)
558 return false;
559
560 if (nhe1->vrf_id != nhe2->vrf_id)
561 return false;
562
563 if (nhe1->afi != nhe2->afi)
564 return false;
565
566 if (nhe1->nhg.nhgr.buckets != nhe2->nhg.nhgr.buckets)
567 return false;
568
569 if (nhe1->nhg.nhgr.idle_timer != nhe2->nhg.nhgr.idle_timer)
570 return false;
571
572 if (nhe1->nhg.nhgr.unbalanced_timer != nhe2->nhg.nhgr.unbalanced_timer)
573 return false;
574
575 /* Nexthops should be in-order, so we simply compare them in-place */
576 for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
577 nexthop1 && nexthop2;
578 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
579
580 if (!nhg_compare_nexthops(nexthop1, nexthop2))
581 return false;
582 }
583
584 /* Check for unequal list lengths */
585 if (nexthop1 || nexthop2)
586 return false;
587
588 /* If there's no backup info, comparison is done. */
589 if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL))
590 return true;
591
592 /* Compare backup info also - test the easy things first */
593 if (nhe1->backup_info && (nhe2->backup_info == NULL))
594 return false;
595 if (nhe2->backup_info && (nhe1->backup_info == NULL))
596 return false;
597
598 /* Compare number of backups before actually comparing any */
599 for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
600 nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
601 nexthop1 && nexthop2;
602 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
603 ;
604 }
605
606 /* Did we find the end of one list before the other? */
607 if (nexthop1 || nexthop2)
608 return false;
609
610 /* Have to compare the backup nexthops */
611 for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
612 nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
613 nexthop1 && nexthop2;
614 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
615
616 if (!nhg_compare_nexthops(nexthop1, nexthop2))
617 return false;
618 }
619
620 return true;
621 }
622
623 bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2)
624 {
625 const struct nhg_hash_entry *nhe1 = arg1;
626 const struct nhg_hash_entry *nhe2 = arg2;
627
628 return nhe1->id == nhe2->id;
629 }
630
631 static int zebra_nhg_process_grp(struct nexthop_group *nhg,
632 struct nhg_connected_tree_head *depends,
633 struct nh_grp *grp, uint8_t count,
634 struct nhg_resilience *resilience)
635 {
636 nhg_connected_tree_init(depends);
637
638 for (int i = 0; i < count; i++) {
639 struct nhg_hash_entry *depend = NULL;
640 /* We do not care about nexthop_grp.weight at
641 * this time. But we should figure out
642 * how to adapt this to our code in
643 * the future.
644 */
645 depend = depends_find_id_add(depends, grp[i].id);
646
647 if (!depend) {
648 flog_err(
649 EC_ZEBRA_NHG_SYNC,
650 "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table",
651 grp[i].id);
652 return -1;
653 }
654
655 /*
656 * If this is a nexthop with its own group
657 * dependencies, add them as well. Not sure its
658 * even possible to have a group within a group
659 * in the kernel.
660 */
661
662 copy_nexthops(&nhg->nexthop, depend->nhg.nexthop, NULL);
663 }
664
665 if (resilience)
666 nhg->nhgr = *resilience;
667
668 return 0;
669 }
670
671 static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends,
672 struct nexthop *nh, afi_t afi, int type)
673 {
674 struct nhg_hash_entry *depend = NULL;
675 struct nexthop_group resolved_ng = {};
676
677 resolved_ng.nexthop = nh;
678
679 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
680 zlog_debug("%s: head %p, nh %pNHv",
681 __func__, nhg_depends, nh);
682
683 depend = zebra_nhg_rib_find(0, &resolved_ng, afi, type);
684
685 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
686 zlog_debug("%s: nh %pNHv => %p (%u)",
687 __func__, nh, depend,
688 depend ? depend->id : 0);
689
690 if (depend)
691 depends_add(nhg_depends, depend);
692 }
693
694 /*
695 * Lookup an nhe in the global hash, using data from another nhe. If 'lookup'
696 * has an id value, that's used. Create a new global/shared nhe if not found.
697 */
698 static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */
699 struct nhg_hash_entry *lookup,
700 struct nhg_connected_tree_head *nhg_depends,
701 afi_t afi, bool from_dplane)
702 {
703 bool created = false;
704 bool recursive = false;
705 struct nhg_hash_entry *newnhe, *backup_nhe;
706 struct nexthop *nh = NULL;
707
708 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
709 zlog_debug(
710 "%s: id %u, lookup %p, vrf %d, type %d, depends %p%s",
711 __func__, lookup->id, lookup, lookup->vrf_id,
712 lookup->type, nhg_depends,
713 (from_dplane ? " (from dplane)" : ""));
714
715 if (lookup->id)
716 (*nhe) = zebra_nhg_lookup_id(lookup->id);
717 else
718 (*nhe) = hash_lookup(zrouter.nhgs, lookup);
719
720 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
721 zlog_debug("%s: lookup => %p (%pNG)", __func__, *nhe, *nhe);
722
723 /* If we found an existing object, we're done */
724 if (*nhe)
725 goto done;
726
727 /* We're going to create/insert a new nhe:
728 * assign the next global id value if necessary.
729 */
730 if (lookup->id == 0)
731 lookup->id = nhg_get_next_id();
732
733 if (!from_dplane && lookup->id < ZEBRA_NHG_PROTO_LOWER) {
734 /*
735 * This is a zebra hashed/owned NHG.
736 *
737 * It goes in HASH and ID table.
738 */
739 newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc);
740 zebra_nhg_insert_id(newnhe);
741 } else {
742 /*
743 * This is upperproto owned NHG or one we read in from dataplane
744 * and should not be hashed to.
745 *
746 * It goes in ID table.
747 */
748 newnhe =
749 hash_get(zrouter.nhgs_id, lookup, zebra_nhg_hash_alloc);
750 }
751
752 created = true;
753
754 /* Mail back the new object */
755 *nhe = newnhe;
756
757 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
758 zlog_debug("%s: => created %p (%pNG)", __func__, newnhe,
759 newnhe);
760
761 /* Only hash/lookup the depends if the first lookup
762 * fails to find something. This should hopefully save a
763 * lot of cycles for larger ecmp sizes.
764 */
765 if (nhg_depends) {
766 /* If you don't want to hash on each nexthop in the
767 * nexthop group struct you can pass the depends
768 * directly. Kernel-side we do this since it just looks
769 * them up via IDs.
770 */
771 zebra_nhg_connect_depends(newnhe, nhg_depends);
772 goto done;
773 }
774
775 /* Prepare dependency relationships if this is not a
776 * singleton nexthop. There are two cases: a single
777 * recursive nexthop, where we need a relationship to the
778 * resolving nexthop; or a group of nexthops, where we need
779 * relationships with the corresponding singletons.
780 */
781 zebra_nhg_depends_init(newnhe);
782
783 nh = newnhe->nhg.nexthop;
784
785 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
786 SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID);
787
788 if (nh->next == NULL && newnhe->id < ZEBRA_NHG_PROTO_LOWER) {
789 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
790 /* Single recursive nexthop */
791 handle_recursive_depend(&newnhe->nhg_depends,
792 nh->resolved, afi,
793 newnhe->type);
794 recursive = true;
795 }
796 } else {
797 /* Proto-owned are groups by default */
798 /* List of nexthops */
799 for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) {
800 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
801 zlog_debug("%s: depends NH %pNHv %s",
802 __func__, nh,
803 CHECK_FLAG(nh->flags,
804 NEXTHOP_FLAG_RECURSIVE) ?
805 "(R)" : "");
806
807 depends_find_add(&newnhe->nhg_depends, nh, afi,
808 newnhe->type, from_dplane);
809 }
810 }
811
812 if (recursive)
813 SET_FLAG(newnhe->flags, NEXTHOP_GROUP_RECURSIVE);
814
815 /* Attach dependent backpointers to singletons */
816 zebra_nhg_connect_depends(newnhe, &newnhe->nhg_depends);
817
818 /**
819 * Backup Nexthops
820 */
821
822 if (zebra_nhg_get_backup_nhg(newnhe) == NULL ||
823 zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL)
824 goto done;
825
826 /* If there are backup nexthops, add them to the backup
827 * depends tree. The rules here are a little different.
828 */
829 recursive = false;
830 backup_nhe = newnhe->backup_info->nhe;
831
832 nh = backup_nhe->nhg.nexthop;
833
834 /* Singleton recursive NH */
835 if (nh->next == NULL &&
836 CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
837 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
838 zlog_debug("%s: backup depend NH %pNHv (R)",
839 __func__, nh);
840
841 /* Single recursive nexthop */
842 handle_recursive_depend(&backup_nhe->nhg_depends, nh->resolved,
843 afi, backup_nhe->type);
844 recursive = true;
845 } else {
846 /* One or more backup NHs */
847 for (; nh; nh = nh->next) {
848 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
849 zlog_debug("%s: backup depend NH %pNHv %s",
850 __func__, nh,
851 CHECK_FLAG(nh->flags,
852 NEXTHOP_FLAG_RECURSIVE) ?
853 "(R)" : "");
854
855 depends_find_add(&backup_nhe->nhg_depends, nh, afi,
856 backup_nhe->type, from_dplane);
857 }
858 }
859
860 if (recursive)
861 SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE);
862
863 done:
864 /* Reset time since last update */
865 (*nhe)->uptime = monotime(NULL);
866
867 return created;
868 }
869
870 /*
871 * Lookup or create an nhe, based on an nhg or an nhe id.
872 */
873 static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
874 struct nexthop_group *nhg,
875 struct nhg_connected_tree_head *nhg_depends,
876 vrf_id_t vrf_id, afi_t afi, int type,
877 bool from_dplane)
878 {
879 struct nhg_hash_entry lookup = {};
880 bool created = false;
881
882 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
883 zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p",
884 __func__, id, nhg, vrf_id, type,
885 nhg_depends);
886
887 /* Use a temporary nhe and call into the superset/common code */
888 lookup.id = id;
889 lookup.type = type ? type : ZEBRA_ROUTE_NHG;
890 lookup.nhg = *nhg;
891
892 lookup.vrf_id = vrf_id;
893 if (nhg_depends || lookup.nhg.nexthop->next) {
894 /* Groups can have all vrfs and AF's in them */
895 lookup.afi = AFI_UNSPEC;
896 } else {
897 switch (lookup.nhg.nexthop->type) {
898 case (NEXTHOP_TYPE_IFINDEX):
899 case (NEXTHOP_TYPE_BLACKHOLE):
900 /*
901 * This switch case handles setting the afi different
902 * for ipv4/v6 routes. Ifindex/blackhole nexthop
903 * objects cannot be ambiguous, they must be Address
904 * Family specific. If we get here, we will either use
905 * the AF of the route, or the one we got passed from
906 * here from the kernel.
907 */
908 lookup.afi = afi;
909 break;
910 case (NEXTHOP_TYPE_IPV4_IFINDEX):
911 case (NEXTHOP_TYPE_IPV4):
912 lookup.afi = AFI_IP;
913 break;
914 case (NEXTHOP_TYPE_IPV6_IFINDEX):
915 case (NEXTHOP_TYPE_IPV6):
916 lookup.afi = AFI_IP6;
917 break;
918 }
919 }
920
921 created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi, from_dplane);
922
923 return created;
924 }
925
926 /* Find/create a single nexthop */
927 static struct nhg_hash_entry *zebra_nhg_find_nexthop(uint32_t id,
928 struct nexthop *nh,
929 afi_t afi, int type,
930 bool from_dplane)
931 {
932 struct nhg_hash_entry *nhe = NULL;
933 struct nexthop_group nhg = {};
934 vrf_id_t vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nh->vrf_id;
935
936 nexthop_group_add_sorted(&nhg, nh);
937
938 zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type, from_dplane);
939
940 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
941 zlog_debug("%s: nh %pNHv => %p (%pNG)", __func__, nh, nhe, nhe);
942
943 return nhe;
944 }
945
946 static uint32_t nhg_ctx_get_id(const struct nhg_ctx *ctx)
947 {
948 return ctx->id;
949 }
950
951 static void nhg_ctx_set_status(struct nhg_ctx *ctx, enum nhg_ctx_status status)
952 {
953 ctx->status = status;
954 }
955
956 static enum nhg_ctx_status nhg_ctx_get_status(const struct nhg_ctx *ctx)
957 {
958 return ctx->status;
959 }
960
961 static void nhg_ctx_set_op(struct nhg_ctx *ctx, enum nhg_ctx_op_e op)
962 {
963 ctx->op = op;
964 }
965
966 static enum nhg_ctx_op_e nhg_ctx_get_op(const struct nhg_ctx *ctx)
967 {
968 return ctx->op;
969 }
970
971 static vrf_id_t nhg_ctx_get_vrf_id(const struct nhg_ctx *ctx)
972 {
973 return ctx->vrf_id;
974 }
975
976 static int nhg_ctx_get_type(const struct nhg_ctx *ctx)
977 {
978 return ctx->type;
979 }
980
981 static int nhg_ctx_get_afi(const struct nhg_ctx *ctx)
982 {
983 return ctx->afi;
984 }
985
986 static struct nexthop *nhg_ctx_get_nh(struct nhg_ctx *ctx)
987 {
988 return &ctx->u.nh;
989 }
990
991 static uint8_t nhg_ctx_get_count(const struct nhg_ctx *ctx)
992 {
993 return ctx->count;
994 }
995
996 static struct nh_grp *nhg_ctx_get_grp(struct nhg_ctx *ctx)
997 {
998 return ctx->u.grp;
999 }
1000
1001 static struct nhg_resilience *nhg_ctx_get_resilience(struct nhg_ctx *ctx)
1002 {
1003 return &ctx->resilience;
1004 }
1005
1006 static struct nhg_ctx *nhg_ctx_new(void)
1007 {
1008 struct nhg_ctx *new;
1009
1010 new = XCALLOC(MTYPE_NHG_CTX, sizeof(struct nhg_ctx));
1011
1012 return new;
1013 }
1014
1015 void nhg_ctx_free(struct nhg_ctx **ctx)
1016 {
1017 struct nexthop *nh;
1018
1019 if (ctx == NULL)
1020 return;
1021
1022 assert((*ctx) != NULL);
1023
1024 if (nhg_ctx_get_count(*ctx))
1025 goto done;
1026
1027 nh = nhg_ctx_get_nh(*ctx);
1028
1029 nexthop_del_labels(nh);
1030 nexthop_del_srv6_seg6local(nh);
1031 nexthop_del_srv6_seg6(nh);
1032
1033 done:
1034 XFREE(MTYPE_NHG_CTX, *ctx);
1035 }
1036
1037 static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh,
1038 struct nh_grp *grp, vrf_id_t vrf_id,
1039 afi_t afi, int type, uint8_t count,
1040 struct nhg_resilience *resilience)
1041 {
1042 struct nhg_ctx *ctx = NULL;
1043
1044 ctx = nhg_ctx_new();
1045
1046 ctx->id = id;
1047 ctx->vrf_id = vrf_id;
1048 ctx->afi = afi;
1049 ctx->type = type;
1050 ctx->count = count;
1051
1052 if (resilience)
1053 ctx->resilience = *resilience;
1054
1055 if (count)
1056 /* Copy over the array */
1057 memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp));
1058 else if (nh)
1059 ctx->u.nh = *nh;
1060
1061 return ctx;
1062 }
1063
1064 static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe)
1065 {
1066 struct nhg_connected *rb_node_dep;
1067
1068 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1069
1070 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
1071 zebra_nhg_set_valid(rb_node_dep->nhe);
1072 }
1073
1074 static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
1075 {
1076 struct nhg_connected *rb_node_dep;
1077
1078 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1079
1080 /* If we're in shutdown, this interface event needs to clean
1081 * up installed NHGs, so don't clear that flag directly.
1082 */
1083 if (!zebra_router_in_shutdown())
1084 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1085
1086 /* Update validity of nexthops depending on it */
1087 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
1088 zebra_nhg_check_valid(rb_node_dep->nhe);
1089 }
1090
1091 void zebra_nhg_check_valid(struct nhg_hash_entry *nhe)
1092 {
1093 struct nhg_connected *rb_node_dep = NULL;
1094 bool valid = false;
1095
1096 /* If anthing else in the group is valid, the group is valid */
1097 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
1098 if (CHECK_FLAG(rb_node_dep->nhe->flags, NEXTHOP_GROUP_VALID)) {
1099 valid = true;
1100 goto done;
1101 }
1102 }
1103
1104 done:
1105 if (valid)
1106 zebra_nhg_set_valid(nhe);
1107 else
1108 zebra_nhg_set_invalid(nhe);
1109 }
1110
1111 static void zebra_nhg_release_all_deps(struct nhg_hash_entry *nhe)
1112 {
1113 /* Remove it from any lists it may be on */
1114 zebra_nhg_depends_release(nhe);
1115 zebra_nhg_dependents_release(nhe);
1116 if (nhe->ifp)
1117 if_nhg_dependents_del(nhe->ifp, nhe);
1118 }
1119
1120 static void zebra_nhg_release(struct nhg_hash_entry *nhe)
1121 {
1122 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1123 zlog_debug("%s: nhe %p (%pNG)", __func__, nhe, nhe);
1124
1125 zebra_nhg_release_all_deps(nhe);
1126
1127 /*
1128 * If its not zebra owned, we didn't store it here and have to be
1129 * sure we don't clear one thats actually being used.
1130 */
1131 if (nhe->id < ZEBRA_NHG_PROTO_LOWER)
1132 hash_release(zrouter.nhgs, nhe);
1133
1134 hash_release(zrouter.nhgs_id, nhe);
1135 }
1136
/* Fully retire an nhe that is gone from the kernel: remove it from the
 * hash tables, then free its memory.
 */
static void zebra_nhg_handle_uninstall(struct nhg_hash_entry *nhe)
{
	zebra_nhg_release(nhe);
	zebra_nhg_free(nhe);
}
1142
/* An nhe was installed; mark everything depending on it valid. */
static void zebra_nhg_handle_install(struct nhg_hash_entry *nhe)
{
	/* Update validity of groups depending on it */
	struct nhg_connected *rb_node_dep;

	/* _safe variant: zebra_nhg_set_valid() walks dependents recursively */
	frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
		zebra_nhg_set_valid(rb_node_dep->nhe);
}
1151
1152 /*
1153 * The kernel/other program has changed the state of a nexthop object we are
1154 * using.
1155 */
1156 static void zebra_nhg_handle_kernel_state_change(struct nhg_hash_entry *nhe,
1157 bool is_delete)
1158 {
1159 if (nhe->refcnt) {
1160 flog_err(
1161 EC_ZEBRA_NHG_SYNC,
1162 "Kernel %s a nexthop group with ID (%pNG) that we are still using for a route, sending it back down",
1163 (is_delete ? "deleted" : "updated"), nhe);
1164
1165 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1166 zebra_nhg_install_kernel(nhe);
1167 } else
1168 zebra_nhg_handle_uninstall(nhe);
1169 }
1170
/*
 * Process a kernel-originated "new nexthop group" context.
 *
 * Returns 0 on success (including the already-known/update case),
 * -ENOENT when the group references a member nexthop we have not
 * processed yet (caller may requeue the ctx), and -1 when the hash
 * entry could not be found or created.
 */
static int nhg_ctx_process_new(struct nhg_ctx *ctx)
{
	struct nexthop_group *nhg = NULL;
	struct nhg_connected_tree_head nhg_depends = {};
	struct nhg_hash_entry *lookup = NULL;
	struct nhg_hash_entry *nhe = NULL;

	uint32_t id = nhg_ctx_get_id(ctx);
	uint8_t count = nhg_ctx_get_count(ctx);
	vrf_id_t vrf_id = nhg_ctx_get_vrf_id(ctx);
	int type = nhg_ctx_get_type(ctx);
	afi_t afi = nhg_ctx_get_afi(ctx);

	lookup = zebra_nhg_lookup_id(id);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: id %u, count %d, lookup => %p",
			   __func__, id, count, lookup);

	if (lookup) {
		/* This is already present in our table, hence an update
		 * that we did not initiate.
		 */
		zebra_nhg_handle_kernel_state_change(lookup, false);
		return 0;
	}

	if (nhg_ctx_get_count(ctx)) {
		/* Group case: resolve each member ID into a depend entry */
		nhg = nexthop_group_new();
		if (zebra_nhg_process_grp(nhg, &nhg_depends,
					  nhg_ctx_get_grp(ctx), count,
					  nhg_ctx_get_resilience(ctx))) {
			/* A member is unknown; undo refs and signal requeue */
			depends_decrement_free(&nhg_depends);
			nexthop_group_delete(&nhg);
			return -ENOENT;
		}

		/* Non-zero return means an equivalent entry already existed
		 * and our freshly built depends tree is redundant.
		 */
		if (!zebra_nhg_find(&nhe, id, nhg, &nhg_depends, vrf_id, afi,
				    type, true))
			depends_decrement_free(&nhg_depends);

		/* These got copied over in zebra_nhg_alloc() */
		nexthop_group_delete(&nhg);
	} else
		/* Singleton case: one nexthop, no member group */
		nhe = zebra_nhg_find_nexthop(id, nhg_ctx_get_nh(ctx), afi, type,
					     true);

	if (!nhe) {
		flog_err(
			EC_ZEBRA_TABLE_LOOKUP_FAILED,
			"Zebra failed to find or create a nexthop hash entry for ID (%u)",
			id);
		return -1;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) is new", __func__, nhe, nhe);

	/*
	 * If daemon nhg from the kernel, add a refcnt here to indicate the
	 * daemon owns it.
	 */
	if (PROTO_OWNED(nhe))
		zebra_nhg_increment_ref(nhe);

	/* It came from the kernel, so it is by definition valid/installed */
	SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
	SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);

	return 0;
}
1241
1242 static int nhg_ctx_process_del(struct nhg_ctx *ctx)
1243 {
1244 struct nhg_hash_entry *nhe = NULL;
1245 uint32_t id = nhg_ctx_get_id(ctx);
1246
1247 nhe = zebra_nhg_lookup_id(id);
1248
1249 if (!nhe) {
1250 flog_warn(
1251 EC_ZEBRA_BAD_NHG_MESSAGE,
1252 "Kernel delete message received for nexthop group ID (%u) that we do not have in our ID table",
1253 id);
1254 return -1;
1255 }
1256
1257 zebra_nhg_handle_kernel_state_change(nhe, true);
1258
1259 return 0;
1260 }
1261
/* Dispose of a finished context; zeroes the caller's pointer. */
static void nhg_ctx_fini(struct nhg_ctx **ctx)
{
	/*
	 * Just freeing for now, maybe do something more in the future
	 * based on flag.
	 */

	nhg_ctx_free(ctx);
}
1271
1272 static int queue_add(struct nhg_ctx *ctx)
1273 {
1274 /* If its queued or already processed do nothing */
1275 if (nhg_ctx_get_status(ctx) == NHG_CTX_QUEUED)
1276 return 0;
1277
1278 if (rib_queue_nhg_ctx_add(ctx)) {
1279 nhg_ctx_set_status(ctx, NHG_CTX_FAILURE);
1280 return -1;
1281 }
1282
1283 nhg_ctx_set_status(ctx, NHG_CTX_QUEUED);
1284
1285 return 0;
1286 }
1287
1288 int nhg_ctx_process(struct nhg_ctx *ctx)
1289 {
1290 int ret = 0;
1291
1292 switch (nhg_ctx_get_op(ctx)) {
1293 case NHG_CTX_OP_NEW:
1294 ret = nhg_ctx_process_new(ctx);
1295 if (nhg_ctx_get_count(ctx) && ret == -ENOENT
1296 && nhg_ctx_get_status(ctx) != NHG_CTX_REQUEUED) {
1297 /**
1298 * We have entered a situation where we are
1299 * processing a group from the kernel
1300 * that has a contained nexthop which
1301 * we have not yet processed.
1302 *
1303 * Re-enqueue this ctx to be handled exactly one
1304 * more time (indicated by the flag).
1305 *
1306 * By the time we get back to it, we
1307 * should have processed its depends.
1308 */
1309 nhg_ctx_set_status(ctx, NHG_CTX_NONE);
1310 if (queue_add(ctx) == 0) {
1311 nhg_ctx_set_status(ctx, NHG_CTX_REQUEUED);
1312 return 0;
1313 }
1314 }
1315 break;
1316 case NHG_CTX_OP_DEL:
1317 ret = nhg_ctx_process_del(ctx);
1318 case NHG_CTX_OP_NONE:
1319 break;
1320 }
1321
1322 nhg_ctx_set_status(ctx, (ret ? NHG_CTX_FAILURE : NHG_CTX_SUCCESS));
1323
1324 nhg_ctx_fini(&ctx);
1325
1326 return ret;
1327 }
1328
/* Kernel-side, you either get a single new nexthop or a array of ID's */
int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
			  uint8_t count, vrf_id_t vrf_id, afi_t afi, int type,
			  int startup, struct nhg_resilience *nhgr)
{
	struct nhg_ctx *ctx = NULL;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nh %pNHv, id %u, count %d",
			   __func__, nh, id, (int)count);

	/* Only track kernel-range IDs; proto-owned IDs are managed by
	 * the owning daemon.
	 */
	if (id > id_counter && id < ZEBRA_NHG_PROTO_LOWER)
		/* Increase our counter so we don't try to create
		 * an ID that already exists
		 */
		id_counter = id;

	ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count, nhgr);
	nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);

	/* Under startup conditions, we need to handle them immediately
	 * like we do for routes. Otherwise, we are going to get a route
	 * with a nhe_id that we have not handled.
	 */
	if (startup)
		return nhg_ctx_process(ctx);

	/* Normal operation: process asynchronously off the rib queue */
	if (queue_add(ctx)) {
		nhg_ctx_fini(&ctx);
		return -1;
	}

	return 0;
}
1363
1364 /* Kernel-side, received delete message */
1365 int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id)
1366 {
1367 struct nhg_ctx *ctx = NULL;
1368
1369 ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0, NULL);
1370
1371 nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL);
1372
1373 if (queue_add(ctx)) {
1374 nhg_ctx_fini(&ctx);
1375 return -1;
1376 }
1377
1378 return 0;
1379 }
1380
1381 /* Some dependency helper functions */
1382 static struct nhg_hash_entry *depends_find_recursive(const struct nexthop *nh,
1383 afi_t afi, int type)
1384 {
1385 struct nhg_hash_entry *nhe;
1386 struct nexthop *lookup = NULL;
1387
1388 lookup = nexthop_dup(nh, NULL);
1389
1390 nhe = zebra_nhg_find_nexthop(0, lookup, afi, type, false);
1391
1392 nexthops_free(lookup);
1393
1394 return nhe;
1395 }
1396
/* Find (or create) the singleton nhe for one non-recursive nexthop. */
static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
						     afi_t afi, int type,
						     bool from_dplane)
{
	struct nhg_hash_entry *nhe;
	struct nexthop lookup = {};

	/* Capture a snapshot of this single nh; it might be part of a list,
	 * so we need to make a standalone copy.
	 */
	nexthop_copy_no_recurse(&lookup, nh, NULL);

	nhe = zebra_nhg_find_nexthop(0, &lookup, afi, type, from_dplane);

	/* The copy may have allocated labels; free them if necessary.
	 * (lookup is stack-allocated, so only its owned sub-objects
	 * need explicit release.)
	 */
	nexthop_del_labels(&lookup);
	nexthop_del_srv6_seg6local(&lookup);
	nexthop_del_srv6_seg6(&lookup);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nh %pNHv => %p (%pNG)", __func__, nh, nhe, nhe);

	return nhe;
}
1421
1422 static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi,
1423 int type, bool from_dplane)
1424 {
1425 struct nhg_hash_entry *nhe = NULL;
1426
1427 if (!nh)
1428 goto done;
1429
1430 /* We are separating these functions out to increase handling speed
1431 * in the non-recursive case (by not alloc/freeing)
1432 */
1433 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
1434 nhe = depends_find_recursive(nh, afi, type);
1435 else
1436 nhe = depends_find_singleton(nh, afi, type, from_dplane);
1437
1438
1439 if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
1440 zlog_debug("%s: nh %pNHv %s => %p (%pNG)", __func__, nh,
1441 CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE) ? "(R)"
1442 : "",
1443 nhe, nhe);
1444 }
1445
1446 done:
1447 return nhe;
1448 }
1449
1450 static void depends_add(struct nhg_connected_tree_head *head,
1451 struct nhg_hash_entry *depend)
1452 {
1453 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1454 zlog_debug("%s: head %p nh %pNHv",
1455 __func__, head, depend->nhg.nexthop);
1456
1457 /* If NULL is returned, it was successfully added and
1458 * needs to have its refcnt incremented.
1459 *
1460 * Else the NHE is already present in the tree and doesn't
1461 * need to increment the refcnt.
1462 */
1463 if (nhg_connected_tree_add_nhe(head, depend) == NULL)
1464 zebra_nhg_increment_ref(depend);
1465 }
1466
1467 static struct nhg_hash_entry *
1468 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
1469 afi_t afi, int type, bool from_dplane)
1470 {
1471 struct nhg_hash_entry *depend = NULL;
1472
1473 depend = depends_find(nh, afi, type, from_dplane);
1474
1475 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1476 zlog_debug("%s: nh %pNHv => %p",
1477 __func__, nh, depend);
1478
1479 if (depend)
1480 depends_add(head, depend);
1481
1482 return depend;
1483 }
1484
1485 static struct nhg_hash_entry *
1486 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id)
1487 {
1488 struct nhg_hash_entry *depend = NULL;
1489
1490 depend = zebra_nhg_lookup_id(id);
1491
1492 if (depend)
1493 depends_add(head, depend);
1494
1495 return depend;
1496 }
1497
/* Drop the references held by every entry in the tree, then free the
 * tree's own node storage.
 */
static void depends_decrement_free(struct nhg_connected_tree_head *head)
{
	nhg_connected_tree_decrement_ref(head);
	nhg_connected_tree_free(head);
}
1503
1504 /* Find an nhe based on a list of nexthops */
1505 struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
1506 struct nexthop_group *nhg,
1507 afi_t rt_afi, int type)
1508 {
1509 struct nhg_hash_entry *nhe = NULL;
1510 vrf_id_t vrf_id;
1511
1512 /*
1513 * CLANG SA is complaining that nexthop may be NULL
1514 * Make it happy but this is ridonc
1515 */
1516 assert(nhg->nexthop);
1517 vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nhg->nexthop->vrf_id;
1518
1519 zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, type, false);
1520
1521 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1522 zlog_debug("%s: => nhe %p (%pNG)", __func__, nhe, nhe);
1523
1524 return nhe;
1525 }
1526
1527 /* Find an nhe based on a route's nhe */
1528 struct nhg_hash_entry *
1529 zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi)
1530 {
1531 struct nhg_hash_entry *nhe = NULL;
1532
1533 if (!(rt_nhe && rt_nhe->nhg.nexthop)) {
1534 flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
1535 "No nexthop passed to %s", __func__);
1536 return NULL;
1537 }
1538
1539 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1540 zlog_debug("%s: rt_nhe %p (%pNG)", __func__, rt_nhe, rt_nhe);
1541
1542 zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi, false);
1543
1544 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
1545 zlog_debug("%s: => nhe %p (%pNG)", __func__, nhe, nhe);
1546
1547 return nhe;
1548 }
1549
/*
 * Allocate backup nexthop info object. Typically these are embedded in
 * nhg_hash_entry objects.
 */
struct nhg_backup_info *zebra_nhg_backup_alloc(void)
{
	struct nhg_backup_info *p;

	p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info));

	/* The embedded nhe holds the backup nexthop list itself */
	p->nhe = zebra_nhg_alloc();

	/* Identify the embedded group used to hold the list of backups */
	SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP);

	return p;
}
1567
1568 /*
1569 * Free backup nexthop info object, deal with any embedded allocations
1570 */
1571 void zebra_nhg_backup_free(struct nhg_backup_info **p)
1572 {
1573 if (p && *p) {
1574 if ((*p)->nhe)
1575 zebra_nhg_free((*p)->nhe);
1576
1577 XFREE(MTYPE_NHG, (*p));
1578 }
1579 }
1580
1581 /* Accessor for backup nexthop group */
1582 struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe)
1583 {
1584 struct nexthop_group *p = NULL;
1585
1586 if (nhe) {
1587 if (nhe->backup_info && nhe->backup_info->nhe)
1588 p = &(nhe->backup_info->nhe->nhg);
1589 }
1590
1591 return p;
1592 }
1593
/*
 * Helper to return a copy of a backup_info - note that this is a shallow
 * copy, meant to be used when creating a new nhe from info passed in with
 * a route e.g.
 */
static struct nhg_backup_info *
nhg_backup_copy(const struct nhg_backup_info *orig)
{
	struct nhg_backup_info *b;

	b = zebra_nhg_backup_alloc();

	/* Copy list of nexthops */
	nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg));

	return b;
}
1611
/* Free everything an nhe owns, without freeing the nhe itself. */
static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
{
	nexthops_free(nhe->nhg.nexthop);

	zebra_nhg_backup_free(&nhe->backup_info);

	/* Decrement to remove connection ref */
	nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
	nhg_connected_tree_free(&nhe->nhg_depends);
	nhg_connected_tree_free(&nhe->nhg_dependents);
}
1623
/* Free an nhe: cancel its timer, release owned members, free the struct. */
void zebra_nhg_free(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
		/* Group or singleton? */
		if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
			zlog_debug("%s: nhe %p (%pNG), refcnt %d", __func__,
				   nhe, nhe, nhe->refcnt);
		else
			zlog_debug("%s: nhe %p (%pNG), refcnt %d, NH %pNHv",
				   __func__, nhe, nhe, nhe->refcnt,
				   nhe->nhg.nexthop);
	}

	/* A pending keep-around timer must not fire on freed memory */
	THREAD_OFF(nhe->timer);

	zebra_nhg_free_members(nhe);

	XFREE(MTYPE_NHG, nhe);
}
1643
1644 /*
1645 * Let's just drop the memory associated with each item
1646 */
1647 void zebra_nhg_hash_free(void *p)
1648 {
1649 struct nhg_hash_entry *nhe = p;
1650
1651 if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
1652 /* Group or singleton? */
1653 if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
1654 zlog_debug("%s: nhe %p (%u), refcnt %d", __func__, nhe,
1655 nhe->id, nhe->refcnt);
1656 else
1657 zlog_debug("%s: nhe %p (%pNG), refcnt %d, NH %pNHv",
1658 __func__, nhe, nhe, nhe->refcnt,
1659 nhe->nhg.nexthop);
1660 }
1661
1662 THREAD_OFF(nhe->timer);
1663
1664 nexthops_free(nhe->nhg.nexthop);
1665
1666 XFREE(MTYPE_NHG, nhe);
1667 }
1668
/*
 * On cleanup there are nexthop groups that have not
 * been resolved at all( a nhe->id of 0 ). As such
 * zebra needs to clean up the memory associated with
 * those entries.
 */
void zebra_nhg_hash_free_zero_id(struct hash_bucket *b, void *arg)
{
	struct nhg_hash_entry *nhe = b->data;
	struct nhg_connected *dep;

	/* Free unresolved (id == 0) depends; they live in no hash table,
	 * so nothing else will ever free them.
	 */
	while ((dep = nhg_connected_tree_pop(&nhe->nhg_depends))) {
		if (dep->nhe->id == 0)
			zebra_nhg_hash_free(dep->nhe);

		nhg_connected_free(dep);
	}

	/* Dependents are owned elsewhere; only drop the tree nodes */
	while ((dep = nhg_connected_tree_pop(&nhe->nhg_dependents)))
		nhg_connected_free(dep);

	/* The embedded backup nhe (also id 0) needs the same treatment */
	if (nhe->backup_info && nhe->backup_info->nhe->id == 0) {
		while ((dep = nhg_connected_tree_pop(
				&nhe->backup_info->nhe->nhg_depends)))
			nhg_connected_free(dep);

		zebra_nhg_hash_free(nhe->backup_info->nhe);

		XFREE(MTYPE_NHG, nhe->backup_info);
	}
}
1700
/* Keep-around timer expiry: if nothing reclaimed the entry (refcnt is
 * still just the artificial keep-around reference), drop that final ref.
 */
static void zebra_nhg_timer(struct thread *thread)
{
	struct nhg_hash_entry *nhe = THREAD_ARG(thread);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("Nexthop Timer for nhe: %pNG", nhe);

	if (nhe->refcnt == 1)
		zebra_nhg_decrement_ref(nhe);
}
1711
/* Drop one reference from 'nhe'; may start a keep-around timer or
 * trigger kernel uninstall when the count reaches zero.
 */
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) %d => %d", __func__, nhe, nhe,
			   nhe->refcnt, nhe->refcnt - 1);

	nhe->refcnt--;

	/* Rather than tearing down an installed-but-now-unused group
	 * immediately, pin its refcnt at 1, mark it KEEP_AROUND, and let
	 * a timer (zrouter.nhg_keep seconds) drop the last reference if
	 * nothing reclaims it.  Skipped during shutdown so teardown is
	 * not delayed.
	 */
	if (!zebra_router_in_shutdown() && nhe->refcnt <= 0 &&
	    CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
	    !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
		nhe->refcnt = 1;
		SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
		thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
				 zrouter.nhg_keep, &nhe->timer);
		return;
	}

	/* Propagate the drop to all member entries */
	if (!zebra_nhg_depends_is_empty(nhe))
		nhg_connected_tree_decrement_ref(&nhe->nhg_depends);

	if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0)
		zebra_nhg_uninstall_kernel(nhe);
}
1736
/* Take one reference on 'nhe', cancelling any pending keep-around timer. */
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) %d => %d", __func__, nhe, nhe,
			   nhe->refcnt, nhe->refcnt + 1);

	nhe->refcnt++;

	/* If a keep-around timer is pending, the entry is live again:
	 * stop the timer and drop the artificial reference it held.
	 */
	if (thread_is_scheduled(nhe->timer)) {
		THREAD_OFF(nhe->timer);
		nhe->refcnt--;
		UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
	}

	/* Propagate the new reference to all member entries */
	if (!zebra_nhg_depends_is_empty(nhe))
		nhg_connected_tree_increment_ref(&nhe->nhg_depends);
}
1754
/*
 * Build the 'resolved' nexthop for a recursive nexthop 'nexthop' that
 * resolved through the route nexthop 'newhop', and attach it to
 * nexthop->resolved.  Label stacks from the SR policy (if given), the
 * resolving route, and the parent nexthop are merged onto the result.
 * Returns the newly allocated resolved nexthop.
 */
static struct nexthop *nexthop_set_resolved(afi_t afi,
					    const struct nexthop *newhop,
					    struct nexthop *nexthop,
					    struct zebra_sr_policy *policy)
{
	struct nexthop *resolved_hop;
	uint8_t num_labels = 0;
	mpls_label_t labels[MPLS_MAX_LABELS];
	enum lsp_types_t label_type = ZEBRA_LSP_NONE;
	int i = 0;

	resolved_hop = nexthop_new();
	SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);

	/* The resolved hop inherits the original nexthop's vrf */
	resolved_hop->vrf_id = nexthop->vrf_id;
	switch (newhop->type) {
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		/* If the resolving route specifies a gateway, use it */
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv4 = newhop->gate.ipv4;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IPV6:
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv6 = newhop->gate.ipv6;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IFINDEX:
		/* If the resolving route is an interface route,
		 * it means the gateway we are looking up is connected
		 * to that interface. (The actual network is _not_ onlink).
		 * Therefore, the resolved route should have the original
		 * gateway as nexthop as it is directly connected.
		 *
		 * On Linux, we have to set the onlink netlink flag because
		 * otherwise, the kernel won't accept the route.
		 */
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
		if (afi == AFI_IP) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
		} else if (afi == AFI_IP6) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
		}
		resolved_hop->ifindex = newhop->ifindex;
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
		resolved_hop->bh_type = newhop->bh_type;
		break;
	}

	if (newhop->flags & NEXTHOP_FLAG_ONLINK)
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;

	/* Copy labels of the resolved route and the parent resolving to it */
	if (policy) {
		int label_num = 0;

		/*
		 * Don't push the first SID if the corresponding action in the
		 * LFIB is POP.
		 */
		if (!newhop->nh_label || !newhop->nh_label->num_labels
		    || newhop->nh_label->label[0] == MPLS_LABEL_IMPLICIT_NULL)
			label_num = 1;

		for (; label_num < policy->segment_list.label_num; label_num++)
			labels[num_labels++] =
				policy->segment_list.labels[label_num];
		label_type = policy->segment_list.type;
	} else if (newhop->nh_label) {
		for (i = 0; i < newhop->nh_label->num_labels; i++) {
			/* Be a bit picky about overrunning the local array */
			if (num_labels >= MPLS_MAX_LABELS) {
				if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
					zlog_debug("%s: too many labels in newhop %pNHv",
						   __func__, newhop);
				break;
			}
			labels[num_labels++] = newhop->nh_label->label[i];
		}
		/* Use the "outer" type */
		label_type = newhop->nh_label_type;
	}

	/* Then append the parent (recursive) nexthop's own labels */
	if (nexthop->nh_label) {
		for (i = 0; i < nexthop->nh_label->num_labels; i++) {
			/* Be a bit picky about overrunning the local array */
			if (num_labels >= MPLS_MAX_LABELS) {
				if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
					zlog_debug("%s: too many labels in nexthop %pNHv",
						   __func__, nexthop);
				break;
			}
			labels[num_labels++] = nexthop->nh_label->label[i];
		}

		/* If the parent has labels, use its type if
		 * we don't already have one.
		 */
		if (label_type == ZEBRA_LSP_NONE)
			label_type = nexthop->nh_label_type;
	}

	if (num_labels)
		nexthop_add_labels(resolved_hop, label_type, num_labels,
				   labels);

	/* Carry the parent's SRv6 state over to the resolved hop */
	if (nexthop->nh_srv6) {
		nexthop_add_srv6_seg6local(resolved_hop,
					   nexthop->nh_srv6->seg6local_action,
					   &nexthop->nh_srv6->seg6local_ctx);
		nexthop_add_srv6_seg6(resolved_hop,
				      &nexthop->nh_srv6->seg6_segs);
	}

	resolved_hop->rparent = nexthop;
	_nexthop_add(&nexthop->resolved, resolved_hop);

	return resolved_hop;
}
1888
1889 /* Checks if nexthop we are trying to resolve to is valid */
1890 static bool nexthop_valid_resolve(const struct nexthop *nexthop,
1891 const struct nexthop *resolved)
1892 {
1893 /* Can't resolve to a recursive nexthop */
1894 if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE))
1895 return false;
1896
1897 /* Must be ACTIVE */
1898 if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE))
1899 return false;
1900
1901 /* Must not be duplicate */
1902 if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_DUPLICATE))
1903 return false;
1904
1905 switch (nexthop->type) {
1906 case NEXTHOP_TYPE_IPV4_IFINDEX:
1907 case NEXTHOP_TYPE_IPV6_IFINDEX:
1908 /* If the nexthop we are resolving to does not match the
1909 * ifindex for the nexthop the route wanted, its not valid.
1910 */
1911 if (nexthop->ifindex != resolved->ifindex)
1912 return false;
1913 break;
1914 case NEXTHOP_TYPE_IPV4:
1915 case NEXTHOP_TYPE_IPV6:
1916 case NEXTHOP_TYPE_IFINDEX:
1917 case NEXTHOP_TYPE_BLACKHOLE:
1918 break;
1919 }
1920
1921 return true;
1922 }
1923
/*
 * When resolving a recursive nexthop, capture backup nexthop(s) also
 * so they can be conveyed through the dataplane to the FIB. We'll look
 * at the backups in the resolving nh 'nexthop' and its nhe, and copy them
 * into the route's resolved nh 'resolved' and its nhe 'nhe'.
 *
 * 'map' caches original-index -> new-index translations across calls
 * for the same route so a shared backup is copied only once.
 * Always returns 0.
 */
static int resolve_backup_nexthops(const struct nexthop *nexthop,
				   const struct nhg_hash_entry *nhe,
				   struct nexthop *resolved,
				   struct nhg_hash_entry *resolve_nhe,
				   struct backup_nh_map_s *map)
{
	int i, j, idx;
	const struct nexthop *bnh;
	struct nexthop *nh, *newnh;
	mpls_label_t labels[MPLS_MAX_LABELS];
	uint8_t num_labels;

	assert(nexthop->backup_num <= NEXTHOP_MAX_BACKUPS);

	/* Locate backups from the original nexthop's backup index and nhe */
	for (i = 0; i < nexthop->backup_num; i++) {
		idx = nexthop->backup_idx[i];

		/* Do we already know about this particular backup? */
		for (j = 0; j < map->map_count; j++) {
			if (map->map[j].orig_idx == idx)
				break;
		}

		if (j < map->map_count) {
			/* Cache hit: reuse the previously computed index */
			resolved->backup_idx[resolved->backup_num] =
				map->map[j].new_idx;
			resolved->backup_num++;

			SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);

			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug("%s: found map idx orig %d, new %d",
					   __func__, map->map[j].orig_idx,
					   map->map[j].new_idx);

			continue;
		}

		/* We can't handle any new map entries at this point. */
		if (map->map_count == MULTIPATH_NUM)
			break;

		/* Need to create/copy a new backup: walk the resolving
		 * nhe's backup list to position 'idx'.
		 */
		bnh = nhe->backup_info->nhe->nhg.nexthop;
		for (j = 0; j < idx; j++) {
			if (bnh == NULL)
				break;
			bnh = bnh->next;
		}

		/* Whoops - bad index in the nexthop? */
		if (bnh == NULL)
			continue;

		if (resolve_nhe->backup_info == NULL)
			resolve_nhe->backup_info = zebra_nhg_backup_alloc();

		/* Update backup info in the resolving nexthop and its nhe */
		newnh = nexthop_dup_no_recurse(bnh, NULL);

		/* We may need some special handling for mpls labels: the new
		 * backup needs to carry the recursive nexthop's labels,
		 * if any: they may be vrf labels e.g.
		 * The original/inner labels are in the stack of 'resolve_nhe',
		 * if that is longer than the stack in 'nexthop'.
		 */
		if (newnh->nh_label && resolved->nh_label &&
		    nexthop->nh_label) {
			if (resolved->nh_label->num_labels >
			    nexthop->nh_label->num_labels) {
				/* Prepare new label stack */
				num_labels = 0;
				for (j = 0; j < newnh->nh_label->num_labels;
				     j++) {
					labels[j] = newnh->nh_label->label[j];
					num_labels++;
				}

				/* Include inner labels */
				for (j = nexthop->nh_label->num_labels;
				     j < resolved->nh_label->num_labels;
				     j++) {
					labels[num_labels] =
						resolved->nh_label->label[j];
					num_labels++;
				}

				/* Replace existing label stack in the backup */
				nexthop_del_labels(newnh);
				nexthop_add_labels(newnh, bnh->nh_label_type,
						   num_labels, labels);
			}
		}

		/* Need to compute the new backup index in the new
		 * backup list, and add to map struct.
		 */
		j = 0;
		nh = resolve_nhe->backup_info->nhe->nhg.nexthop;
		if (nh) {
			/* Append at the tail; 'j' ends as the new index */
			while (nh->next) {
				nh = nh->next;
				j++;
			}

			nh->next = newnh;
			j++;

		} else /* First one */
			resolve_nhe->backup_info->nhe->nhg.nexthop = newnh;

		/* Capture index */
		resolved->backup_idx[resolved->backup_num] = j;
		resolved->backup_num++;

		SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);

		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("%s: added idx orig %d, new %d",
				   __func__, idx, j);

		/* Update map/cache */
		map->map[map->map_count].orig_idx = idx;
		map->map[map->map_count].new_idx = j;
		map->map_count++;
	}

	return 0;
}
2060
/*
 * So this nexthop resolution has decided that a connected route
 * is the correct choice. At this point in time if FRR has multiple
 * connected routes that all point to the same prefix one will be
 * selected, *but* the particular interface may not be the one
 * that the nexthop points at. Let's look at all the available
 * connected routes on this node and if any of them auto match
 * the routes nexthops ifindex that is good enough for a match
 *
 * This code is depending on the fact that a nexthop->ifindex is 0
 * if it is not known, if this assumption changes, yummy!
 * Additionally an ifindex of 0 means figure it out for us.
 */
static struct route_entry *
zebra_nhg_connected_ifindex(struct route_node *rn, struct route_entry *match,
			    int32_t curr_ifindex)
{
	struct nexthop *newhop = match->nhe->nhg.nexthop;
	struct route_entry *re;

	assert(newhop); /* What a kick in the patooey */

	/* No preference expressed: the selected entry is fine */
	if (curr_ifindex == 0)
		return match;

	/* The selected entry already matches the wanted ifindex */
	if (curr_ifindex == newhop->ifindex)
		return match;

	/*
	 * At this point we know that this route is matching a connected
	 * but there are possibly a bunch of connected routes that are
	 * alive that should be considered as well. So let's iterate over
	 * all the re's and see if they are connected as well and maybe one
	 * of those ifindexes match as well.
	 */
	RNODE_FOREACH_RE (rn, re) {
		if (re->type != ZEBRA_ROUTE_CONNECT)
			continue;

		if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
			continue;

		/*
		 * zebra has a connected route that is not removed
		 * let's test if it is good
		 */
		newhop = re->nhe->nhg.nexthop;
		assert(newhop);
		if (curr_ifindex == newhop->ifindex)
			return re;
	}

	/* Nothing better found; fall back to the originally selected entry */
	return match;
}
2115
2116 /*
2117 * Given a nexthop we need to properly recursively resolve,
2118 * do a table lookup to find and match if at all possible.
2119 * Set the nexthop->ifindex and resolution info as appropriate.
2120 */
2121 static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
2122 const struct prefix *top, int type, uint32_t flags,
2123 uint32_t *pmtu, vrf_id_t vrf_id)
2124 {
2125 struct prefix p;
2126 struct route_table *table;
2127 struct route_node *rn;
2128 struct route_entry *match = NULL;
2129 int resolved;
2130 struct zebra_nhlfe *nhlfe;
2131 struct nexthop *newhop;
2132 struct interface *ifp;
2133 rib_dest_t *dest;
2134 struct zebra_vrf *zvrf;
2135 struct in_addr local_ipv4;
2136 struct in_addr *ipv4;
2137 afi_t afi = AFI_IP;
2138
2139 /* Reset some nexthop attributes that we'll recompute if necessary */
2140 if ((nexthop->type == NEXTHOP_TYPE_IPV4)
2141 || (nexthop->type == NEXTHOP_TYPE_IPV6))
2142 nexthop->ifindex = 0;
2143
2144 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
2145 nexthops_free(nexthop->resolved);
2146 nexthop->resolved = NULL;
2147
2148 /*
2149 * Set afi based on nexthop type.
2150 * Some nexthop types get special handling, possibly skipping
2151 * the normal processing.
2152 */
2153 switch (nexthop->type) {
2154 case NEXTHOP_TYPE_IFINDEX:
2155
2156 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
2157 /*
2158 * If the interface exists and its operative or its a kernel
2159 * route and interface is up, its active. We trust kernel routes
2160 * to be good.
2161 */
2162 if (ifp && (if_is_operative(ifp)))
2163 return 1;
2164 else
2165 return 0;
2166 break;
2167
2168 case NEXTHOP_TYPE_IPV6_IFINDEX:
2169 afi = AFI_IP6;
2170
2171 if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
2172 ifp = if_lookup_by_index(nexthop->ifindex,
2173 nexthop->vrf_id);
2174 if (ifp && if_is_operative(ifp))
2175 return 1;
2176 else
2177 return 0;
2178 }
2179 break;
2180
2181 case NEXTHOP_TYPE_IPV4:
2182 case NEXTHOP_TYPE_IPV4_IFINDEX:
2183 afi = AFI_IP;
2184 break;
2185 case NEXTHOP_TYPE_IPV6:
2186 afi = AFI_IP6;
2187 break;
2188
2189 case NEXTHOP_TYPE_BLACKHOLE:
2190 return 1;
2191 }
2192
2193 /*
2194 * If the nexthop has been marked as 'onlink' we just need to make
2195 * sure the nexthop's interface is known and is operational.
2196 */
2197 if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
2198 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
2199 if (!ifp) {
2200 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2201 zlog_debug("nexthop %pNHv marked onlink but nhif %u doesn't exist",
2202 nexthop, nexthop->ifindex);
2203 return 0;
2204 }
2205 if (!if_is_operative(ifp)) {
2206 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2207 zlog_debug("nexthop %pNHv marked onlink but nhif %s is not operational",
2208 nexthop, ifp->name);
2209 return 0;
2210 }
2211 return 1;
2212 }
2213
2214 if (top &&
2215 ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN &&
2216 nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr) ||
2217 (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN &&
2218 memcmp(&nexthop->gate.ipv6, &top->u.prefix6, IPV6_MAX_BYTELEN) ==
2219 0)) &&
2220 nexthop->vrf_id == vrf_id) {
2221 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2222 zlog_debug(
2223 " :%s: Attempting to install a max prefixlength route through itself",
2224 __func__);
2225 return 0;
2226 }
2227
2228 /* Validation for ipv4 mapped ipv6 nexthop. */
2229 if (IS_MAPPED_IPV6(&nexthop->gate.ipv6)) {
2230 afi = AFI_IP;
2231 ipv4 = &local_ipv4;
2232 ipv4_mapped_ipv6_to_ipv4(&nexthop->gate.ipv6, ipv4);
2233 } else {
2234 ipv4 = &nexthop->gate.ipv4;
2235 }
2236
2237 /* Processing for nexthops with SR 'color' attribute, using
2238 * the corresponding SR policy object.
2239 */
2240 if (nexthop->srte_color) {
2241 struct ipaddr endpoint = {0};
2242 struct zebra_sr_policy *policy;
2243
2244 switch (afi) {
2245 case AFI_IP:
2246 endpoint.ipa_type = IPADDR_V4;
2247 endpoint.ipaddr_v4 = *ipv4;
2248 break;
2249 case AFI_IP6:
2250 endpoint.ipa_type = IPADDR_V6;
2251 endpoint.ipaddr_v6 = nexthop->gate.ipv6;
2252 break;
2253 default:
2254 flog_err(EC_LIB_DEVELOPMENT,
2255 "%s: unknown address-family: %u", __func__,
2256 afi);
2257 exit(1);
2258 }
2259
2260 policy = zebra_sr_policy_find(nexthop->srte_color, &endpoint);
2261 if (policy && policy->status == ZEBRA_SR_POLICY_UP) {
2262 resolved = 0;
2263 frr_each_safe (nhlfe_list, &policy->lsp->nhlfe_list,
2264 nhlfe) {
2265 if (!CHECK_FLAG(nhlfe->flags,
2266 NHLFE_FLAG_SELECTED)
2267 || CHECK_FLAG(nhlfe->flags,
2268 NHLFE_FLAG_DELETED))
2269 continue;
2270 SET_FLAG(nexthop->flags,
2271 NEXTHOP_FLAG_RECURSIVE);
2272 nexthop_set_resolved(afi, nhlfe->nexthop,
2273 nexthop, policy);
2274 resolved = 1;
2275 }
2276 if (resolved)
2277 return 1;
2278 }
2279 }
2280
2281 /* Make lookup prefix. */
2282 memset(&p, 0, sizeof(struct prefix));
2283 switch (afi) {
2284 case AFI_IP:
2285 p.family = AF_INET;
2286 p.prefixlen = IPV4_MAX_BITLEN;
2287 p.u.prefix4 = *ipv4;
2288 break;
2289 case AFI_IP6:
2290 p.family = AF_INET6;
2291 p.prefixlen = IPV6_MAX_BITLEN;
2292 p.u.prefix6 = nexthop->gate.ipv6;
2293 break;
2294 default:
2295 assert(afi != AFI_IP && afi != AFI_IP6);
2296 break;
2297 }
2298 /* Lookup table. */
2299 table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
2300 /* get zvrf */
2301 zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
2302 if (!table || !zvrf) {
2303 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2304 zlog_debug(" %s: Table not found", __func__);
2305 return 0;
2306 }
2307
2308 rn = route_node_match(table, (struct prefix *)&p);
2309 while (rn) {
2310 route_unlock_node(rn);
2311
2312 /* Lookup should halt if we've matched against ourselves ('top',
2313 * if specified) - i.e., we cannot have a nexthop NH1 is
2314 * resolved by a route NH1. The exception is if the route is a
2315 * host route.
2316 */
2317 if (prefix_same(&rn->p, top))
2318 if (((afi == AFI_IP)
2319 && (rn->p.prefixlen != IPV4_MAX_BITLEN))
2320 || ((afi == AFI_IP6)
2321 && (rn->p.prefixlen != IPV6_MAX_BITLEN))) {
2322 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2323 zlog_debug(
2324 " %s: Matched against ourself and prefix length is not max bit length",
2325 __func__);
2326 return 0;
2327 }
2328
2329 /* Pick up selected route. */
2330 /* However, do not resolve over default route unless explicitly
2331 * allowed.
2332 */
2333 if (is_default_prefix(&rn->p)
2334 && !rnh_resolve_via_default(zvrf, p.family)) {
2335 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2336 zlog_debug(
2337 " :%s: Resolved against default route",
2338 __func__);
2339 return 0;
2340 }
2341
2342 dest = rib_dest_from_rnode(rn);
2343 if (dest && dest->selected_fib
2344 && !CHECK_FLAG(dest->selected_fib->status,
2345 ROUTE_ENTRY_REMOVED)
2346 && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
2347 match = dest->selected_fib;
2348
2349 /* If there is no selected route or matched route is EGP, go up
2350 * tree.
2351 */
2352 if (!match) {
2353 do {
2354 rn = rn->parent;
2355 } while (rn && rn->info == NULL);
2356 if (rn)
2357 route_lock_node(rn);
2358
2359 continue;
2360 }
2361
2362 if ((match->type == ZEBRA_ROUTE_CONNECT) ||
2363 (RIB_SYSTEM_ROUTE(match) && RSYSTEM_ROUTE(type))) {
2364 match = zebra_nhg_connected_ifindex(rn, match,
2365 nexthop->ifindex);
2366
2367 newhop = match->nhe->nhg.nexthop;
2368 if (nexthop->type == NEXTHOP_TYPE_IPV4 ||
2369 nexthop->type == NEXTHOP_TYPE_IPV6)
2370 nexthop->ifindex = newhop->ifindex;
2371 else if (nexthop->ifindex != newhop->ifindex) {
2372 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2373 zlog_debug(
2374 "%s: %pNHv given ifindex does not match nexthops ifindex found: %pNHv",
2375 __func__, nexthop, newhop);
2376 /*
2377 * NEXTHOP_TYPE_*_IFINDEX but ifindex
2378 * doesn't match what we found.
2379 */
2380 return 0;
2381 }
2382
2383 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2384 zlog_debug(
2385 "%s: CONNECT match %p (%pNG), newhop %pNHv",
2386 __func__, match, match->nhe, newhop);
2387
2388 return 1;
2389 } else if (CHECK_FLAG(flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
2390 struct nexthop_group *nhg;
2391 struct nexthop *resolver;
2392 struct backup_nh_map_s map = {};
2393
2394 resolved = 0;
2395
2396 /*
2397 * Only useful if installed or being Route Replacing
2398 * Why Being Route Replaced as well?
2399 * Imagine a route A and route B( that depends on A )
2400 * for recursive resolution and A already exists in the
2401 * zebra rib. If zebra receives the routes
2402 * for resolution at aproximately the same time in the [
2403 * B, A ] order on the workQ. If this happens then
2404 * normal route resolution will happen and B will be
2405 * resolved successfully and then A will be resolved
2406 * successfully. Now imagine the reversed order [A, B].
2407 * A will be resolved and then scheduled for installed
2408 * (Thus not having the ROUTE_ENTRY_INSTALLED flag ). B
2409 * will then get resolved and fail to be installed
2410 * because the original below test. Let's `loosen` this
2411 * up a tiny bit and allow the
2412 * ROUTE_ENTRY_ROUTE_REPLACING flag ( that is set when a
2413 * Route Replace operation is being initiated on A now )
2414 * to now satisfy this situation. This will allow
2415 * either order in the workQ to work properly.
2416 */
2417 if (!CHECK_FLAG(match->status, ROUTE_ENTRY_INSTALLED) &&
2418 !CHECK_FLAG(match->status,
2419 ROUTE_ENTRY_ROUTE_REPLACING)) {
2420 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2421 zlog_debug(
2422 "%s: match %p (%pNG) not installed or being Route Replaced",
2423 __func__, match, match->nhe);
2424
2425 goto done_with_match;
2426 }
2427
2428 /* Examine installed nexthops; note that there
2429 * may not be any installed primary nexthops if
2430 * only backups are installed.
2431 */
2432 nhg = rib_get_fib_nhg(match);
2433 for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
2434 if (!nexthop_valid_resolve(nexthop, newhop))
2435 continue;
2436
2437 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2438 zlog_debug(
2439 "%s: RECURSIVE match %p (%pNG), newhop %pNHv",
2440 __func__, match, match->nhe,
2441 newhop);
2442
2443 SET_FLAG(nexthop->flags,
2444 NEXTHOP_FLAG_RECURSIVE);
2445 resolver = nexthop_set_resolved(afi, newhop,
2446 nexthop, NULL);
2447 resolved = 1;
2448
2449 /* If there are backup nexthops, capture
2450 * that info with the resolving nexthop.
2451 */
2452 if (resolver && newhop->backup_num > 0) {
2453 resolve_backup_nexthops(newhop,
2454 match->nhe,
2455 resolver, nhe,
2456 &map);
2457 }
2458 }
2459
2460 /* Examine installed backup nexthops, if any. There
2461 * are only installed backups *if* there is a
2462 * dedicated fib list. The UI can also control use
2463 * of backups for resolution.
2464 */
2465 nhg = rib_get_fib_backup_nhg(match);
2466 if (!use_recursive_backups ||
2467 nhg == NULL || nhg->nexthop == NULL)
2468 goto done_with_match;
2469
2470 for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
2471 if (!nexthop_valid_resolve(nexthop, newhop))
2472 continue;
2473
2474 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2475 zlog_debug(
2476 "%s: RECURSIVE match backup %p (%pNG), newhop %pNHv",
2477 __func__, match, match->nhe,
2478 newhop);
2479
2480 SET_FLAG(nexthop->flags,
2481 NEXTHOP_FLAG_RECURSIVE);
2482 nexthop_set_resolved(afi, newhop, nexthop,
2483 NULL);
2484 resolved = 1;
2485 }
2486
2487 done_with_match:
2488 /* Capture resolving mtu */
2489 if (resolved) {
2490 if (pmtu)
2491 *pmtu = match->mtu;
2492
2493 } else if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2494 zlog_debug(
2495 " %s: Recursion failed to find",
2496 __func__);
2497
2498 return resolved;
2499 } else {
2500 if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
2501 zlog_debug(
2502 " %s: Route Type %s has not turned on recursion",
2503 __func__, zebra_route_string(type));
2504 if (type == ZEBRA_ROUTE_BGP
2505 && !CHECK_FLAG(flags, ZEBRA_FLAG_IBGP))
2506 zlog_debug(
2507 " EBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
2508 }
2509 return 0;
2510 }
2511 }
2512 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2513 zlog_debug(" %s: Nexthop did not lookup in table",
2514 __func__);
2515 return 0;
2516 }
2517
2518 /* This function verifies reachability of one given nexthop, which can be
2519 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
2520 * in nexthop->flags field. The nexthop->ifindex will be updated
2521 * appropriately as well.
2522 *
2523 * An existing route map can turn an otherwise active nexthop into inactive,
2524 * but not vice versa.
2525 *
2526 * The return value is the final value of 'ACTIVE' flag.
2527 */
2528 static unsigned nexthop_active_check(struct route_node *rn,
2529 struct route_entry *re,
2530 struct nexthop *nexthop,
2531 struct nhg_hash_entry *nhe)
2532 {
2533 route_map_result_t ret = RMAP_PERMITMATCH;
2534 afi_t family;
2535 const struct prefix *p, *src_p;
2536 struct zebra_vrf *zvrf;
2537 uint32_t mtu = 0;
2538 vrf_id_t vrf_id;
2539
2540 srcdest_rnode_prefixes(rn, &p, &src_p);
2541
2542 if (rn->p.family == AF_INET)
2543 family = AFI_IP;
2544 else if (rn->p.family == AF_INET6)
2545 family = AFI_IP6;
2546 else
2547 family = AF_UNSPEC;
2548
2549 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
2550 zlog_debug("%s: re %p, nexthop %pNHv", __func__, re, nexthop);
2551
2552 /*
2553 * If this is a kernel route, then if the interface is *up* then
2554 * by golly gee whiz it's a good route.
2555 */
2556 if (re->type == ZEBRA_ROUTE_KERNEL || re->type == ZEBRA_ROUTE_SYSTEM) {
2557 struct interface *ifp;
2558
2559 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
2560
2561 if (ifp && (if_is_operative(ifp) || if_is_up(ifp))) {
2562 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2563 goto skip_check;
2564 }
2565 }
2566
2567 vrf_id = zvrf_id(rib_dest_vrf(rib_dest_from_rnode(rn)));
2568 switch (nexthop->type) {
2569 case NEXTHOP_TYPE_IFINDEX:
2570 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2571 &mtu, vrf_id))
2572 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2573 else
2574 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2575 break;
2576 case NEXTHOP_TYPE_IPV4:
2577 case NEXTHOP_TYPE_IPV4_IFINDEX:
2578 family = AFI_IP;
2579 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2580 &mtu, vrf_id))
2581 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2582 else
2583 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2584 break;
2585 case NEXTHOP_TYPE_IPV6:
2586 family = AFI_IP6;
2587 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2588 &mtu, vrf_id))
2589 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2590 else
2591 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2592 break;
2593 case NEXTHOP_TYPE_IPV6_IFINDEX:
2594 /* RFC 5549, v4 prefix with v6 NH */
2595 if (rn->p.family != AF_INET)
2596 family = AFI_IP6;
2597
2598 if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
2599 &mtu, vrf_id))
2600 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2601 else
2602 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2603 break;
2604 case NEXTHOP_TYPE_BLACKHOLE:
2605 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2606 break;
2607 default:
2608 break;
2609 }
2610
2611 skip_check:
2612
2613 if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
2614 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2615 zlog_debug(" %s: Unable to find active nexthop",
2616 __func__);
2617 return 0;
2618 }
2619
2620 /* Capture recursive nexthop mtu.
2621 * TODO -- the code used to just reset the re's value to zero
2622 * for each nexthop, and then jam any resolving route's mtu value in,
2623 * whether or not that was zero, or lt/gt any existing value? The
2624 * way this is used appears to be as a floor value, so let's try
2625 * using it that way here.
2626 */
2627 if (mtu > 0) {
2628 if (re->nexthop_mtu == 0 || re->nexthop_mtu > mtu)
2629 re->nexthop_mtu = mtu;
2630 }
2631
2632 /* XXX: What exactly do those checks do? Do we support
2633 * e.g. IPv4 routes with IPv6 nexthops or vice versa?
2634 */
2635 if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
2636 || (family == AFI_IP6 && p->family != AF_INET6))
2637 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2638
2639 /* The original code didn't determine the family correctly
2640 * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
2641 * from the rib_table_info in those cases.
2642 * Possibly it may be better to use only the rib_table_info
2643 * in every case.
2644 */
2645 if (family == 0) {
2646 struct rib_table_info *info;
2647
2648 info = srcdest_rnode_table_info(rn);
2649 family = info->afi;
2650 }
2651
2652 memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));
2653
2654 zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
2655 if (!zvrf) {
2656 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
2657 zlog_debug(" %s: zvrf is NULL", __func__);
2658 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2659 }
2660
2661 /* It'll get set if required inside */
2662 ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
2663 zvrf, re->tag);
2664 if (ret == RMAP_DENYMATCH) {
2665 if (IS_ZEBRA_DEBUG_RIB) {
2666 zlog_debug(
2667 "%u:%pRN: Filtering out with NH %pNHv due to route map",
2668 re->vrf_id, rn, nexthop);
2669 }
2670 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2671 }
2672 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
2673 }
2674
2675 /* Helper function called after resolution to walk nhg rb trees
2676 * and toggle the NEXTHOP_GROUP_VALID flag if the nexthop
2677 * is active on singleton NHEs.
2678 */
2679 static bool zebra_nhg_set_valid_if_active(struct nhg_hash_entry *nhe)
2680 {
2681 struct nhg_connected *rb_node_dep = NULL;
2682 bool valid = false;
2683
2684 if (!zebra_nhg_depends_is_empty(nhe)) {
2685 /* Is at least one depend valid? */
2686 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
2687 if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe))
2688 valid = true;
2689 }
2690
2691 goto done;
2692 }
2693
2694 /* should be fully resolved singleton at this point */
2695 if (CHECK_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE))
2696 valid = true;
2697
2698 done:
2699 if (valid)
2700 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
2701
2702 return valid;
2703 }
2704
/*
 * Process a list of nexthops, given an nhe, determining
 * whether each one is ACTIVE/installable at this time.
 *
 * rn        - route node for the route being processed
 * re        - route entry; nexthop_mtu is reset and ROUTE_ENTRY_CHANGED
 *             may be set on its status
 * nhe       - hash entry whose nexthop list is walked
 * is_backup - true when processing the backup nexthop list
 *
 * Returns the number of nexthops marked ACTIVE (capped at
 * zrouter.multipath_num).
 */
static uint32_t nexthop_list_active_update(struct route_node *rn,
					   struct route_entry *re,
					   struct nhg_hash_entry *nhe,
					   bool is_backup)
{
	union g_addr prev_src;
	unsigned int prev_active, new_active;
	ifindex_t prev_index;
	uint32_t counter = 0;
	struct nexthop *nexthop;
	struct nexthop_group *nhg = &nhe->nhg;

	nexthop = nhg->nexthop;

	/* Init recursive nh mtu */
	re->nexthop_mtu = 0;

	/* Process nexthops one-by-one */
	for ( ; nexthop; nexthop = nexthop->next) {

		/* Snapshot pre-check state so we can detect changes below.
		 * No protocol daemon provides src and so we're skipping
		 * tracking it
		 */
		prev_src = nexthop->rmap_src;
		prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		prev_index = nexthop->ifindex;

		/* Include the containing nhe for primary nexthops: if there's
		 * recursive resolution, we capture the backup info also.
		 */
		new_active =
			nexthop_active_check(rn, re, nexthop,
					     (is_backup ? NULL : nhe));

		/*
		 * We need to respect the multipath_num here
		 * as that what we should be able to install from
		 * a multipath perspective should not be a data plane
		 * decision point.
		 */
		if (new_active && counter >= zrouter.multipath_num) {
			struct nexthop *nh;

			/* Set it and its resolved nexthop as inactive. */
			for (nh = nexthop; nh; nh = nh->resolved)
				UNSET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);

			new_active = 0;
		}

		if (new_active)
			counter++;

		/* Check for changes to the nexthop - set ROUTE_ENTRY_CHANGED.
		 * The rmap_src comparison uses the v4 field for v4-ish
		 * nexthop types and the v6 field for v6-ish types.
		 */
		if (prev_active != new_active || prev_index != nexthop->ifindex
		    || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
			 && nexthop->type < NEXTHOP_TYPE_IPV6)
			&& prev_src.ipv4.s_addr
				   != nexthop->rmap_src.ipv4.s_addr)
		    || ((nexthop->type >= NEXTHOP_TYPE_IPV6
			 && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
			&& !(IPV6_ADDR_SAME(&prev_src.ipv6,
					    &nexthop->rmap_src.ipv6)))
		    || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED))
			SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
	}

	return counter;
}
2778
2779
2780 static uint32_t proto_nhg_nexthop_active_update(struct nexthop_group *nhg)
2781 {
2782 struct nexthop *nh;
2783 uint32_t curr_active = 0;
2784
2785 /* Assume all active for now */
2786
2787 for (nh = nhg->nexthop; nh; nh = nh->next) {
2788 SET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);
2789 curr_active++;
2790 }
2791
2792 return curr_active;
2793 }
2794
/*
 * Iterate over all nexthops of the given RIB entry and refresh their
 * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
 * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
 *
 * Works on a private copy of re's nhe so the shared hash entry is never
 * mutated; if anything changed, a new/refcounted nhe is attached to re.
 *
 * Return value is the new number of active (primary) nexthops.
 */
int nexthop_active_update(struct route_node *rn, struct route_entry *re)
{
	struct nhg_hash_entry *curr_nhe;
	uint32_t curr_active = 0, backup_active = 0;

	/* Proto-owned groups skip resolution entirely */
	if (PROTO_OWNED(re->nhe))
		return proto_nhg_nexthop_active_update(&re->nhe->nhg);

	afi_t rt_afi = family2afi(rn->p.family);

	UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);

	/* Make a local copy of the existing nhe, so we don't work on/modify
	 * the shared nhe.
	 */
	curr_nhe = zebra_nhe_copy(re->nhe, re->nhe->id);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p nhe %p (%pNG), curr_nhe %p", __func__, re,
			   re->nhe, re->nhe, curr_nhe);

	/* Clear the existing id, if any: this will avoid any confusion
	 * if the id exists, and will also force the creation
	 * of a new nhe reflecting the changes we may make in this local copy.
	 */
	curr_nhe->id = 0;

	/* Process nexthops */
	curr_active = nexthop_list_active_update(rn, re, curr_nhe, false);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p curr_active %u", __func__, re,
			   curr_active);

	/* If there are no backup nexthops, we are done */
	if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL)
		goto backups_done;

	backup_active = nexthop_list_active_update(
		rn, re, curr_nhe->backup_info->nhe, true /*is_backup*/);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p backup_active %u", __func__, re,
			   backup_active);

backups_done:

	/*
	 * Ref or create an nhe that matches the current state of the
	 * nexthop(s).
	 */
	if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
		struct nhg_hash_entry *new_nhe = NULL;

		new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);

		if (IS_ZEBRA_DEBUG_NHG_DETAIL)
			zlog_debug(
				"%s: re %p CHANGED: nhe %p (%pNG) => new_nhe %p (%pNG)",
				__func__, re, re->nhe, re->nhe, new_nhe,
				new_nhe);

		route_entry_update_nhe(re, new_nhe);
	}


	/* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID
	 * flag where appropriate.
	 */
	if (curr_active)
		zebra_nhg_set_valid_if_active(re->nhe);

	/*
	 * Do not need the old / copied nhe anymore since it
	 * was either copied over into a new nhe or not
	 * used at all.
	 */
	zebra_nhg_free(curr_nhe);
	return curr_active;
}
2882
/* Recursively construct a grp array of fully resolved IDs.
 *
 * This function allows us to account for groups within groups,
 * by converting them into a flat array of IDs.
 *
 * nh_grp is modified at every level of recursion to append
 * to it the next unique, fully resolved ID from the entire tree.
 *
 * grp        - output array of {id, weight} entries, caller-allocated
 * curr_index - first free slot in grp at this recursion level
 * nhe        - hash entry whose dependency tree is flattened
 * max_num    - capacity of grp; appending stops once reached
 *
 * Returns the index one past the last entry written (i.e. the new count).
 *
 * Note:
 * I'm pretty sure we only allow ONE level of group within group currently.
 * But making this recursive just in case that ever changes.
 */
static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp,
					  uint8_t curr_index,
					  struct nhg_hash_entry *nhe,
					  int max_num)
{
	struct nhg_connected *rb_node_dep = NULL;
	struct nhg_hash_entry *depend = NULL;
	uint8_t i = curr_index;

	frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
		bool duplicate = false;

		/* Stop appending once the caller's array is full */
		if (i >= max_num)
			goto done;

		depend = rb_node_dep->nhe;

		/*
		 * If its recursive, use its resolved nhe in the group
		 */
		if (CHECK_FLAG(depend->flags, NEXTHOP_GROUP_RECURSIVE)) {
			depend = zebra_nhg_resolve(depend);
			if (!depend) {
				flog_err(
					EC_ZEBRA_NHG_FIB_UPDATE,
					"Failed to recursively resolve Nexthop Hash Entry in the group id=%pNG",
					nhe);
				continue;
			}
		}

		if (!zebra_nhg_depends_is_empty(depend)) {
			/* This is a group within a group */
			i = zebra_nhg_nhe2grp_internal(grp, i, depend, max_num);
		} else {
			if (!CHECK_FLAG(depend->flags, NEXTHOP_GROUP_VALID)) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) not valid, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* If the nexthop not installed/queued for install don't
			 * put in the ID array.
			 */
			if (!(CHECK_FLAG(depend->flags, NEXTHOP_GROUP_INSTALLED)
			      || CHECK_FLAG(depend->flags,
					    NEXTHOP_GROUP_QUEUED))) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) not installed or queued for install, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* Check for duplicate IDs, ignore if found. */
			for (int j = 0; j < i; j++) {
				if (depend->id == grp[j].id) {
					duplicate = true;
					break;
				}
			}

			if (duplicate) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) is duplicate, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* Singleton: append its id and the weight of its
			 * (single) nexthop.
			 */
			grp[i].id = depend->id;
			grp[i].weight = depend->nhg.nexthop->weight;
			i++;
		}
	}

	if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL)
		goto done;

	/* TODO -- For now, we are not trying to use or install any
	 * backup info in this nexthop-id path: we aren't prepared
	 * to use the backups here yet. We're just debugging what we find.
	 */
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: skipping backup nhe", __func__);

done:
	return i;
}
2990
2991 /* Convert a nhe into a group array */
2992 uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe,
2993 int max_num)
2994 {
2995 /* Call into the recursive function */
2996 return zebra_nhg_nhe2grp_internal(grp, 0, nhe, max_num);
2997 }
2998
2999 void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe)
3000 {
3001 struct nhg_connected *rb_node_dep = NULL;
3002
3003 /* Resolve it first */
3004 nhe = zebra_nhg_resolve(nhe);
3005
3006 /* Make sure all depends are installed/queued */
3007 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
3008 zebra_nhg_install_kernel(rb_node_dep->nhe);
3009 }
3010
3011 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)
3012 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
3013 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
3014 /* Change its type to us since we are installing it */
3015 if (!ZEBRA_NHG_CREATED(nhe))
3016 nhe->type = ZEBRA_ROUTE_NHG;
3017
3018 int ret = dplane_nexthop_add(nhe);
3019
3020 switch (ret) {
3021 case ZEBRA_DPLANE_REQUEST_QUEUED:
3022 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
3023 break;
3024 case ZEBRA_DPLANE_REQUEST_FAILURE:
3025 flog_err(
3026 EC_ZEBRA_DP_INSTALL_FAIL,
3027 "Failed to install Nexthop ID (%pNG) into the kernel",
3028 nhe);
3029 break;
3030 case ZEBRA_DPLANE_REQUEST_SUCCESS:
3031 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
3032 zebra_nhg_handle_install(nhe);
3033 break;
3034 }
3035 }
3036 }
3037
3038 void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe)
3039 {
3040 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) {
3041 int ret = dplane_nexthop_delete(nhe);
3042
3043 switch (ret) {
3044 case ZEBRA_DPLANE_REQUEST_QUEUED:
3045 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
3046 break;
3047 case ZEBRA_DPLANE_REQUEST_FAILURE:
3048 flog_err(
3049 EC_ZEBRA_DP_DELETE_FAIL,
3050 "Failed to uninstall Nexthop ID (%pNG) from the kernel",
3051 nhe);
3052 break;
3053 case ZEBRA_DPLANE_REQUEST_SUCCESS:
3054 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
3055 break;
3056 }
3057 }
3058
3059 zebra_nhg_handle_uninstall(nhe);
3060 }
3061
/* Handle a dataplane result for a nexthop operation: update the nhe's
 * QUEUED/VALID/INSTALLED flags to match what the kernel reported, and
 * notify the owning daemon for proto-owned groups. All non-nexthop
 * dataplane ops are ignored here.
 */
void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
{
	enum dplane_op_e op;
	enum zebra_dplane_result status;
	uint32_t id = 0;
	struct nhg_hash_entry *nhe = NULL;

	op = dplane_ctx_get_op(ctx);
	status = dplane_ctx_get_status(ctx);

	id = dplane_ctx_get_nhe_id(ctx);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug(
			"Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s",
			ctx, dplane_op2str(op), id, dplane_res2str(status));

	switch (op) {
	case DPLANE_OP_NH_DELETE:
		if (status != ZEBRA_DPLANE_REQUEST_SUCCESS)
			flog_err(
				EC_ZEBRA_DP_DELETE_FAIL,
				"Failed to uninstall Nexthop ID (%u) from the kernel",
				id);

		/* We already free'd the data, nothing to do */
		break;
	case DPLANE_OP_NH_INSTALL:
	case DPLANE_OP_NH_UPDATE:
		nhe = zebra_nhg_lookup_id(id);

		/* The entry may have been deleted while the request was
		 * in flight; nothing to update in that case.
		 */
		if (!nhe) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s operation preformed on Nexthop ID (%u) in the kernel, that we no longer have in our table",
					dplane_op2str(op), id);

			break;
		}

		/* Request is no longer in flight either way */
		UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
		if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) {
			SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
			SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
			zebra_nhg_handle_install(nhe);

			/* If daemon nhg, send it an update */
			if (PROTO_OWNED(nhe))
				zsend_nhg_notify(nhe->type, nhe->zapi_instance,
						 nhe->zapi_session, nhe->id,
						 ZAPI_NHG_INSTALLED);
		} else {
			/* If daemon nhg, send it an update */
			if (PROTO_OWNED(nhe))
				zsend_nhg_notify(nhe->type, nhe->zapi_instance,
						 nhe->zapi_session, nhe->id,
						 ZAPI_NHG_FAIL_INSTALL);

			/* Suppress the error log when zebra is configured to
			 * only install proto-owned nexthops and this isn't one.
			 */
			if (!(zebra_nhg_proto_nexthops_only() &&
			      !PROTO_OWNED(nhe)))
				flog_err(
					EC_ZEBRA_DP_INSTALL_FAIL,
					"Failed to install Nexthop (%pNG) into the kernel",
					nhe);
		}
		break;

	/* All other dataplane ops are not nexthop-related: ignore */
	case DPLANE_OP_ROUTE_INSTALL:
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:
	case DPLANE_OP_ROUTE_NOTIFY:
	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
	case DPLANE_OP_LSP_NOTIFY:
	case DPLANE_OP_PW_INSTALL:
	case DPLANE_OP_PW_UNINSTALL:
	case DPLANE_OP_SYS_ROUTE_ADD:
	case DPLANE_OP_SYS_ROUTE_DELETE:
	case DPLANE_OP_ADDR_INSTALL:
	case DPLANE_OP_ADDR_UNINSTALL:
	case DPLANE_OP_MAC_INSTALL:
	case DPLANE_OP_MAC_DELETE:
	case DPLANE_OP_NEIGH_INSTALL:
	case DPLANE_OP_NEIGH_UPDATE:
	case DPLANE_OP_NEIGH_DELETE:
	case DPLANE_OP_NEIGH_IP_INSTALL:
	case DPLANE_OP_NEIGH_IP_DELETE:
	case DPLANE_OP_VTEP_ADD:
	case DPLANE_OP_VTEP_DELETE:
	case DPLANE_OP_RULE_ADD:
	case DPLANE_OP_RULE_DELETE:
	case DPLANE_OP_RULE_UPDATE:
	case DPLANE_OP_NEIGH_DISCOVER:
	case DPLANE_OP_BR_PORT_UPDATE:
	case DPLANE_OP_NONE:
	case DPLANE_OP_IPTABLE_ADD:
	case DPLANE_OP_IPTABLE_DELETE:
	case DPLANE_OP_IPSET_ADD:
	case DPLANE_OP_IPSET_DELETE:
	case DPLANE_OP_IPSET_ENTRY_ADD:
	case DPLANE_OP_IPSET_ENTRY_DELETE:
	case DPLANE_OP_NEIGH_TABLE_UPDATE:
	case DPLANE_OP_GRE_SET:
	case DPLANE_OP_INTF_ADDR_ADD:
	case DPLANE_OP_INTF_ADDR_DEL:
	case DPLANE_OP_INTF_NETCONFIG:
	case DPLANE_OP_INTF_INSTALL:
	case DPLANE_OP_INTF_UPDATE:
	case DPLANE_OP_INTF_DELETE:
	case DPLANE_OP_TC_QDISC_INSTALL:
	case DPLANE_OP_TC_QDISC_UNINSTALL:
	case DPLANE_OP_TC_CLASS_ADD:
	case DPLANE_OP_TC_CLASS_DELETE:
	case DPLANE_OP_TC_CLASS_UPDATE:
	case DPLANE_OP_TC_FILTER_ADD:
	case DPLANE_OP_TC_FILTER_DELETE:
	case DPLANE_OP_TC_FILTER_UPDATE:
		break;
	}
}
3183
3184 static int zebra_nhg_sweep_entry(struct hash_bucket *bucket, void *arg)
3185 {
3186 struct nhg_hash_entry *nhe = NULL;
3187
3188 nhe = (struct nhg_hash_entry *)bucket->data;
3189
3190 /*
3191 * same logic as with routes.
3192 *
3193 * If older than startup time, we know we read them in from the
3194 * kernel and have not gotten and update for them since startup
3195 * from an upper level proto.
3196 */
3197 if (zrouter.startup_time < nhe->uptime)
3198 return HASHWALK_CONTINUE;
3199
3200 /*
3201 * If it's proto-owned and not being used by a route, remove it since
3202 * we haven't gotten an update about it from the proto since startup.
3203 * This means that either the config for it was removed or the daemon
3204 * didn't get started. This handles graceful restart & retain scenario.
3205 */
3206 if (PROTO_OWNED(nhe) && nhe->refcnt == 1) {
3207 zebra_nhg_decrement_ref(nhe);
3208 return HASHWALK_ABORT;
3209 }
3210
3211 /*
3212 * If its being ref'd by routes, just let it be uninstalled via a route
3213 * removal.
3214 */
3215 if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0) {
3216 zebra_nhg_uninstall_kernel(nhe);
3217 return HASHWALK_ABORT;
3218 }
3219
3220 return HASHWALK_CONTINUE;
3221 }
3222
3223 void zebra_nhg_sweep_table(struct hash *hash)
3224 {
3225 uint32_t count;
3226
3227 /*
3228 * Yes this is extremely odd. Effectively nhg's have
3229 * other nexthop groups that depend on them and when you
3230 * remove them, you can have other entries blown up.
3231 * our hash code does not work with deleting multiple
3232 * entries at a time and will possibly cause crashes
3233 * So what to do? Whenever zebra_nhg_sweep_entry
3234 * deletes an entry it will return HASHWALK_ABORT,
3235 * cause that deletion might have triggered more.
3236 * then we can just keep sweeping this table
3237 * until nothing more is found to do.
3238 */
3239 do {
3240 count = hashcount(hash);
3241 hash_walk(hash, zebra_nhg_sweep_entry, NULL);
3242 } while (count != hashcount(hash));
3243 }
3244
3245 static void zebra_nhg_mark_keep_entry(struct hash_bucket *bucket, void *arg)
3246 {
3247 struct nhg_hash_entry *nhe = bucket->data;
3248
3249 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
3250 }
3251
/*
 * When we are shutting down and we have retain mode enabled
 * in zebra the process is to mark each vrf that it's
 * routes should not be deleted. The problem with that
 * is that shutdown actually free's up memory which
 * causes the nexthop group's ref counts to go to zero
 * we need a way to subtly tell the system to not remove
 * the nexthop groups from the kernel at the same time.
 * The easiest just looks like that we should not mark
 * the nhg's as installed any more and when the ref count
 * goes to zero we'll attempt to delete and do nothing
 */
void zebra_nhg_mark_keep(void)
{
	/* Clear INSTALLED on every nhe in the id table */
	hash_iterate(zrouter.nhgs_id, zebra_nhg_mark_keep_entry, NULL);
}
3268
/* Global control to disable use of kernel nexthops, if available. We can't
 * force the kernel to support nexthop ids, of course, but we can disable
 * zebra's use of them, for testing e.g. By default, if the kernel supports
 * nexthop ids, zebra uses them.
 */
void zebra_nhg_enable_kernel_nexthops(bool set)
{
	/* Setter for the file-scope flag read via
	 * zebra_nhg_kernel_nexthops_enabled().
	 */
	g_nexthops_enabled = set;
}
3278
/* Return the current kernel-nexthop control setting (see
 * zebra_nhg_enable_kernel_nexthops()).
 */
bool zebra_nhg_kernel_nexthops_enabled(void)
{
	return g_nexthops_enabled;
}
3283
/* Global control for use of activated backups for recursive resolution. */
void zebra_nhg_set_recursive_use_backups(bool set)
{
	/* Simple toggle of the file-scope control flag */
	use_recursive_backups = set;
}
3289
/* Return the current recursive-use-backups control setting (see
 * zebra_nhg_set_recursive_use_backups()).
 */
bool zebra_nhg_recursive_use_backups(void)
{
	return use_recursive_backups;
}
3294
/*
 * Global control to only use kernel nexthops for protocol created NHGs.
 * There are some use cases where you may not want zebra to implicitly
 * create kernel nexthops for all routes and only create them for NHGs
 * passed down by upper level protos.
 *
 * Default is off.
 */
void zebra_nhg_set_proto_nexthops_only(bool set)
{
	/* Simple toggle of the file-scope control flag */
	proto_nexthops_only = set;
}
3307
/* Return the current proto-nexthops-only control setting (see
 * zebra_nhg_set_proto_nexthops_only()).
 */
bool zebra_nhg_proto_nexthops_only(void)
{
	return proto_nexthops_only;
}
3312
/*
 * Add (or replace) an NHE on behalf of an upper level proto.
 *
 * id       - proto-chosen NHG id
 * type     - zebra route type of the owning proto
 * instance - zapi client instance, captured on the new entry
 * session  - zapi client session, captured on the new entry
 * nhg      - nexthop list for the group; every nexthop must have a
 *            gateway and an ifindex (backup and blackhole nexthops
 *            are rejected here)
 * afi      - address family for the group
 *
 * Returns the new hash entry (installed in the kernel) on success, or
 * NULL if the nexthops are unusable.  If an entry with the same id
 * already exists, it is replaced and the old entry is freed.
 */
struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
					   uint16_t instance, uint32_t session,
					   struct nexthop_group *nhg, afi_t afi)
{
	struct nhg_hash_entry lookup;
	struct nhg_hash_entry *new, *old;
	struct nhg_connected *rb_node_dep = NULL;
	struct nexthop *newhop;
	bool replace = false;

	if (!nhg->nexthop) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug("%s: id %u, no nexthops passed to add",
				   __func__, id);
		return NULL;
	}


	/* Set nexthop list as active, since they wont go through rib
	 * processing.
	 *
	 * Assuming valid/onlink for now.
	 *
	 * Once resolution is figured out, we won't need this!
	 */
	for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
		if (CHECK_FLAG(newhop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, backup nexthops not supported",
					__func__, id);
			return NULL;
		}

		if (newhop->type == NEXTHOP_TYPE_BLACKHOLE) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, blackhole nexthop not supported",
					__func__, id);
			return NULL;
		}

		if (newhop->type == NEXTHOP_TYPE_IFINDEX) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, nexthop without gateway not supported",
					__func__, id);
			return NULL;
		}

		if (!newhop->ifindex) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, nexthop without ifindex is not supported",
					__func__, id);
			return NULL;
		}
		SET_FLAG(newhop->flags, NEXTHOP_FLAG_ACTIVE);
	}

	/* Build a stack lookup key; the real entry is created/found by
	 * zebra_nhg_rib_find_nhe() below.
	 */
	zebra_nhe_init(&lookup, afi, nhg->nexthop);
	lookup.nhg.nexthop = nhg->nexthop;
	lookup.nhg.nhgr = nhg->nhgr;
	lookup.id = id;
	lookup.type = type;

	old = zebra_nhg_lookup_id(id);

	if (old) {
		/*
		 * This is a replace, just release NHE from ID for now, The
		 * depends/dependents may still be used in the replacement so
		 * we don't touch them other than to remove their refs to their
		 * old parent.
		 */
		replace = true;
		hash_release(zrouter.nhgs_id, old);

		/* Free all the things */
		zebra_nhg_release_all_deps(old);
	}

	new = zebra_nhg_rib_find_nhe(&lookup, afi);

	zebra_nhg_increment_ref(new);

	/* Capture zapi client info */
	new->zapi_instance = instance;
	new->zapi_session = session;

	zebra_nhg_set_valid_if_active(new);

	zebra_nhg_install_kernel(new);

	if (old) {
		/*
		 * Check to handle recving DEL while routes still in use then
		 * a replace.
		 *
		 * In this case we would have decremented the refcnt already
		 * but set the FLAG here. Go ahead and increment once to fix
		 * the misordering we have been sent.
		 */
		if (CHECK_FLAG(old->flags, NEXTHOP_GROUP_PROTO_RELEASED))
			zebra_nhg_increment_ref(old);

		rib_handle_nhg_replace(old, new);

		/* We have to decrement its singletons
		 * because some might not exist in NEW.
		 */
		if (!zebra_nhg_depends_is_empty(old)) {
			frr_each (nhg_connected_tree, &old->nhg_depends,
				  rb_node_dep)
				zebra_nhg_decrement_ref(rb_node_dep->nhe);
		}

		/* Dont call the dec API, we dont want to uninstall the ID */
		old->refcnt = 0;
		THREAD_OFF(old->timer);
		zebra_nhg_free(old);
		old = NULL;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: %s nhe %p (%u), vrf %d, type %s", __func__,
			   (replace ? "replaced" : "added"), new, new->id,
			   new->vrf_id, zebra_route_string(new->type));

	return new;
}
3445
3446 /* Delete NHE from upper level proto, caller must decrement ref */
3447 struct nhg_hash_entry *zebra_nhg_proto_del(uint32_t id, int type)
3448 {
3449 struct nhg_hash_entry *nhe;
3450
3451 nhe = zebra_nhg_lookup_id(id);
3452
3453 if (!nhe) {
3454 if (IS_ZEBRA_DEBUG_NHG)
3455 zlog_debug("%s: id %u, lookup failed", __func__, id);
3456
3457 return NULL;
3458 }
3459
3460 if (type != nhe->type) {
3461 if (IS_ZEBRA_DEBUG_NHG)
3462 zlog_debug(
3463 "%s: id %u, type %s mismatch, sent by %s, ignoring",
3464 __func__, id, zebra_route_string(nhe->type),
3465 zebra_route_string(type));
3466 return NULL;
3467 }
3468
3469 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED)) {
3470 if (IS_ZEBRA_DEBUG_NHG)
3471 zlog_debug("%s: id %u, already released", __func__, id);
3472
3473 return NULL;
3474 }
3475
3476 SET_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED);
3477
3478 if (nhe->refcnt > 1) {
3479 if (IS_ZEBRA_DEBUG_NHG)
3480 zlog_debug(
3481 "%s: %pNG, still being used by routes refcnt %u",
3482 __func__, nhe, nhe->refcnt);
3483 return nhe;
3484 }
3485
3486 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
3487 zlog_debug("%s: deleted nhe %p (%pNG), vrf %d, type %s",
3488 __func__, nhe, nhe, nhe->vrf_id,
3489 zebra_route_string(nhe->type));
3490
3491 return nhe;
3492 }
3493
/* Iteration context for collecting proto-owned NHGs of a given type */
struct nhg_score_proto_iter {
	int type;           /* zebra route type to match */
	struct list *found; /* matching entries, collected for removal */
};
3498
3499 static void zebra_nhg_score_proto_entry(struct hash_bucket *bucket, void *arg)
3500 {
3501 struct nhg_hash_entry *nhe;
3502 struct nhg_score_proto_iter *iter;
3503
3504 nhe = (struct nhg_hash_entry *)bucket->data;
3505 iter = arg;
3506
3507 /* Needs to match type and outside zebra ID space */
3508 if (nhe->type == iter->type && PROTO_OWNED(nhe)) {
3509 if (IS_ZEBRA_DEBUG_NHG_DETAIL)
3510 zlog_debug(
3511 "%s: found nhe %p (%pNG), vrf %d, type %s after client disconnect",
3512 __func__, nhe, nhe, nhe->vrf_id,
3513 zebra_route_string(nhe->type));
3514
3515 /* Add to removal list */
3516 listnode_add(iter->found, nhe);
3517 }
3518 }
3519
3520 /* Remove specific by proto NHGs */
3521 unsigned long zebra_nhg_score_proto(int type)
3522 {
3523 struct nhg_hash_entry *nhe;
3524 struct nhg_score_proto_iter iter = {};
3525 struct listnode *ln;
3526 unsigned long count;
3527
3528 iter.type = type;
3529 iter.found = list_new();
3530
3531 /* Find matching entries to remove */
3532 hash_iterate(zrouter.nhgs_id, zebra_nhg_score_proto_entry, &iter);
3533
3534 /* Now remove them */
3535 for (ALL_LIST_ELEMENTS_RO(iter.found, ln, nhe)) {
3536 /*
3537 * This should be the last ref if we remove client routes too,
3538 * and thus should remove and free them.
3539 */
3540 zebra_nhg_decrement_ref(nhe);
3541 }
3542
3543 count = iter.found->count;
3544 list_delete(&iter.found);
3545
3546 return count;
3547 }
3548
3549 printfrr_ext_autoreg_p("NG", printfrr_nhghe);
3550 static ssize_t printfrr_nhghe(struct fbuf *buf, struct printfrr_eargs *ea,
3551 const void *ptr)
3552 {
3553 const struct nhg_hash_entry *nhe = ptr;
3554 const struct nhg_connected *dep;
3555 ssize_t ret = 0;
3556
3557 if (!nhe)
3558 return bputs(buf, "[NULL]");
3559
3560 ret += bprintfrr(buf, "%u[", nhe->id);
3561 if (nhe->ifp)
3562 ret += printfrr_nhs(buf, nhe->nhg.nexthop);
3563 else {
3564 int count = zebra_nhg_depends_count(nhe);
3565
3566 frr_each (nhg_connected_tree_const, &nhe->nhg_depends, dep) {
3567 ret += bprintfrr(buf, "%u", dep->nhe->id);
3568 if (count > 1)
3569 ret += bputs(buf, "/");
3570 count--;
3571 }
3572 }
3573
3574 ret += bputs(buf, "]");
3575 return ret;
3576 }