1 /* Zebra Nexthop Group Code.
2 * Copyright (C) 2019 Cumulus Networks, Inc.
3 * Donald Sharp
4 * Stephen Worley
5 *
6 * This file is part of FRR.
7 *
8 * FRR is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * FRR is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with FRR; see the file COPYING. If not, write to the Free
20 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
21 * 02111-1307, USA.
22 */
23 #include <zebra.h>
24
25 #include "lib/nexthop.h"
26 #include "lib/nexthop_group_private.h"
27 #include "lib/routemap.h"
28 #include "lib/mpls.h"
29 #include "lib/jhash.h"
30 #include "lib/debug.h"
31
32 #include "zebra/connected.h"
33 #include "zebra/debug.h"
34 #include "zebra/zebra_router.h"
35 #include "zebra/zebra_nhg_private.h"
36 #include "zebra/zebra_rnh.h"
37 #include "zebra/zebra_routemap.h"
38 #include "zebra/zebra_memory.h"
39 #include "zebra/zserv.h"
40 #include "zebra/rt.h"
41 #include "zebra_errors.h"
42 #include "zebra_dplane.h"
43 #include "zebra/interface.h"
44
45 DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");
46 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected");
47 DEFINE_MTYPE_STATIC(ZEBRA, NHG_CTX, "Nexthop Group Context");
48
49 /* id counter to keep in sync with kernel */
50 uint32_t id_counter;
51
52 static struct nhg_hash_entry *depends_find(const struct nexthop *nh,
53 afi_t afi);
54 static void depends_add(struct nhg_connected_tree_head *head,
55 struct nhg_hash_entry *depend);
56 static struct nhg_hash_entry *
57 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
58 afi_t afi);
59 static struct nhg_hash_entry *
60 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id);
61 static void depends_decrement_free(struct nhg_connected_tree_head *head);
62
63
64 static void nhg_connected_free(struct nhg_connected *dep)
65 {
66 XFREE(MTYPE_NHG_CONNECTED, dep);
67 }
68
69 static struct nhg_connected *nhg_connected_new(struct nhg_hash_entry *nhe)
70 {
71 struct nhg_connected *new = NULL;
72
73 new = XCALLOC(MTYPE_NHG_CONNECTED, sizeof(struct nhg_connected));
74 new->nhe = nhe;
75
76 return new;
77 }
78
79 void nhg_connected_tree_free(struct nhg_connected_tree_head *head)
80 {
81 struct nhg_connected *rb_node_dep = NULL;
82
83 if (!nhg_connected_tree_is_empty(head)) {
84 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
85 nhg_connected_tree_del(head, rb_node_dep);
86 nhg_connected_free(rb_node_dep);
87 }
88 }
89 }
90
91 bool nhg_connected_tree_is_empty(const struct nhg_connected_tree_head *head)
92 {
93 return nhg_connected_tree_count(head) ? false : true;
94 }
95
96 struct nhg_connected *
97 nhg_connected_tree_root(struct nhg_connected_tree_head *head)
98 {
99 return nhg_connected_tree_first(head);
100 }
101
102 struct nhg_hash_entry *
103 nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head,
104 struct nhg_hash_entry *depend)
105 {
106 struct nhg_connected lookup = {};
107 struct nhg_connected *remove = NULL;
108 struct nhg_hash_entry *removed_nhe;
109
110 lookup.nhe = depend;
111
112 /* Lookup to find the element, then remove it */
113 remove = nhg_connected_tree_find(head, &lookup);
114 if (remove)
115 /* Re-returning here just in case this API changes...
116 * the _del list APIs are a bit undefined at the moment.
117 *
118 * So hopefully returning here will make it fail if the API
119 * changes to something different than currently expected.
120 */
121 remove = nhg_connected_tree_del(head, remove);
122
123 /* If the entry was successfully removed, free the `connected` struct */
124 if (remove) {
125 removed_nhe = remove->nhe;
126 nhg_connected_free(remove);
127 return removed_nhe;
128 }
129
130 return NULL;
131 }
132
133 /* Assuming UNIQUE RB tree. If this changes, assumptions here about
134 * insertion need to change.
135 */
136 struct nhg_hash_entry *
137 nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head,
138 struct nhg_hash_entry *depend)
139 {
140 struct nhg_connected *new = NULL;
141
142 new = nhg_connected_new(depend);
143
144 /* On success, NULL will be returned from the
145 * RB code.
146 */
147 if (new && (nhg_connected_tree_add(head, new) == NULL))
148 return NULL;
149
150 /* If it wasn't successful, it must be a duplicate. We enforce the
151 * unique property for the `nhg_connected` tree.
152 */
153 nhg_connected_free(new);
154
155 return depend;
156 }
157
158 static void
159 nhg_connected_tree_decrement_ref(struct nhg_connected_tree_head *head)
160 {
161 struct nhg_connected *rb_node_dep = NULL;
162
163 frr_each_safe(nhg_connected_tree, head, rb_node_dep) {
164 zebra_nhg_decrement_ref(rb_node_dep->nhe);
165 }
166 }
167
168 static void
169 nhg_connected_tree_increment_ref(struct nhg_connected_tree_head *head)
170 {
171 struct nhg_connected *rb_node_dep = NULL;
172
173 frr_each(nhg_connected_tree, head, rb_node_dep) {
174 zebra_nhg_increment_ref(rb_node_dep->nhe);
175 }
176 }
177
178 struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe)
179 {
180 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE)
181 && !zebra_nhg_depends_is_empty(nhe)) {
182 nhe = nhg_connected_tree_root(&nhe->nhg_depends)->nhe;
183 return zebra_nhg_resolve(nhe);
184 }
185
186 return nhe;
187 }
188
189 unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe)
190 {
191 return nhg_connected_tree_count(&nhe->nhg_depends);
192 }
193
194 bool zebra_nhg_depends_is_empty(const struct nhg_hash_entry *nhe)
195 {
196 return nhg_connected_tree_is_empty(&nhe->nhg_depends);
197 }
198
199 static void zebra_nhg_depends_del(struct nhg_hash_entry *from,
200 struct nhg_hash_entry *depend)
201 {
202 nhg_connected_tree_del_nhe(&from->nhg_depends, depend);
203 }
204
205 static void zebra_nhg_depends_init(struct nhg_hash_entry *nhe)
206 {
207 nhg_connected_tree_init(&nhe->nhg_depends);
208 }
209
210 unsigned int zebra_nhg_dependents_count(const struct nhg_hash_entry *nhe)
211 {
212 return nhg_connected_tree_count(&nhe->nhg_dependents);
213 }
214
215
216 bool zebra_nhg_dependents_is_empty(const struct nhg_hash_entry *nhe)
217 {
218 return nhg_connected_tree_is_empty(&nhe->nhg_dependents);
219 }
220
221 static void zebra_nhg_dependents_del(struct nhg_hash_entry *from,
222 struct nhg_hash_entry *dependent)
223 {
224 nhg_connected_tree_del_nhe(&from->nhg_dependents, dependent);
225 }
226
227 static void zebra_nhg_dependents_add(struct nhg_hash_entry *to,
228 struct nhg_hash_entry *dependent)
229 {
230 nhg_connected_tree_add_nhe(&to->nhg_dependents, dependent);
231 }
232
233 static void zebra_nhg_dependents_init(struct nhg_hash_entry *nhe)
234 {
235 nhg_connected_tree_init(&nhe->nhg_dependents);
236 }
237
238 /* Release this nhe from anything depending on it */
239 static void zebra_nhg_dependents_release(struct nhg_hash_entry *nhe)
240 {
241 struct nhg_connected *rb_node_dep = NULL;
242
243 frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
244 zebra_nhg_depends_del(rb_node_dep->nhe, nhe);
245 /* recheck validity of the dependent */
246 zebra_nhg_check_valid(rb_node_dep->nhe);
247 }
248 }
249
250 /* Release this nhe from anything that it depends on */
251 static void zebra_nhg_depends_release(struct nhg_hash_entry *nhe)
252 {
253 if (!zebra_nhg_depends_is_empty(nhe)) {
254 struct nhg_connected *rb_node_dep = NULL;
255
256 frr_each_safe(nhg_connected_tree, &nhe->nhg_depends,
257 rb_node_dep) {
258 zebra_nhg_dependents_del(rb_node_dep->nhe, nhe);
259 }
260 }
261 }
262
263
264 struct nhg_hash_entry *zebra_nhg_lookup_id(uint32_t id)
265 {
266 struct nhg_hash_entry lookup = {};
267
268 lookup.id = id;
269 return hash_lookup(zrouter.nhgs_id, &lookup);
270 }
271
272 static int zebra_nhg_insert_id(struct nhg_hash_entry *nhe)
273 {
274 if (hash_lookup(zrouter.nhgs_id, nhe)) {
275 flog_err(
276 EC_ZEBRA_NHG_TABLE_INSERT_FAILED,
277 "Failed inserting NHG id=%u into the ID hash table, entry already exists",
278 nhe->id);
279 return -1;
280 }
281
282 hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern);
283
284 return 0;
285 }
286
287 static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
288 {
289 nhe->ifp = ifp;
290 if_nhg_dependents_add(ifp, nhe);
291 }
292
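/* Attach an already-built depends tree to this nhe, wire up the reverse
 * (dependents) pointers, and, for a single non-group nexthop, record the
 * interface it points out of.
 */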
293 static void
294 zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
295 struct nhg_connected_tree_head nhg_depends)
296 {
297 struct nhg_connected *rb_node_dep = NULL;
298
299 /* This has been allocated higher up in the stack. Could probably
300 * re-allocate and free the old stuff but just using the same memory
301 * for now. Otherwise, there might be a time trade-off for repeated
302 * alloc/frees at startup.
303 */
304 nhe->nhg_depends = nhg_depends;
305
306 /* Attach backpointer to anything that it depends on */
307 zebra_nhg_dependents_init(nhe);
308 if (!zebra_nhg_depends_is_empty(nhe)) {
309 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
310 zebra_nhg_dependents_add(rb_node_dep->nhe, nhe);
311 }
312 }
313
314 /* Add the ifp now if it's not a group or recursive and has an ifindex */
315 if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg->nexthop
316 && nhe->nhg->nexthop->ifindex) {
317 struct interface *ifp = NULL;
318
319 ifp = if_lookup_by_index(nhe->nhg->nexthop->ifindex,
320 nhe->nhg->nexthop->vrf_id);
321 if (ifp)
322 zebra_nhg_set_if(nhe, ifp);
323 else
324 flog_err(
325 EC_ZEBRA_IF_LOOKUP_FAILED,
326 "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
327 nhe->nhg->nexthop->ifindex,
328 nhe->nhg->nexthop->vrf_id, nhe->id);
329 }
330 }
331
332 struct nhg_hash_entry *zebra_nhg_alloc(void)
333 {
334 struct nhg_hash_entry *nhe;
335
336 nhe = XCALLOC(MTYPE_NHG, sizeof(struct nhg_hash_entry));
337
338 return nhe;
339 }
340
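/* Duplicate an nhe under the given ID; the nexthop group itself is
 * deep-copied and the new entry starts with a refcnt of zero.
 */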
341 static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy,
342 uint32_t id)
343 {
344 struct nhg_hash_entry *nhe;
345
346 nhe = zebra_nhg_alloc();
347
348 nhe->id = id;
349
350 nhe->nhg = nexthop_group_new();
351 nexthop_group_copy(nhe->nhg, copy->nhg);
352
353 nhe->vrf_id = copy->vrf_id;
354 nhe->afi = copy->afi;
355 nhe->type = copy->type ? copy->type : ZEBRA_ROUTE_NHG;
356 nhe->refcnt = 0;
357 nhe->dplane_ref = zebra_router_get_next_sequence();
358
359 return nhe;
360 }
361
362 /* Allocation via hash handler */
363 static void *zebra_nhg_hash_alloc(void *arg)
364 {
365 struct nhg_hash_entry *nhe = NULL;
366 struct nhg_hash_entry *copy = arg;
367
368 nhe = zebra_nhg_copy(copy, copy->id);
369
370 /* Mark duplicate nexthops in a group at creation time. */
371 nexthop_group_mark_duplicates(nhe->nhg);
372
373 zebra_nhg_connect_depends(nhe, copy->nhg_depends);
374 zebra_nhg_insert_id(nhe);
375
376 return nhe;
377 }
378
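/* Hash key for the main NHG table: vrf, afi and the hash of the
 * nexthop group contents are mixed into a single jhash value.
 */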
379 uint32_t zebra_nhg_hash_key(const void *arg)
380 {
381 const struct nhg_hash_entry *nhe = arg;
382
383 uint32_t key = 0x5a351234;
384
385 key = jhash_3words(nhe->vrf_id, nhe->afi, nexthop_group_hash(nhe->nhg),
386 key);
387
388 return key;
389 }
390
391 uint32_t zebra_nhg_id_key(const void *arg)
392 {
393 const struct nhg_hash_entry *nhe = arg;
394
395 return nhe->id;
396 }
397
398 bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
399 {
400 const struct nhg_hash_entry *nhe1 = arg1;
401 const struct nhg_hash_entry *nhe2 = arg2;
402 struct nexthop *nexthop1;
403 struct nexthop *nexthop2;
404
405 /* If they have equal IDs, assume they are equal no matter what */
406 if (nhe1->id && nhe2->id && (nhe1->id == nhe2->id))
407 return true;
408
409 if (nhe1->vrf_id != nhe2->vrf_id)
410 return false;
411
412 if (nhe1->afi != nhe2->afi)
413 return false;
414
415 /* Nexthops should be sorted */
416 for (nexthop1 = nhe1->nhg->nexthop, nexthop2 = nhe2->nhg->nexthop;
417 nexthop1 || nexthop2;
418 nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
419 if (nexthop1 && !nexthop2)
420 return false;
421
422 if (!nexthop1 && nexthop2)
423 return false;
424
425 /*
426 * We have to check the active flag of each individual one,
427 * not just the overall active_num. This solves the special case
428 * issue of a route with a nexthop group with one nexthop
429 * resolving to itself and thus marking it inactive. If we
430 * have two different routes each wanting to mark a different
431 * nexthop inactive, they need to hash to two different groups.
432 *
433 * If we just hashed on num_active, they would hash the same
434 * which is incorrect.
435 *
436 * ex)
437 * 1.1.1.0/24
438 * -> 1.1.1.1 dummy1 (inactive)
439 * -> 1.1.2.1 dummy2
440 *
441 * 1.1.2.0/24
442 * -> 1.1.1.1 dummy1
443 * -> 1.1.2.1 dummy2 (inactive)
444 *
445 * Without checking each individual one, they would hash to
446 * the same group and both have 1.1.1.1 dummy1 marked inactive.
447 *
448 */
449 if (CHECK_FLAG(nexthop1->flags, NEXTHOP_FLAG_ACTIVE)
450 != CHECK_FLAG(nexthop2->flags, NEXTHOP_FLAG_ACTIVE))
451 return false;
452
453 if (!nexthop_same(nexthop1, nexthop2))
454 return false;
455 }
456
457 return true;
458 }
459
460 bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2)
461 {
462 const struct nhg_hash_entry *nhe1 = arg1;
463 const struct nhg_hash_entry *nhe2 = arg2;
464
465 return nhe1->id == nhe2->id;
466 }
467
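/* Build a nexthop group (and its depends tree) from an array of kernel
 * nexthop IDs; every referenced ID must already be in our ID table.
 */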
468 static int zebra_nhg_process_grp(struct nexthop_group *nhg,
469 struct nhg_connected_tree_head *depends,
470 struct nh_grp *grp, uint8_t count)
471 {
472 nhg_connected_tree_init(depends);
473
474 for (int i = 0; i < count; i++) {
475 struct nhg_hash_entry *depend = NULL;
476 /* We do not care about nexthop_grp.weight at
477 * this time. But we should figure out
478 * how to adapt this to our code in
479 * the future.
480 */
481 depend = depends_find_id_add(depends, grp[i].id);
482
483 if (!depend) {
484 flog_err(
485 EC_ZEBRA_NHG_SYNC,
486 "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table",
487 grp[i].id);
488 return -1;
489 }
490
491 /*
492 * If this is a nexthop with its own group
493 * dependencies, add them as well. Not sure it's
494 * even possible to have a group within a group
495 * in the kernel.
496 */
497
498 copy_nexthops(&nhg->nexthop, depend->nhg->nexthop, NULL);
499 }
500
501 return 0;
502 }
503
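/* For a recursive nexthop, find/create an nhe for its resolved nexthop
 * chain and add it to the depends tree.
 */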
504 static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends,
505 struct nexthop *nh, afi_t afi)
506 {
507 struct nhg_hash_entry *depend = NULL;
508 struct nexthop_group resolved_ng = {};
509
510 resolved_ng.nexthop = nh;
511
512 depend = zebra_nhg_rib_find(0, &resolved_ng, afi);
513
514 if (depend)
515 depends_add(nhg_depends, depend);
516 }
517
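/* Look up an nhe by ID (kernel case) or by hash, creating it if it does
 * not exist yet. Returns true if a new entry was created.
 */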
518 static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
519 struct nexthop_group *nhg,
520 struct nhg_connected_tree_head *nhg_depends,
521 vrf_id_t vrf_id, afi_t afi, int type)
522 {
523 struct nhg_hash_entry lookup = {};
524
525 uint32_t old_id_counter = id_counter;
526
527 bool created = false;
528 bool recursive = false;
529
530 /*
531 * If it has an id at this point, we must have gotten it from the kernel
532 */
533 lookup.id = id ? id : ++id_counter;
534
535 lookup.type = type ? type : ZEBRA_ROUTE_NHG;
536 lookup.nhg = nhg;
537
538 lookup.vrf_id = vrf_id;
539 if (lookup.nhg->nexthop->next) {
540 /* Groups can have all VRFs and AFs in them */
541 lookup.afi = AFI_UNSPEC;
542 } else {
543 switch (lookup.nhg->nexthop->type) {
544 case (NEXTHOP_TYPE_IFINDEX):
545 case (NEXTHOP_TYPE_BLACKHOLE):
546 /*
547 * This switch case handles setting the afi differently
548 * for ipv4/v6 routes. Ifindex/blackhole nexthop
549 * objects cannot be ambiguous; they must be Address
550 * Family specific. If we get here, we will either use
551 * the AF of the route, or the one we were passed
552 * from the kernel.
553 */
554 lookup.afi = afi;
555 break;
556 case (NEXTHOP_TYPE_IPV4_IFINDEX):
557 case (NEXTHOP_TYPE_IPV4):
558 lookup.afi = AFI_IP;
559 break;
560 case (NEXTHOP_TYPE_IPV6_IFINDEX):
561 case (NEXTHOP_TYPE_IPV6):
562 lookup.afi = AFI_IP6;
563 break;
564 }
565 }
566
567 if (id)
568 (*nhe) = zebra_nhg_lookup_id(id);
569 else
570 (*nhe) = hash_lookup(zrouter.nhgs, &lookup);
571
572 /* If it found an nhe in our tables, this new ID is unused */
573 if (*nhe)
574 id_counter = old_id_counter;
575
576 if (!(*nhe)) {
577 /* Only hash/lookup the depends if the first lookup
578 * fails to find something. This should hopefully save a
579 * lot of cycles for larger ecmp sizes.
580 */
581 if (nhg_depends)
582 /* If you don't want to hash on each nexthop in the
583 * nexthop group struct you can pass the depends
584 * directly. Kernel-side we do this since it just looks
585 * them up via IDs.
586 */
587 lookup.nhg_depends = *nhg_depends;
588 else {
589 if (nhg->nexthop->next) {
590 zebra_nhg_depends_init(&lookup);
591
592 /* If it's a group, create a dependency tree */
593 struct nexthop *nh = NULL;
594
595 for (nh = nhg->nexthop; nh; nh = nh->next)
596 depends_find_add(&lookup.nhg_depends,
597 nh, afi);
598 } else if (CHECK_FLAG(nhg->nexthop->flags,
599 NEXTHOP_FLAG_RECURSIVE)) {
600 zebra_nhg_depends_init(&lookup);
601 handle_recursive_depend(&lookup.nhg_depends,
602 nhg->nexthop->resolved,
603 afi);
604 recursive = true;
605 }
606 }
607
608 (*nhe) = hash_get(zrouter.nhgs, &lookup, zebra_nhg_hash_alloc);
609 created = true;
610
611 if (recursive)
612 SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE);
613 }
614 return created;
615 }
616
617 /* Find/create a single nexthop */
618 static struct nhg_hash_entry *
619 zebra_nhg_find_nexthop(uint32_t id, struct nexthop *nh, afi_t afi, int type)
620 {
621 struct nhg_hash_entry *nhe = NULL;
622 struct nexthop_group nhg = {};
623 vrf_id_t vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nh->vrf_id;
624
625 nexthop_group_add_sorted(&nhg, nh);
626
627 zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type);
628
629 return nhe;
630 }
631
632 static uint32_t nhg_ctx_get_id(const struct nhg_ctx *ctx)
633 {
634 return ctx->id;
635 }
636
637 static void nhg_ctx_set_status(struct nhg_ctx *ctx, enum nhg_ctx_status status)
638 {
639 ctx->status = status;
640 }
641
642 static enum nhg_ctx_status nhg_ctx_get_status(const struct nhg_ctx *ctx)
643 {
644 return ctx->status;
645 }
646
647 static void nhg_ctx_set_op(struct nhg_ctx *ctx, enum nhg_ctx_op_e op)
648 {
649 ctx->op = op;
650 }
651
652 static enum nhg_ctx_op_e nhg_ctx_get_op(const struct nhg_ctx *ctx)
653 {
654 return ctx->op;
655 }
656
657 static vrf_id_t nhg_ctx_get_vrf_id(const struct nhg_ctx *ctx)
658 {
659 return ctx->vrf_id;
660 }
661
662 static int nhg_ctx_get_type(const struct nhg_ctx *ctx)
663 {
664 return ctx->type;
665 }
666
667 static int nhg_ctx_get_afi(const struct nhg_ctx *ctx)
668 {
669 return ctx->afi;
670 }
671
672 static struct nexthop *nhg_ctx_get_nh(struct nhg_ctx *ctx)
673 {
674 return &ctx->u.nh;
675 }
676
677 static uint8_t nhg_ctx_get_count(const struct nhg_ctx *ctx)
678 {
679 return ctx->count;
680 }
681
682 static struct nh_grp *nhg_ctx_get_grp(struct nhg_ctx *ctx)
683 {
684 return ctx->u.grp;
685 }
686
687 static struct nhg_ctx *nhg_ctx_new(void)
688 {
689 struct nhg_ctx *new = NULL;
690
691 new = XCALLOC(MTYPE_NHG_CTX, sizeof(struct nhg_ctx));
692
693 return new;
694 }
695
696 static void nhg_ctx_free(struct nhg_ctx **ctx)
697 {
698 struct nexthop *nh;
699
700 if (ctx == NULL)
701 return;
702
703 assert((*ctx) != NULL);
704
705 if (nhg_ctx_get_count(*ctx))
706 goto done;
707
708 nh = nhg_ctx_get_nh(*ctx);
709
710 nexthop_del_labels(nh);
711
712 done:
713 XFREE(MTYPE_NHG_CTX, *ctx);
714 *ctx = NULL;
715 }
716
717 static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh,
718 struct nh_grp *grp, vrf_id_t vrf_id,
719 afi_t afi, int type, uint8_t count)
720 {
721 struct nhg_ctx *ctx = NULL;
722
723 ctx = nhg_ctx_new();
724
725 ctx->id = id;
726 ctx->vrf_id = vrf_id;
727 ctx->afi = afi;
728 ctx->type = type;
729 ctx->count = count;
730
731 if (count)
732 /* Copy over the array */
733 memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp));
734 else if (nh)
735 ctx->u.nh = *nh;
736
737 return ctx;
738 }
739
740 static bool zebra_nhg_contains_unhashable(struct nhg_hash_entry *nhe)
741 {
742 struct nhg_connected *rb_node_dep = NULL;
743
744 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
745 if (CHECK_FLAG(rb_node_dep->nhe->flags,
746 NEXTHOP_GROUP_UNHASHABLE))
747 return true;
748 }
749
750 return false;
751 }
752
753 static void zebra_nhg_set_unhashable(struct nhg_hash_entry *nhe)
754 {
755 SET_FLAG(nhe->flags, NEXTHOP_GROUP_UNHASHABLE);
756 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
757
758 flog_warn(
759 EC_ZEBRA_DUPLICATE_NHG_MESSAGE,
760 "Nexthop Group with ID (%d) is a duplicate, therefore unhashable, ignoring",
761 nhe->id);
762 }
763
764 static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe)
765 {
766 struct nhg_connected *rb_node_dep;
767
768 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
769
770 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
771 zebra_nhg_set_valid(rb_node_dep->nhe);
772 }
773
774 static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
775 {
776 struct nhg_connected *rb_node_dep;
777
778 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
779
780 /* Update validity of nexthops depending on it */
781 frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
782 zebra_nhg_check_valid(rb_node_dep->nhe);
783 }
784
785 void zebra_nhg_check_valid(struct nhg_hash_entry *nhe)
786 {
787 struct nhg_connected *rb_node_dep = NULL;
788 bool valid = false;
789
790 /* If anything else in the group is valid, the group is valid */
791 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
792 if (CHECK_FLAG(rb_node_dep->nhe->flags, NEXTHOP_GROUP_VALID)) {
793 valid = true;
794 goto done;
795 }
796 }
797
798 done:
799 if (valid)
800 zebra_nhg_set_valid(nhe);
801 else
802 zebra_nhg_set_invalid(nhe);
803 }
804
805
806 static void zebra_nhg_release(struct nhg_hash_entry *nhe)
807 {
808 /* Remove it from any lists it may be on */
809 zebra_nhg_depends_release(nhe);
810 zebra_nhg_dependents_release(nhe);
811 if (nhe->ifp)
812 if_nhg_dependents_del(nhe->ifp, nhe);
813
814 /*
815 * If it's unhashable, we didn't store it here and have to be
816 * sure we don't clear one that's actually being used.
817 */
818 if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_UNHASHABLE))
819 hash_release(zrouter.nhgs, nhe);
820
821 hash_release(zrouter.nhgs_id, nhe);
822 }
823
824 static void zebra_nhg_handle_uninstall(struct nhg_hash_entry *nhe)
825 {
826 zebra_nhg_release(nhe);
827 zebra_nhg_free(nhe);
828 }
829
830 static void zebra_nhg_handle_install(struct nhg_hash_entry *nhe)
831 {
832 /* Update validity of groups depending on it */
833 struct nhg_connected *rb_node_dep;
834
835 frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
836 zebra_nhg_set_valid(rb_node_dep->nhe);
837 }
838
839 /*
840 * The kernel/other program has changed the state of a nexthop object we are
841 * using.
842 */
843 static void zebra_nhg_handle_kernel_state_change(struct nhg_hash_entry *nhe,
844 bool is_delete)
845 {
846 if (nhe->refcnt) {
847 flog_err(
848 EC_ZEBRA_NHG_SYNC,
849 "Kernel %s a nexthop group with ID (%u) that we are still using for a route, sending it back down",
850 (is_delete ? "deleted" : "updated"), nhe->id);
851
852 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
853 zebra_nhg_install_kernel(nhe);
854 } else
855 zebra_nhg_handle_uninstall(nhe);
856 }
857
858 static int nhg_ctx_process_new(struct nhg_ctx *ctx)
859 {
860 struct nexthop_group *nhg = NULL;
861 struct nhg_connected_tree_head nhg_depends = {};
862 struct nhg_hash_entry *lookup = NULL;
863 struct nhg_hash_entry *nhe = NULL;
864
865 uint32_t id = nhg_ctx_get_id(ctx);
866 uint8_t count = nhg_ctx_get_count(ctx);
867 vrf_id_t vrf_id = nhg_ctx_get_vrf_id(ctx);
868 int type = nhg_ctx_get_type(ctx);
869 afi_t afi = nhg_ctx_get_afi(ctx);
870
871 lookup = zebra_nhg_lookup_id(id);
872
873 if (lookup) {
874 /* This is already present in our table, hence an update
875 * that we did not initiate.
876 */
877 zebra_nhg_handle_kernel_state_change(lookup, false);
878 return 0;
879 }
880
881 if (nhg_ctx_get_count(ctx)) {
882 nhg = nexthop_group_new();
883 if (zebra_nhg_process_grp(nhg, &nhg_depends,
884 nhg_ctx_get_grp(ctx), count)) {
885 depends_decrement_free(&nhg_depends);
886 nexthop_group_delete(&nhg);
887 return -ENOENT;
888 }
889
890 if (!zebra_nhg_find(&nhe, id, nhg, &nhg_depends, vrf_id, afi,
891 type))
892 depends_decrement_free(&nhg_depends);
893
894 /* These got copied over in zebra_nhg_hash_alloc() */
895 nexthop_group_delete(&nhg);
896 } else
897 nhe = zebra_nhg_find_nexthop(id, nhg_ctx_get_nh(ctx), afi,
898 type);
899
900 if (nhe) {
901 if (id != nhe->id) {
902 struct nhg_hash_entry *kernel_nhe = NULL;
903
904 /* Duplicate but with different ID from
905 * the kernel
906 */
907
908 /* The kernel allows duplicate nexthops
909 * as long as they have different IDs.
910 * We are ignoring those to prevent
911 * syncing problems with the kernel
912 * changes.
913 *
914 * We maintain them *ONLY* in the ID hash table to
915 * track them and set the flag to indicate
916 * their attributes are unhashable.
917 */
918
919 kernel_nhe = zebra_nhg_copy(nhe, id);
920 zebra_nhg_insert_id(kernel_nhe);
921 zebra_nhg_set_unhashable(kernel_nhe);
922 } else if (zebra_nhg_contains_unhashable(nhe)) {
923 /* The group we got contains an unhashable/duplicated
924 * depend, so let's mark this group as unhashable as well
925 * and release it from the non-ID hash.
926 */
927 hash_release(zrouter.nhgs, nhe);
928 zebra_nhg_set_unhashable(nhe);
929 } else {
930 /* It actually created a new nhe */
931 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
932 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
933 }
934 } else {
935 flog_err(
936 EC_ZEBRA_TABLE_LOOKUP_FAILED,
937 "Zebra failed to find or create a nexthop hash entry for ID (%u)",
938 id);
939 return -1;
940 }
941
942 return 0;
943 }
944
945 static int nhg_ctx_process_del(struct nhg_ctx *ctx)
946 {
947 struct nhg_hash_entry *nhe = NULL;
948 uint32_t id = nhg_ctx_get_id(ctx);
949
950 nhe = zebra_nhg_lookup_id(id);
951
952 if (!nhe) {
953 flog_warn(
954 EC_ZEBRA_BAD_NHG_MESSAGE,
955 "Kernel delete message received for nexthop group ID (%u) that we do not have in our ID table",
956 id);
957 return -1;
958 }
959
960 zebra_nhg_handle_kernel_state_change(nhe, true);
961
962 return 0;
963 }
964
965 static void nhg_ctx_fini(struct nhg_ctx **ctx)
966 {
967 /*
968 * Just freeing for now, maybe do something more in the future
969 * based on flag.
970 */
971
972 nhg_ctx_free(ctx);
973 }
974
975 static int queue_add(struct nhg_ctx *ctx)
976 {
977 /* If it's queued or already processed, do nothing */
978 if (nhg_ctx_get_status(ctx) == NHG_CTX_QUEUED)
979 return 0;
980
981 if (rib_queue_nhg_add(ctx)) {
982 nhg_ctx_set_status(ctx, NHG_CTX_FAILURE);
983 return -1;
984 }
985
986 nhg_ctx_set_status(ctx, NHG_CTX_QUEUED);
987
988 return 0;
989 }
990
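/* Process a nexthop context from the kernel. A new group whose member
 * nexthops we have not seen yet is re-queued exactly once.
 */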
991 int nhg_ctx_process(struct nhg_ctx *ctx)
992 {
993 int ret = 0;
994
995 switch (nhg_ctx_get_op(ctx)) {
996 case NHG_CTX_OP_NEW:
997 ret = nhg_ctx_process_new(ctx);
998 if (nhg_ctx_get_count(ctx) && ret == -ENOENT
999 && nhg_ctx_get_status(ctx) != NHG_CTX_REQUEUED) {
1000 /**
1001 * We have entered a situation where we are
1002 * processing a group from the kernel
1003 * that has a contained nexthop which
1004 * we have not yet processed.
1005 *
1006 * Re-enqueue this ctx to be handled exactly one
1007 * more time (indicated by the flag).
1008 *
1009 * By the time we get back to it, we
1010 * should have processed its depends.
1011 */
1012 nhg_ctx_set_status(ctx, NHG_CTX_NONE);
1013 if (queue_add(ctx) == 0) {
1014 nhg_ctx_set_status(ctx, NHG_CTX_REQUEUED);
1015 return 0;
1016 }
1017 }
1018 break;
1019 case NHG_CTX_OP_DEL:
1020 ret = nhg_ctx_process_del(ctx);
1021 case NHG_CTX_OP_NONE:
1022 break;
1023 }
1024
1025 nhg_ctx_set_status(ctx, (ret ? NHG_CTX_FAILURE : NHG_CTX_SUCCESS));
1026
1027 nhg_ctx_fini(&ctx);
1028
1029 return ret;
1030 }
1031
1032 /* Kernel-side, you either get a single new nexthop or an array of IDs */
1033 int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
1034 uint8_t count, vrf_id_t vrf_id, afi_t afi, int type,
1035 int startup)
1036 {
1037 struct nhg_ctx *ctx = NULL;
1038
1039 if (id > id_counter)
1040 /* Increase our counter so we don't try to create
1041 * an ID that already exists
1042 */
1043 id_counter = id;
1044
1045 ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count);
1046 nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);
1047
1048 /* Under startup conditions, we need to handle them immediately
1049 * like we do for routes. Otherwise, we are going to get a route
1050 * with a nhe_id that we have not handled.
1051 */
1052 if (startup)
1053 return nhg_ctx_process(ctx);
1054
1055 if (queue_add(ctx)) {
1056 nhg_ctx_fini(&ctx);
1057 return -1;
1058 }
1059
1060 return 0;
1061 }
1062
1063 /* Kernel-side, received delete message */
1064 int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id)
1065 {
1066 struct nhg_ctx *ctx = NULL;
1067
1068 ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0);
1069
1070 nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL);
1071
1072 if (queue_add(ctx)) {
1073 nhg_ctx_fini(&ctx);
1074 return -1;
1075 }
1076
1077 return 0;
1078 }
1079
1080 /* Some dependency helper functions */
1081 static struct nhg_hash_entry *depends_find_recursive(const struct nexthop *nh,
1082 afi_t afi)
1083 {
1084 struct nhg_hash_entry *nhe;
1085 struct nexthop *lookup = NULL;
1086
1087 lookup = nexthop_dup(nh, NULL);
1088
1089 nhe = zebra_nhg_find_nexthop(0, lookup, afi, 0);
1090
1091 nexthops_free(lookup);
1092
1093 return nhe;
1094 }
1095
1096 static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
1097 afi_t afi)
1098 {
1099 struct nhg_hash_entry *nhe;
1100 struct nexthop lookup = {};
1101
1102 /* Capture a snapshot of this single nh; it might be part of a list,
1103 * so we need to make a standalone copy.
1104 */
1105 nexthop_copy_no_recurse(&lookup, nh, NULL);
1106
1107 nhe = zebra_nhg_find_nexthop(0, &lookup, afi, 0);
1108
1109 /* The copy may have allocated labels; free them if necessary. */
1110 nexthop_del_labels(&lookup);
1111
1112 return nhe;
1113 }
1114
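/* Find/create the nhe a single nexthop depends on; the recursive and
 * non-recursive cases are split so the common case avoids an alloc/free.
 */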
1115 static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
1116 {
1117 struct nhg_hash_entry *nhe = NULL;
1118
1119 if (!nh)
1120 goto done;
1121
1122 /* We are separating these functions out to increase handling speed
1123 * in the non-recursive case (by not alloc/freeing)
1124 */
1125 if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
1126 nhe = depends_find_recursive(nh, afi);
1127 else
1128 nhe = depends_find_singleton(nh, afi);
1129
1130 done:
1131 return nhe;
1132 }
1133
1134 static void depends_add(struct nhg_connected_tree_head *head,
1135 struct nhg_hash_entry *depend)
1136 {
1137 /* If NULL is returned, it was successfully added and
1138 * needs to have its refcnt incremented.
1139 *
1140 * Else the NHE is already present in the tree and doesn't
1141 * need to increment the refcnt.
1142 */
1143 if (nhg_connected_tree_add_nhe(head, depend) == NULL)
1144 zebra_nhg_increment_ref(depend);
1145 }
1146
1147 static struct nhg_hash_entry *
1148 depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
1149 afi_t afi)
1150 {
1151 struct nhg_hash_entry *depend = NULL;
1152
1153 depend = depends_find(nh, afi);
1154
1155 if (depend)
1156 depends_add(head, depend);
1157
1158 return depend;
1159 }
1160
1161 static struct nhg_hash_entry *
1162 depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id)
1163 {
1164 struct nhg_hash_entry *depend = NULL;
1165
1166 depend = zebra_nhg_lookup_id(id);
1167
1168 if (depend)
1169 depends_add(head, depend);
1170
1171 return depend;
1172 }
1173
1174 static void depends_decrement_free(struct nhg_connected_tree_head *head)
1175 {
1176 nhg_connected_tree_decrement_ref(head);
1177 nhg_connected_tree_free(head);
1178 }
1179
1180 /* Rib-side, you get a nexthop group struct */
1181 struct nhg_hash_entry *
1182 zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
1183 {
1184 struct nhg_hash_entry *nhe = NULL;
1185 vrf_id_t vrf_id;
1186
1187 if (!(nhg && nhg->nexthop)) {
1188 flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
1189 "No nexthop passed to %s", __func__);
1190 return NULL;
1191 }
1192
1193 /*
1194 * CLANG SA is complaining that nexthop may be NULL;
1195 * the check above guarantees it is not.
1196 */
1197 assert(nhg->nexthop);
1198 vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nhg->nexthop->vrf_id;
1199
1200 zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0);
1201
1202 return nhe;
1203 }
1204
1205 static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
1206 {
1207 nexthop_group_delete(&nhe->nhg);
1208 /* Decrement to remove connection ref */
1209 nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
1210 nhg_connected_tree_free(&nhe->nhg_depends);
1211 nhg_connected_tree_free(&nhe->nhg_dependents);
1212 }
1213
1214 void zebra_nhg_free(struct nhg_hash_entry *nhe)
1215 {
1216 if (nhe->refcnt)
1217 zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt);
1218
1219 zebra_nhg_free_members(nhe);
1220
1221 XFREE(MTYPE_NHG, nhe);
1222 }
1223
1224 void zebra_nhg_hash_free(void *p)
1225 {
1226 zebra_nhg_free((struct nhg_hash_entry *)p);
1227 }
1228
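/* Ref-count helpers: a refcnt change on an nhe is propagated to
 * everything it depends on; dropping to zero triggers an uninstall
 * for entries we created.
 */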
1229 void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
1230 {
1231 nhe->refcnt--;
1232
1233 if (!zebra_nhg_depends_is_empty(nhe))
1234 nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
1235
1236 if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0)
1237 zebra_nhg_uninstall_kernel(nhe);
1238 }
1239
1240 void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
1241 {
1242 nhe->refcnt++;
1243
1244 if (!zebra_nhg_depends_is_empty(nhe))
1245 nhg_connected_tree_increment_ref(&nhe->nhg_depends);
1246 }
1247
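/* Build a resolved nexthop from the resolving route's nexthop, stacking
 * labels from both the parent and the resolving nexthop, and attach it
 * to the parent's resolved list.
 */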
1248 static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
1249 struct nexthop *nexthop)
1250 {
1251 struct nexthop *resolved_hop;
1252 uint8_t num_labels = 0;
1253 mpls_label_t labels[MPLS_MAX_LABELS];
1254 enum lsp_types_t label_type = ZEBRA_LSP_NONE;
1255 int i = 0;
1256
1257 resolved_hop = nexthop_new();
1258 SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
1259
1260 resolved_hop->vrf_id = nexthop->vrf_id;
1261 switch (newhop->type) {
1262 case NEXTHOP_TYPE_IPV4:
1263 case NEXTHOP_TYPE_IPV4_IFINDEX:
1264 /* If the resolving route specifies a gateway, use it */
1265 resolved_hop->type = newhop->type;
1266 resolved_hop->gate.ipv4 = newhop->gate.ipv4;
1267
1268 if (newhop->ifindex) {
1269 resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
1270 resolved_hop->ifindex = newhop->ifindex;
1271 }
1272 break;
1273 case NEXTHOP_TYPE_IPV6:
1274 case NEXTHOP_TYPE_IPV6_IFINDEX:
1275 resolved_hop->type = newhop->type;
1276 resolved_hop->gate.ipv6 = newhop->gate.ipv6;
1277
1278 if (newhop->ifindex) {
1279 resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
1280 resolved_hop->ifindex = newhop->ifindex;
1281 }
1282 break;
1283 case NEXTHOP_TYPE_IFINDEX:
1284 /* If the resolving route is an interface route,
1285 * it means the gateway we are looking up is connected
1286 * to that interface. (The actual network is _not_ onlink).
1287 * Therefore, the resolved route should have the original
1288 * gateway as nexthop as it is directly connected.
1289 *
1290 * On Linux, we have to set the onlink netlink flag because
1291 * otherwise, the kernel won't accept the route.
1292 */
1293 resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
1294 if (afi == AFI_IP) {
1295 resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
1296 resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
1297 } else if (afi == AFI_IP6) {
1298 resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
1299 resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
1300 }
1301 resolved_hop->ifindex = newhop->ifindex;
1302 break;
1303 case NEXTHOP_TYPE_BLACKHOLE:
1304 resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
1305 resolved_hop->bh_type = newhop->bh_type;
1306 break;
1307 }
1308
1309 if (newhop->flags & NEXTHOP_FLAG_ONLINK)
1310 resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
1311
1312 /* Copy labels of the resolved route and the parent resolving to it */
1313 if (newhop->nh_label) {
1314 for (i = 0; i < newhop->nh_label->num_labels; i++)
1315 labels[num_labels++] = newhop->nh_label->label[i];
1316 label_type = newhop->nh_label_type;
1317 }
1318
1319 if (nexthop->nh_label) {
1320 for (i = 0; i < nexthop->nh_label->num_labels; i++)
1321 labels[num_labels++] = nexthop->nh_label->label[i];
1322
1323 /* If the parent has labels, use its type */
1324 label_type = nexthop->nh_label_type;
1325 }
1326
1327 if (num_labels)
1328 nexthop_add_labels(resolved_hop, label_type, num_labels,
1329 labels);
1330
1331 resolved_hop->rparent = nexthop;
1332 _nexthop_add(&nexthop->resolved, resolved_hop);
1333 }
1334
1335 /* Checks if nexthop we are trying to resolve to is valid */
1336 static bool nexthop_valid_resolve(const struct nexthop *nexthop,
1337 const struct nexthop *resolved)
1338 {
1339 /* Can't resolve to a recursive nexthop */
1340 if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE))
1341 return false;
1342
1343 switch (nexthop->type) {
1344 case NEXTHOP_TYPE_IPV4_IFINDEX:
1345 case NEXTHOP_TYPE_IPV6_IFINDEX:
1346 /* If the nexthop we are resolving to does not match the
1347 * ifindex for the nexthop the route wanted, its not valid.
1348 */
1349 if (nexthop->ifindex != resolved->ifindex)
1350 return false;
1351 break;
1352 case NEXTHOP_TYPE_IPV4:
1353 case NEXTHOP_TYPE_IPV6:
1354 case NEXTHOP_TYPE_IFINDEX:
1355 case NEXTHOP_TYPE_BLACKHOLE:
1356 break;
1357 }
1358
1359 return true;
1360 }
1361
1362 /*
1363 * Given a nexthop we need to properly recursively resolve
1364 * the route. As such, do a table lookup to find and match
1365 * if at all possible. Set the nexthop->ifindex and resolved_id
1366 * as appropriate
1367 */
1368 static int nexthop_active(afi_t afi, struct route_entry *re,
1369 struct nexthop *nexthop, struct route_node *top)
1370 {
1371 struct prefix p;
1372 struct route_table *table;
1373 struct route_node *rn;
1374 struct route_entry *match = NULL;
1375 int resolved;
1376 struct nexthop *newhop;
1377 struct interface *ifp;
1378 rib_dest_t *dest;
1379 struct zebra_vrf *zvrf;
1380
1381 if ((nexthop->type == NEXTHOP_TYPE_IPV4)
1382 || nexthop->type == NEXTHOP_TYPE_IPV6)
1383 nexthop->ifindex = 0;
1384
1385
1386 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
1387 nexthops_free(nexthop->resolved);
1388 nexthop->resolved = NULL;
1389 re->nexthop_mtu = 0;
1390
1391 /*
1392 * If the kernel has sent us a NEW route, then
1393 * by golly gee whiz it's a good route.
1394 *
1395 * If it's an already INSTALLED route we have already handled, then the
1396 * kernel route's nexthop might have become unreachable
1397 * and we have to handle that.
1398 */
1399 if (!CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
1400 && (re->type == ZEBRA_ROUTE_KERNEL
1401 || re->type == ZEBRA_ROUTE_SYSTEM))
1402 return 1;
1403
1404 /*
1405 * Check to see if we should trust the passed in information
1406 * for UNNUMBERED interfaces as that we won't find the GW
1407 * address in the routing table.
1408 * This check should suffice to handle IPv4 or IPv6 routes
1409 * sourced from EVPN routes which are installed with the
1410 * next hop as the remote VTEP IP.
1411 */
1412 if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
1413 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
1414 if (!ifp) {
1415 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1416 zlog_debug(
1417 "\t%s: Onlink and interface: %u[%u] does not exist",
1418 __PRETTY_FUNCTION__, nexthop->ifindex,
1419 nexthop->vrf_id);
1420 return 0;
1421 }
1422 if (connected_is_unnumbered(ifp)) {
1423 if (if_is_operative(ifp))
1424 return 1;
1425
1426 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1427 zlog_debug(
1428 "\t%s: Onlink and interface %s is not operative",
1429 __PRETTY_FUNCTION__, ifp->name);
1430 return 0;
1431 }
1432 if (!if_is_operative(ifp)) {
1433 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1434 zlog_debug(
1435 "\t%s: Interface %s is not unnumbered",
1436 __PRETTY_FUNCTION__, ifp->name);
1437 return 0;
1438 }
1439 }
1440
1441 if ((top->p.family == AF_INET && top->p.prefixlen == 32
1442 && nexthop->gate.ipv4.s_addr == top->p.u.prefix4.s_addr)
1443 || (top->p.family == AF_INET6 && top->p.prefixlen == 128
1444 && memcmp(&nexthop->gate.ipv6, &top->p.u.prefix6, 16) == 0)) {
1445 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1446 zlog_debug(
1447 "\t:%s: Attempting to install a max prefixlength route through itself",
1448 __PRETTY_FUNCTION__);
1449 return 0;
1450 }
1451
1452 /* Make lookup prefix. */
1453 memset(&p, 0, sizeof(struct prefix));
1454 switch (afi) {
1455 case AFI_IP:
1456 p.family = AF_INET;
1457 p.prefixlen = IPV4_MAX_PREFIXLEN;
1458 p.u.prefix4 = nexthop->gate.ipv4;
1459 break;
1460 case AFI_IP6:
1461 p.family = AF_INET6;
1462 p.prefixlen = IPV6_MAX_PREFIXLEN;
1463 p.u.prefix6 = nexthop->gate.ipv6;
1464 break;
1465 default:
1466 assert(afi != AFI_IP && afi != AFI_IP6);
1467 break;
1468 }
1469 /* Lookup table. */
1470 table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
1471 /* get zvrf */
1472 zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
1473 if (!table || !zvrf) {
1474 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1475 zlog_debug("\t%s: Table not found",
1476 __PRETTY_FUNCTION__);
1477 return 0;
1478 }
1479
1480 rn = route_node_match(table, (struct prefix *)&p);
1481 while (rn) {
1482 route_unlock_node(rn);
1483
1484 /* Lookup should halt if we've matched against ourselves ('top',
1485 * if specified) - i.e., a nexthop cannot be resolved by the
1486 * very route that contains it. The exception is if the route
1487 * is a host route.
1488 */
1489 if (top && rn == top)
1490 if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
1491 || ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) {
1492 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1493 zlog_debug(
1494 "\t%s: Matched against ourself and prefix length is not max bit length",
1495 __PRETTY_FUNCTION__);
1496 return 0;
1497 }
1498
1499 /* Pick up selected route. */
1500 /* However, do not resolve over default route unless explicitly
1501 * allowed.
1502 */
1503 if (is_default_prefix(&rn->p)
1504 && !rnh_resolve_via_default(zvrf, p.family)) {
1505 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1506 zlog_debug(
1507 "\t:%s: Resolved against default route",
1508 __PRETTY_FUNCTION__);
1509 return 0;
1510 }
1511
1512 dest = rib_dest_from_rnode(rn);
1513 if (dest && dest->selected_fib
1514 && !CHECK_FLAG(dest->selected_fib->status,
1515 ROUTE_ENTRY_REMOVED)
1516 && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
1517 match = dest->selected_fib;
1518
1519 /* If there is no selected route, walk up the tree and try
1520 * the parent node.
1521 */
1522 if (!match) {
1523 do {
1524 rn = rn->parent;
1525 } while (rn && rn->info == NULL);
1526 if (rn)
1527 route_lock_node(rn);
1528
1529 continue;
1530 }
1531
1532 if (match->type == ZEBRA_ROUTE_CONNECT) {
1533 /* Directly point connected route. */
1534 newhop = match->nhe->nhg->nexthop;
1535 if (newhop) {
1536 if (nexthop->type == NEXTHOP_TYPE_IPV4
1537 || nexthop->type == NEXTHOP_TYPE_IPV6)
1538 nexthop->ifindex = newhop->ifindex;
1539 }
1540 return 1;
1541 } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
1542 resolved = 0;
1543 for (ALL_NEXTHOPS_PTR(match->nhe->nhg, newhop)) {
1544 if (!CHECK_FLAG(match->status,
1545 ROUTE_ENTRY_INSTALLED))
1546 continue;
1547 if (!nexthop_valid_resolve(nexthop, newhop))
1548 continue;
1549
1550 SET_FLAG(nexthop->flags,
1551 NEXTHOP_FLAG_RECURSIVE);
1552 nexthop_set_resolved(afi, newhop, nexthop);
1553 resolved = 1;
1554 }
1555 if (resolved)
1556 re->nexthop_mtu = match->mtu;
1557
1558 if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
1559 zlog_debug("\t%s: Recursion failed to find",
1560 __PRETTY_FUNCTION__);
1561 return resolved;
1562 } else if (re->type == ZEBRA_ROUTE_STATIC) {
1563 resolved = 0;
1564 for (ALL_NEXTHOPS_PTR(match->nhe->nhg, newhop)) {
1565 if (!CHECK_FLAG(match->status,
1566 ROUTE_ENTRY_INSTALLED))
1567 continue;
1568 if (!nexthop_valid_resolve(nexthop, newhop))
1569 continue;
1570
1571 SET_FLAG(nexthop->flags,
1572 NEXTHOP_FLAG_RECURSIVE);
1573 nexthop_set_resolved(afi, newhop, nexthop);
1574 resolved = 1;
1575 }
1576 if (resolved)
1577 re->nexthop_mtu = match->mtu;
1578
1579 if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
1580 zlog_debug(
1581 "\t%s: Static route unable to resolve",
1582 __PRETTY_FUNCTION__);
1583 return resolved;
1584 } else {
1585 if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
1586 zlog_debug(
1587 "\t%s: Route Type %s has not turned on recursion",
1588 __PRETTY_FUNCTION__,
1589 zebra_route_string(re->type));
1590 if (re->type == ZEBRA_ROUTE_BGP
1591 && !CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP))
1592 zlog_debug(
1593 "\tEBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
1594 }
1595 return 0;
1596 }
1597 }
1598 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1599 zlog_debug("\t%s: Nexthop was not found in the table",
1600 __PRETTY_FUNCTION__);
1601 return 0;
1602 }
1603
1604 /* This function verifies reachability of one given nexthop, which can be
1605 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
1606 * in nexthop->flags field. The nexthop->ifindex will be updated
1607 * appropriately as well. An existing route map can turn
1608 * (otherwise active) nexthop into inactive, but not vice versa.
1609 *
1610 * If it finds a nexthop recursively, set the resolved_id
1611 * to match that nexthop's nhg_hash_entry ID.
1612 *
1613 * The return value is the final value of 'ACTIVE' flag.
1614 */
1615 static unsigned nexthop_active_check(struct route_node *rn,
1616 struct route_entry *re,
1617 struct nexthop *nexthop)
1618 {
1619 struct interface *ifp;
1620 route_map_result_t ret = RMAP_PERMITMATCH;
1621 int family;
1622 char buf[SRCDEST2STR_BUFFER];
1623 const struct prefix *p, *src_p;
1624 struct zebra_vrf *zvrf;
1625
1626 srcdest_rnode_prefixes(rn, &p, &src_p);
1627
1628 if (rn->p.family == AF_INET)
1629 family = AFI_IP;
1630 else if (rn->p.family == AF_INET6)
1631 family = AFI_IP6;
1632 else
1633 family = 0;
1634 switch (nexthop->type) {
1635 case NEXTHOP_TYPE_IFINDEX:
1636 ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
1637 if (ifp && if_is_operative(ifp))
1638 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1639 else
1640 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1641 break;
1642 case NEXTHOP_TYPE_IPV4:
1643 case NEXTHOP_TYPE_IPV4_IFINDEX:
1644 family = AFI_IP;
1645 if (nexthop_active(AFI_IP, re, nexthop, rn))
1646 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1647 else
1648 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1649 break;
1650 case NEXTHOP_TYPE_IPV6:
1651 family = AFI_IP6;
1652 if (nexthop_active(AFI_IP6, re, nexthop, rn))
1653 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1654 else
1655 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1656 break;
1657 case NEXTHOP_TYPE_IPV6_IFINDEX:
1658 /* RFC 5549, v4 prefix with v6 NH */
1659 if (rn->p.family != AF_INET)
1660 family = AFI_IP6;
1661 if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
1662 ifp = if_lookup_by_index(nexthop->ifindex,
1663 nexthop->vrf_id);
1664 if (ifp && if_is_operative(ifp))
1665 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1666 else
1667 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1668 } else {
1669 if (nexthop_active(AFI_IP6, re, nexthop, rn))
1670 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1671 else
1672 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1673 }
1674 break;
1675 case NEXTHOP_TYPE_BLACKHOLE:
1676 SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1677 break;
1678 default:
1679 break;
1680 }
1681 if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
1682 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1683 zlog_debug("\t%s: Unable to find an active nexthop",
1684 __PRETTY_FUNCTION__);
1685 return 0;
1686 }
1687
1688 /* XXX: What exactly do those checks do? Do we support
1689 * e.g. IPv4 routes with IPv6 nexthops or vice versa?
1690 */
1691 if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
1692 || (family == AFI_IP6 && p->family != AF_INET6))
1693 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1694
1695 /* The original code didn't determine the family correctly
1696 * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
1697 * from the rib_table_info in those cases.
1698 * Possibly it may be better to use only the rib_table_info
1699 * in every case.
1700 */
1701 if (!family) {
1702 rib_table_info_t *info;
1703
1704 info = srcdest_rnode_table_info(rn);
1705 family = info->afi;
1706 }
1707
1708 memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));
1709
1710 zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
1711 if (!zvrf) {
1712 if (IS_ZEBRA_DEBUG_RIB_DETAILED)
1713 zlog_debug("\t%s: zvrf is NULL", __PRETTY_FUNCTION__);
1714 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1715 }
1716
1717 /* It'll get set if required inside */
1718 ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
1719 zvrf, re->tag);
1720 if (ret == RMAP_DENYMATCH) {
1721 if (IS_ZEBRA_DEBUG_RIB) {
1722 srcdest_rnode2str(rn, buf, sizeof(buf));
1723 zlog_debug(
1724 "%u:%s: Filtering out with NH out %s due to route map",
1725 re->vrf_id, buf,
1726 ifindex2ifname(nexthop->ifindex,
1727 nexthop->vrf_id));
1728 }
1729 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1730 }
1731 return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1732 }
1733
1734 /*
1735 * Iterate over all nexthops of the given RIB entry and refresh their
1736 * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
1737 * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
1738 *
1739 * Return value is the new number of active nexthops.
1740 */
1741 int nexthop_active_update(struct route_node *rn, struct route_entry *re)
1742 {
1743 struct nexthop_group new_grp = {};
1744 struct nexthop *nexthop;
1745 union g_addr prev_src;
1746 unsigned int prev_active, new_active;
1747 ifindex_t prev_index;
1748 uint8_t curr_active = 0;
1749
1750 afi_t rt_afi = family2afi(rn->p.family);
1751
1752 UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
1753
1754 /* Copy over the nexthops in current state */
1755 nexthop_group_copy(&new_grp, re->nhe->nhg);
1756
1757 for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) {
1758
1759 /* No protocol daemon provides src and so we're skipping
1760 * tracking it */
1761 prev_src = nexthop->rmap_src;
1762 prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
1763 prev_index = nexthop->ifindex;
1764 /*
1765 * We need to respect the multipath_num here,
1766 * as what we are able to install from a multipath
1767 * perspective should not be a data plane
1768 * decision point.
1769 */
1770 new_active =
1771 nexthop_active_check(rn, re, nexthop);
1772
1773 if (new_active && curr_active >= zrouter.multipath_num) {
1774 struct nexthop *nh;
1775
1776 /* Set it and its resolved nexthop as inactive. */
1777 for (nh = nexthop; nh; nh = nh->resolved)
1778 UNSET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);
1779
1780 new_active = 0;
1781 }
1782
1783 if (new_active)
1784 curr_active++;
1785
1786 /* Don't allow src setting on IPv6 addr for now */
1787 if (prev_active != new_active || prev_index != nexthop->ifindex
1788 || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
1789 && nexthop->type < NEXTHOP_TYPE_IPV6)
1790 && prev_src.ipv4.s_addr
1791 != nexthop->rmap_src.ipv4.s_addr)
1792 || ((nexthop->type >= NEXTHOP_TYPE_IPV6
1793 && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
1794 && !(IPV6_ADDR_SAME(&prev_src.ipv6,
1795 &nexthop->rmap_src.ipv6)))
1796 || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED))
1797 SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
1798 }
1799
1800 if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
1801 struct nhg_hash_entry *new_nhe = NULL;
1802
1803 new_nhe = zebra_nhg_rib_find(0, &new_grp, rt_afi);
1804
1805 route_entry_update_nhe(re, new_nhe);
1806 }
1807
1808 if (curr_active) {
1809 struct nhg_hash_entry *nhe = NULL;
1810
1811 nhe = zebra_nhg_lookup_id(re->nhe_id);
1812
1813 if (nhe)
1814 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1815 else
1816 flog_err(
1817 EC_ZEBRA_TABLE_LOOKUP_FAILED,
1818 "Active update on NHE id=%u that we do not have in our tables",
1819 re->nhe_id);
1820 }
1821
1822 /*
1823 * Do not need these nexthops anymore since they
1824 * were either copied over into an nhe or not
1825 * used at all.
1826 */
1827 nexthops_free(new_grp.nexthop);
1828 return curr_active;
1829 }
1830
1831 /* Convert a nhe into a group array */
1832 uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe,
1833 int max_num)
1834 {
1835 struct nhg_connected *rb_node_dep = NULL;
1836 struct nhg_hash_entry *depend = NULL;
1837 uint8_t i = 0;
1838
1839 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
1840 bool duplicate = false;
1841
1842 depend = rb_node_dep->nhe;
1843
1844 /*
1845 * If its recursive, use its resolved nhe in the group
1846 */
1847 if (CHECK_FLAG(depend->flags, NEXTHOP_GROUP_RECURSIVE)) {
1848 depend = zebra_nhg_resolve(depend);
1849 if (!depend) {
1850 flog_err(
1851 EC_ZEBRA_NHG_FIB_UPDATE,
1852 "Failed to recursively resolve Nexthop Hash Entry in the group id=%u",
1853 nhe->id);
1854 continue;
1855 }
1856 }
1857
1858 /* Check for duplicate IDs, kernel doesn't like that */
1859 for (int j = 0; j < i; j++) {
1860 if (depend->id == grp[j].id)
1861 duplicate = true;
1862 }
1863
1864 if (!duplicate) {
1865 grp[i].id = depend->id;
1866 /* We aren't using weights for anything right now */
1867 grp[i].weight = depend->nhg->nexthop->weight;
1868 i++;
1869 }
1870
1871 if (i >= max_num)
1872 goto done;
1873 }
1874
1875 done:
1876 return i;
1877 }
1878
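/* Install an nhe (and, first, everything it depends on) via the dplane
 * if it is not already installed or queued.
 */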
1879 void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe)
1880 {
1881 struct nhg_connected *rb_node_dep = NULL;
1882
1883 /* Resolve it first */
1884 nhe = zebra_nhg_resolve(nhe);
1885
1886 /* Make sure all depends are installed/queued */
1887 frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
1888 zebra_nhg_install_kernel(rb_node_dep->nhe);
1889 }
1890
1891 if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
1892 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
1893 /* Change its type to us since we are installing it */
1894 nhe->type = ZEBRA_ROUTE_NHG;
1895
1896 int ret = dplane_nexthop_add(nhe);
1897
1898 switch (ret) {
1899 case ZEBRA_DPLANE_REQUEST_QUEUED:
1900 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
1901 break;
1902 case ZEBRA_DPLANE_REQUEST_FAILURE:
1903 flog_err(
1904 EC_ZEBRA_DP_INSTALL_FAIL,
1905 "Failed to install Nexthop ID (%u) into the kernel",
1906 nhe->id);
1907 break;
1908 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1909 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1910 zebra_nhg_handle_install(nhe);
1911 break;
1912 }
1913 }
1914 }
1915
1916 void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe)
1917 {
1918 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) {
1919 int ret = dplane_nexthop_delete(nhe);
1920
1921 switch (ret) {
1922 case ZEBRA_DPLANE_REQUEST_QUEUED:
1923 SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
1924 break;
1925 case ZEBRA_DPLANE_REQUEST_FAILURE:
1926 flog_err(
1927 EC_ZEBRA_DP_DELETE_FAIL,
1928 "Failed to uninstall Nexthop ID (%u) from the kernel",
1929 nhe->id);
1930 break;
1931 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1932 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1933 break;
1934 }
1935 }
1936
1937 zebra_nhg_handle_uninstall(nhe);
1938 }
1939
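/* Dataplane result handler for nexthop install/update/delete requests. */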
1940 void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
1941 {
1942 enum dplane_op_e op;
1943 enum zebra_dplane_result status;
1944 uint32_t id = 0;
1945 struct nhg_hash_entry *nhe = NULL;
1946
1947 op = dplane_ctx_get_op(ctx);
1948 status = dplane_ctx_get_status(ctx);
1949
1950 id = dplane_ctx_get_nhe_id(ctx);
1951
1952 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1953 zlog_debug(
1954 "Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s",
1955 ctx, dplane_op2str(op), id, dplane_res2str(status));
1956
1957 switch (op) {
1958 case DPLANE_OP_NH_DELETE:
1959 if (status != ZEBRA_DPLANE_REQUEST_SUCCESS)
1960 flog_err(
1961 EC_ZEBRA_DP_DELETE_FAIL,
1962 "Failed to uninstall Nexthop ID (%u) from the kernel",
1963 id);
1964 /* We already free'd the data, nothing to do */
1965 break;
1966 case DPLANE_OP_NH_INSTALL:
1967 case DPLANE_OP_NH_UPDATE:
1968 nhe = zebra_nhg_lookup_id(id);
1969
1970 if (!nhe) {
1971 flog_err(
1972 EC_ZEBRA_NHG_SYNC,
1973 "%s operation preformed on Nexthop ID (%u) in the kernel, that we no longer have in our table",
1974 dplane_op2str(op), id);
1975 break;
1976 }
1977
1978 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
1979 if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) {
1980 SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
1981 SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
1982 zebra_nhg_handle_install(nhe);
1983 } else
1984 flog_err(
1985 EC_ZEBRA_DP_INSTALL_FAIL,
1986 "Failed to install Nexthop ID (%u) into the kernel",
1987 nhe->id);
1988 break;
1989 case DPLANE_OP_ROUTE_INSTALL:
1990 case DPLANE_OP_ROUTE_UPDATE:
1991 case DPLANE_OP_ROUTE_DELETE:
1992 case DPLANE_OP_ROUTE_NOTIFY:
1993 case DPLANE_OP_LSP_INSTALL:
1994 case DPLANE_OP_LSP_UPDATE:
1995 case DPLANE_OP_LSP_DELETE:
1996 case DPLANE_OP_LSP_NOTIFY:
1997 case DPLANE_OP_PW_INSTALL:
1998 case DPLANE_OP_PW_UNINSTALL:
1999 case DPLANE_OP_SYS_ROUTE_ADD:
2000 case DPLANE_OP_SYS_ROUTE_DELETE:
2001 case DPLANE_OP_ADDR_INSTALL:
2002 case DPLANE_OP_ADDR_UNINSTALL:
2003 case DPLANE_OP_MAC_INSTALL:
2004 case DPLANE_OP_MAC_DELETE:
2005 case DPLANE_OP_NEIGH_INSTALL:
2006 case DPLANE_OP_NEIGH_UPDATE:
2007 case DPLANE_OP_NEIGH_DELETE:
2008 case DPLANE_OP_VTEP_ADD:
2009 case DPLANE_OP_VTEP_DELETE:
2010 case DPLANE_OP_NONE:
2011 break;
2012 }
2013
2014 dplane_ctx_fini(&ctx);
2015 }
2016
2017 static void zebra_nhg_sweep_entry(struct hash_bucket *bucket, void *arg)
2018 {
2019 struct nhg_hash_entry *nhe = NULL;
2020
2021 nhe = (struct nhg_hash_entry *)bucket->data;
2022
2023 /* If it's being ref'd, just let it be uninstalled via a route removal */
2024 if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0)
2025 zebra_nhg_uninstall_kernel(nhe);
2026 }
2027
2028 void zebra_nhg_sweep_table(struct hash *hash)
2029 {
2030 hash_iterate(hash, zebra_nhg_sweep_entry, NULL);
2031 }