]>
Commit | Line | Data |
---|---|---|
1 | // SPDX-License-Identifier: GPL-2.0-or-later | |
2 | /* Zebra Nexthop Group Code. | |
3 | * Copyright (C) 2019 Cumulus Networks, Inc. | |
4 | * Donald Sharp | |
5 | * Stephen Worley | |
6 | */ | |
7 | #include <zebra.h> | |
8 | ||
9 | #include "lib/nexthop.h" | |
10 | #include "lib/nexthop_group_private.h" | |
11 | #include "lib/routemap.h" | |
12 | #include "lib/mpls.h" | |
13 | #include "lib/jhash.h" | |
14 | #include "lib/debug.h" | |
15 | #include "lib/lib_errors.h" | |
16 | ||
17 | #include "zebra/connected.h" | |
18 | #include "zebra/debug.h" | |
19 | #include "zebra/zebra_router.h" | |
20 | #include "zebra/zebra_nhg_private.h" | |
21 | #include "zebra/zebra_rnh.h" | |
22 | #include "zebra/zebra_routemap.h" | |
23 | #include "zebra/zebra_srte.h" | |
24 | #include "zebra/zserv.h" | |
25 | #include "zebra/rt.h" | |
26 | #include "zebra_errors.h" | |
27 | #include "zebra_dplane.h" | |
28 | #include "zebra/interface.h" | |
29 | #include "zebra/zapi_msg.h" | |
30 | #include "zebra/rib.h" | |
31 | #include "zebra/zebra_vxlan.h" | |
32 | ||
33 | DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry"); | |
34 | DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected"); | |
35 | DEFINE_MTYPE_STATIC(ZEBRA, NHG_CTX, "Nexthop Group Context"); | |
36 | ||
/* Map backup nexthop indices between two nhes */
struct backup_nh_map_s {
	/* Number of valid entries in 'map' below */
	int map_count;

	struct {
		/* Backup index in the original nhe */
		uint8_t orig_idx;
		/* Corresponding backup index in the new nhe */
		uint8_t new_idx;
	} map[MULTIPATH_NUM];
};
46 | ||
/* id counter to keep in sync with kernel */
uint32_t id_counter;

/* Controlled through ui */
static bool g_nexthops_enabled = true;
static bool proto_nexthops_only;
static bool use_recursive_backups = true;

/* Forward declarations for the depends helpers defined later in this file */
static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi,
					   int type, bool from_dplane);
static void depends_add(struct nhg_connected_tree_head *head,
			struct nhg_hash_entry *depend);
static struct nhg_hash_entry *
depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
		 afi_t afi, int type, bool from_dplane);
static struct nhg_hash_entry *
depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id);
static void depends_decrement_free(struct nhg_connected_tree_head *head);

static struct nhg_backup_info *
nhg_backup_copy(const struct nhg_backup_info *orig);
68 | ||
69 | /* Helper function for getting the next allocatable ID */ | |
70 | static uint32_t nhg_get_next_id(void) | |
71 | { | |
72 | while (1) { | |
73 | id_counter++; | |
74 | ||
75 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
76 | zlog_debug("%s: ID %u checking", __func__, id_counter); | |
77 | ||
78 | if (id_counter == ZEBRA_NHG_PROTO_LOWER) { | |
79 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
80 | zlog_debug("%s: ID counter wrapped", __func__); | |
81 | ||
82 | id_counter = 0; | |
83 | continue; | |
84 | } | |
85 | ||
86 | if (zebra_nhg_lookup_id(id_counter)) { | |
87 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
88 | zlog_debug("%s: ID already exists", __func__); | |
89 | ||
90 | continue; | |
91 | } | |
92 | ||
93 | break; | |
94 | } | |
95 | ||
96 | return id_counter; | |
97 | } | |
98 | ||
99 | static void nhg_connected_free(struct nhg_connected *dep) | |
100 | { | |
101 | XFREE(MTYPE_NHG_CONNECTED, dep); | |
102 | } | |
103 | ||
104 | static struct nhg_connected *nhg_connected_new(struct nhg_hash_entry *nhe) | |
105 | { | |
106 | struct nhg_connected *new = NULL; | |
107 | ||
108 | new = XCALLOC(MTYPE_NHG_CONNECTED, sizeof(struct nhg_connected)); | |
109 | new->nhe = nhe; | |
110 | ||
111 | return new; | |
112 | } | |
113 | ||
114 | void nhg_connected_tree_free(struct nhg_connected_tree_head *head) | |
115 | { | |
116 | struct nhg_connected *rb_node_dep = NULL; | |
117 | ||
118 | if (!nhg_connected_tree_is_empty(head)) { | |
119 | frr_each_safe(nhg_connected_tree, head, rb_node_dep) { | |
120 | nhg_connected_tree_del(head, rb_node_dep); | |
121 | nhg_connected_free(rb_node_dep); | |
122 | } | |
123 | } | |
124 | } | |
125 | ||
126 | bool nhg_connected_tree_is_empty(const struct nhg_connected_tree_head *head) | |
127 | { | |
128 | return nhg_connected_tree_count(head) ? false : true; | |
129 | } | |
130 | ||
/* Return the first (root) element of the 'connected' tree */
struct nhg_connected *
nhg_connected_tree_root(struct nhg_connected_tree_head *head)
{
	return nhg_connected_tree_first(head);
}
136 | ||
137 | struct nhg_hash_entry * | |
138 | nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head, | |
139 | struct nhg_hash_entry *depend) | |
140 | { | |
141 | struct nhg_connected lookup = {}; | |
142 | struct nhg_connected *remove = NULL; | |
143 | struct nhg_hash_entry *removed_nhe; | |
144 | ||
145 | lookup.nhe = depend; | |
146 | ||
147 | /* Lookup to find the element, then remove it */ | |
148 | remove = nhg_connected_tree_find(head, &lookup); | |
149 | if (remove) | |
150 | /* Re-returning here just in case this API changes.. | |
151 | * the _del list api's are a bit undefined at the moment. | |
152 | * | |
153 | * So hopefully returning here will make it fail if the api | |
154 | * changes to something different than currently expected. | |
155 | */ | |
156 | remove = nhg_connected_tree_del(head, remove); | |
157 | ||
158 | /* If the entry was sucessfully removed, free the 'connected` struct */ | |
159 | if (remove) { | |
160 | removed_nhe = remove->nhe; | |
161 | nhg_connected_free(remove); | |
162 | return removed_nhe; | |
163 | } | |
164 | ||
165 | return NULL; | |
166 | } | |
167 | ||
168 | /* Assuming UNIQUE RB tree. If this changes, assumptions here about | |
169 | * insertion need to change. | |
170 | */ | |
171 | struct nhg_hash_entry * | |
172 | nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head, | |
173 | struct nhg_hash_entry *depend) | |
174 | { | |
175 | struct nhg_connected *new = NULL; | |
176 | ||
177 | new = nhg_connected_new(depend); | |
178 | ||
179 | /* On success, NULL will be returned from the | |
180 | * RB code. | |
181 | */ | |
182 | if (new && (nhg_connected_tree_add(head, new) == NULL)) | |
183 | return NULL; | |
184 | ||
185 | /* If it wasn't successful, it must be a duplicate. We enforce the | |
186 | * unique property for the `nhg_connected` tree. | |
187 | */ | |
188 | nhg_connected_free(new); | |
189 | ||
190 | return depend; | |
191 | } | |
192 | ||
193 | static void | |
194 | nhg_connected_tree_decrement_ref(struct nhg_connected_tree_head *head) | |
195 | { | |
196 | struct nhg_connected *rb_node_dep = NULL; | |
197 | ||
198 | frr_each_safe(nhg_connected_tree, head, rb_node_dep) { | |
199 | zebra_nhg_decrement_ref(rb_node_dep->nhe); | |
200 | } | |
201 | } | |
202 | ||
203 | static void | |
204 | nhg_connected_tree_increment_ref(struct nhg_connected_tree_head *head) | |
205 | { | |
206 | struct nhg_connected *rb_node_dep = NULL; | |
207 | ||
208 | frr_each(nhg_connected_tree, head, rb_node_dep) { | |
209 | zebra_nhg_increment_ref(rb_node_dep->nhe); | |
210 | } | |
211 | } | |
212 | ||
213 | struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe) | |
214 | { | |
215 | if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE) | |
216 | && !zebra_nhg_depends_is_empty(nhe)) { | |
217 | nhe = nhg_connected_tree_root(&nhe->nhg_depends)->nhe; | |
218 | return zebra_nhg_resolve(nhe); | |
219 | } | |
220 | ||
221 | return nhe; | |
222 | } | |
223 | ||
224 | unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe) | |
225 | { | |
226 | return nhg_connected_tree_count(&nhe->nhg_depends); | |
227 | } | |
228 | ||
229 | bool zebra_nhg_depends_is_empty(const struct nhg_hash_entry *nhe) | |
230 | { | |
231 | return nhg_connected_tree_is_empty(&nhe->nhg_depends); | |
232 | } | |
233 | ||
234 | static void zebra_nhg_depends_del(struct nhg_hash_entry *from, | |
235 | struct nhg_hash_entry *depend) | |
236 | { | |
237 | nhg_connected_tree_del_nhe(&from->nhg_depends, depend); | |
238 | } | |
239 | ||
240 | static void zebra_nhg_depends_init(struct nhg_hash_entry *nhe) | |
241 | { | |
242 | nhg_connected_tree_init(&nhe->nhg_depends); | |
243 | } | |
244 | ||
245 | unsigned int zebra_nhg_dependents_count(const struct nhg_hash_entry *nhe) | |
246 | { | |
247 | return nhg_connected_tree_count(&nhe->nhg_dependents); | |
248 | } | |
249 | ||
250 | ||
251 | bool zebra_nhg_dependents_is_empty(const struct nhg_hash_entry *nhe) | |
252 | { | |
253 | return nhg_connected_tree_is_empty(&nhe->nhg_dependents); | |
254 | } | |
255 | ||
256 | static void zebra_nhg_dependents_del(struct nhg_hash_entry *from, | |
257 | struct nhg_hash_entry *dependent) | |
258 | { | |
259 | nhg_connected_tree_del_nhe(&from->nhg_dependents, dependent); | |
260 | } | |
261 | ||
262 | static void zebra_nhg_dependents_add(struct nhg_hash_entry *to, | |
263 | struct nhg_hash_entry *dependent) | |
264 | { | |
265 | nhg_connected_tree_add_nhe(&to->nhg_dependents, dependent); | |
266 | } | |
267 | ||
268 | static void zebra_nhg_dependents_init(struct nhg_hash_entry *nhe) | |
269 | { | |
270 | nhg_connected_tree_init(&nhe->nhg_dependents); | |
271 | } | |
272 | ||
273 | /* Release this nhe from anything depending on it */ | |
274 | static void zebra_nhg_dependents_release(struct nhg_hash_entry *nhe) | |
275 | { | |
276 | struct nhg_connected *rb_node_dep = NULL; | |
277 | ||
278 | frr_each_safe(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) { | |
279 | zebra_nhg_depends_del(rb_node_dep->nhe, nhe); | |
280 | /* recheck validity of the dependent */ | |
281 | zebra_nhg_check_valid(rb_node_dep->nhe); | |
282 | } | |
283 | } | |
284 | ||
285 | /* Release this nhe from anything that it depends on */ | |
286 | static void zebra_nhg_depends_release(struct nhg_hash_entry *nhe) | |
287 | { | |
288 | if (!zebra_nhg_depends_is_empty(nhe)) { | |
289 | struct nhg_connected *rb_node_dep = NULL; | |
290 | ||
291 | frr_each_safe(nhg_connected_tree, &nhe->nhg_depends, | |
292 | rb_node_dep) { | |
293 | zebra_nhg_dependents_del(rb_node_dep->nhe, nhe); | |
294 | } | |
295 | } | |
296 | } | |
297 | ||
298 | ||
299 | struct nhg_hash_entry *zebra_nhg_lookup_id(uint32_t id) | |
300 | { | |
301 | struct nhg_hash_entry lookup = {}; | |
302 | ||
303 | lookup.id = id; | |
304 | return hash_lookup(zrouter.nhgs_id, &lookup); | |
305 | } | |
306 | ||
307 | static int zebra_nhg_insert_id(struct nhg_hash_entry *nhe) | |
308 | { | |
309 | if (hash_lookup(zrouter.nhgs_id, nhe)) { | |
310 | flog_err( | |
311 | EC_ZEBRA_NHG_TABLE_INSERT_FAILED, | |
312 | "Failed inserting NHG %pNG into the ID hash table, entry already exists", | |
313 | nhe); | |
314 | return -1; | |
315 | } | |
316 | ||
317 | (void)hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern); | |
318 | ||
319 | return 0; | |
320 | } | |
321 | ||
322 | static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp) | |
323 | { | |
324 | nhe->ifp = ifp; | |
325 | if_nhg_dependents_add(ifp, nhe); | |
326 | } | |
327 | ||
328 | static void | |
329 | zebra_nhg_connect_depends(struct nhg_hash_entry *nhe, | |
330 | struct nhg_connected_tree_head *nhg_depends) | |
331 | { | |
332 | struct nhg_connected *rb_node_dep = NULL; | |
333 | ||
334 | /* This has been allocated higher above in the stack. Could probably | |
335 | * re-allocate and free the old stuff but just using the same memory | |
336 | * for now. Otherwise, their might be a time trade-off for repeated | |
337 | * alloc/frees as startup. | |
338 | */ | |
339 | nhe->nhg_depends = *nhg_depends; | |
340 | ||
341 | /* Attach backpointer to anything that it depends on */ | |
342 | zebra_nhg_dependents_init(nhe); | |
343 | if (!zebra_nhg_depends_is_empty(nhe)) { | |
344 | frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { | |
345 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
346 | zlog_debug("%s: nhe %p (%pNG), dep %p (%pNG)", | |
347 | __func__, nhe, nhe, rb_node_dep->nhe, | |
348 | rb_node_dep->nhe); | |
349 | ||
350 | zebra_nhg_dependents_add(rb_node_dep->nhe, nhe); | |
351 | } | |
352 | } | |
353 | } | |
354 | ||
355 | /* Init an nhe, for use in a hash lookup for example */ | |
356 | void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, | |
357 | const struct nexthop *nh) | |
358 | { | |
359 | memset(nhe, 0, sizeof(struct nhg_hash_entry)); | |
360 | nhe->vrf_id = VRF_DEFAULT; | |
361 | nhe->type = ZEBRA_ROUTE_NHG; | |
362 | nhe->afi = AFI_UNSPEC; | |
363 | ||
364 | /* There are some special rules that apply to groups representing | |
365 | * a single nexthop. | |
366 | */ | |
367 | if (nh && (nh->next == NULL)) { | |
368 | switch (nh->type) { | |
369 | case NEXTHOP_TYPE_IFINDEX: | |
370 | case NEXTHOP_TYPE_BLACKHOLE: | |
371 | /* | |
372 | * This switch case handles setting the afi different | |
373 | * for ipv4/v6 routes. Ifindex/blackhole nexthop | |
374 | * objects cannot be ambiguous, they must be Address | |
375 | * Family specific. If we get here, we will either use | |
376 | * the AF of the route, or the one we got passed from | |
377 | * here from the kernel. | |
378 | */ | |
379 | nhe->afi = afi; | |
380 | break; | |
381 | case NEXTHOP_TYPE_IPV4_IFINDEX: | |
382 | case NEXTHOP_TYPE_IPV4: | |
383 | nhe->afi = AFI_IP; | |
384 | break; | |
385 | case NEXTHOP_TYPE_IPV6_IFINDEX: | |
386 | case NEXTHOP_TYPE_IPV6: | |
387 | nhe->afi = AFI_IP6; | |
388 | break; | |
389 | } | |
390 | } | |
391 | } | |
392 | ||
393 | struct nhg_hash_entry *zebra_nhg_alloc(void) | |
394 | { | |
395 | struct nhg_hash_entry *nhe; | |
396 | ||
397 | nhe = XCALLOC(MTYPE_NHG, sizeof(struct nhg_hash_entry)); | |
398 | ||
399 | return nhe; | |
400 | } | |
401 | ||
402 | /* | |
403 | * Allocate new nhe and make shallow copy of 'orig'; no | |
404 | * recursive info is copied. | |
405 | */ | |
406 | struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig, | |
407 | uint32_t id) | |
408 | { | |
409 | struct nhg_hash_entry *nhe; | |
410 | ||
411 | nhe = zebra_nhg_alloc(); | |
412 | ||
413 | nhe->id = id; | |
414 | ||
415 | nexthop_group_copy(&(nhe->nhg), &(orig->nhg)); | |
416 | ||
417 | nhe->vrf_id = orig->vrf_id; | |
418 | nhe->afi = orig->afi; | |
419 | nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG; | |
420 | nhe->refcnt = 0; | |
421 | nhe->dplane_ref = zebra_router_get_next_sequence(); | |
422 | ||
423 | /* Copy backup info also, if present */ | |
424 | if (orig->backup_info) | |
425 | nhe->backup_info = nhg_backup_copy(orig->backup_info); | |
426 | ||
427 | return nhe; | |
428 | } | |
429 | ||
430 | /* Allocation via hash handler */ | |
431 | static void *zebra_nhg_hash_alloc(void *arg) | |
432 | { | |
433 | struct nhg_hash_entry *nhe = NULL; | |
434 | struct nhg_hash_entry *copy = arg; | |
435 | ||
436 | nhe = zebra_nhe_copy(copy, copy->id); | |
437 | ||
438 | /* Mark duplicate nexthops in a group at creation time. */ | |
439 | nexthop_group_mark_duplicates(&(nhe->nhg)); | |
440 | ||
441 | /* | |
442 | * Add the ifp now if it's not a group or recursive and has ifindex. | |
443 | * | |
444 | * A proto-owned ID is always a group. | |
445 | */ | |
446 | if (!PROTO_OWNED(nhe) && nhe->nhg.nexthop && !nhe->nhg.nexthop->next | |
447 | && !nhe->nhg.nexthop->resolved && nhe->nhg.nexthop->ifindex) { | |
448 | struct interface *ifp = NULL; | |
449 | ||
450 | ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex, | |
451 | nhe->nhg.nexthop->vrf_id); | |
452 | if (ifp) | |
453 | zebra_nhg_set_if(nhe, ifp); | |
454 | else { | |
455 | if (IS_ZEBRA_DEBUG_NHG) | |
456 | zlog_debug( | |
457 | "Failed to lookup an interface with ifindex=%d in vrf=%u for NHE %pNG", | |
458 | nhe->nhg.nexthop->ifindex, | |
459 | nhe->nhg.nexthop->vrf_id, nhe); | |
460 | } | |
461 | } | |
462 | ||
463 | return nhe; | |
464 | } | |
465 | ||
466 | uint32_t zebra_nhg_hash_key(const void *arg) | |
467 | { | |
468 | const struct nhg_hash_entry *nhe = arg; | |
469 | uint32_t key = 0x5a351234; | |
470 | uint32_t primary = 0; | |
471 | uint32_t backup = 0; | |
472 | ||
473 | primary = nexthop_group_hash(&(nhe->nhg)); | |
474 | if (nhe->backup_info) | |
475 | backup = nexthop_group_hash(&(nhe->backup_info->nhe->nhg)); | |
476 | ||
477 | key = jhash_3words(primary, backup, nhe->type, key); | |
478 | ||
479 | key = jhash_2words(nhe->vrf_id, nhe->afi, key); | |
480 | ||
481 | return key; | |
482 | } | |
483 | ||
484 | uint32_t zebra_nhg_id_key(const void *arg) | |
485 | { | |
486 | const struct nhg_hash_entry *nhe = arg; | |
487 | ||
488 | return nhe->id; | |
489 | } | |
490 | ||
491 | /* Helper with common nhg/nhe nexthop comparison logic */ | |
492 | static bool nhg_compare_nexthops(const struct nexthop *nh1, | |
493 | const struct nexthop *nh2) | |
494 | { | |
495 | assert(nh1 != NULL && nh2 != NULL); | |
496 | ||
497 | /* | |
498 | * We have to check the active flag of each individual one, | |
499 | * not just the overall active_num. This solves the special case | |
500 | * issue of a route with a nexthop group with one nexthop | |
501 | * resolving to itself and thus marking it inactive. If we | |
502 | * have two different routes each wanting to mark a different | |
503 | * nexthop inactive, they need to hash to two different groups. | |
504 | * | |
505 | * If we just hashed on num_active, they would hash the same | |
506 | * which is incorrect. | |
507 | * | |
508 | * ex) | |
509 | * 1.1.1.0/24 | |
510 | * -> 1.1.1.1 dummy1 (inactive) | |
511 | * -> 1.1.2.1 dummy2 | |
512 | * | |
513 | * 1.1.2.0/24 | |
514 | * -> 1.1.1.1 dummy1 | |
515 | * -> 1.1.2.1 dummy2 (inactive) | |
516 | * | |
517 | * Without checking each individual one, they would hash to | |
518 | * the same group and both have 1.1.1.1 dummy1 marked inactive. | |
519 | * | |
520 | */ | |
521 | if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE) | |
522 | != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE)) | |
523 | return false; | |
524 | ||
525 | if (!nexthop_same(nh1, nh2)) | |
526 | return false; | |
527 | ||
528 | return true; | |
529 | } | |
530 | ||
/* Full equality check for two nhes: identity fields, resilience params,
 * primary nexthop list and (optional) backup nexthop list.
 */
bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
{
	const struct nhg_hash_entry *nhe1 = arg1;
	const struct nhg_hash_entry *nhe2 = arg2;
	struct nexthop *nexthop1;
	struct nexthop *nexthop2;

	/* No matter what if they equal IDs, assume equal */
	if (nhe1->id && nhe2->id && (nhe1->id == nhe2->id))
		return true;

	/* Cheap identity fields first: type, vrf, afi */
	if (nhe1->type != nhe2->type)
		return false;

	if (nhe1->vrf_id != nhe2->vrf_id)
		return false;

	if (nhe1->afi != nhe2->afi)
		return false;

	/* Resilient-group parameters must also match */
	if (nhe1->nhg.nhgr.buckets != nhe2->nhg.nhgr.buckets)
		return false;

	if (nhe1->nhg.nhgr.idle_timer != nhe2->nhg.nhgr.idle_timer)
		return false;

	if (nhe1->nhg.nhgr.unbalanced_timer != nhe2->nhg.nhgr.unbalanced_timer)
		return false;

	/* Nexthops should be in-order, so we simply compare them in-place */
	for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
	     nexthop1 && nexthop2;
	     nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {

		if (!nhg_compare_nexthops(nexthop1, nexthop2))
			return false;
	}

	/* Check for unequal list lengths */
	if (nexthop1 || nexthop2)
		return false;

	/* If there's no backup info, comparison is done. */
	if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL))
		return true;

	/* Compare backup info also - test the easy things first */
	if (nhe1->backup_info && (nhe2->backup_info == NULL))
		return false;
	if (nhe2->backup_info && (nhe1->backup_info == NULL))
		return false;

	/* Compare number of backups before actually comparing any
	 * (the loop body is intentionally empty - it only advances both
	 * pointers to find out whether the lists have equal length)
	 */
	for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
	     nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
	     nexthop1 && nexthop2;
	     nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
		;
	}

	/* Did we find the end of one list before the other? */
	if (nexthop1 || nexthop2)
		return false;

	/* Have to compare the backup nexthops */
	for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
	     nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
	     nexthop1 && nexthop2;
	     nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {

		if (!nhg_compare_nexthops(nexthop1, nexthop2))
			return false;
	}

	return true;
}
607 | ||
608 | bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2) | |
609 | { | |
610 | const struct nhg_hash_entry *nhe1 = arg1; | |
611 | const struct nhg_hash_entry *nhe2 = arg2; | |
612 | ||
613 | return nhe1->id == nhe2->id; | |
614 | } | |
615 | ||
616 | static int zebra_nhg_process_grp(struct nexthop_group *nhg, | |
617 | struct nhg_connected_tree_head *depends, | |
618 | struct nh_grp *grp, uint8_t count, | |
619 | struct nhg_resilience *resilience) | |
620 | { | |
621 | nhg_connected_tree_init(depends); | |
622 | ||
623 | for (int i = 0; i < count; i++) { | |
624 | struct nhg_hash_entry *depend = NULL; | |
625 | /* We do not care about nexthop_grp.weight at | |
626 | * this time. But we should figure out | |
627 | * how to adapt this to our code in | |
628 | * the future. | |
629 | */ | |
630 | depend = depends_find_id_add(depends, grp[i].id); | |
631 | ||
632 | if (!depend) { | |
633 | flog_err( | |
634 | EC_ZEBRA_NHG_SYNC, | |
635 | "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table", | |
636 | grp[i].id); | |
637 | return -1; | |
638 | } | |
639 | ||
640 | /* | |
641 | * If this is a nexthop with its own group | |
642 | * dependencies, add them as well. Not sure its | |
643 | * even possible to have a group within a group | |
644 | * in the kernel. | |
645 | */ | |
646 | ||
647 | copy_nexthops(&nhg->nexthop, depend->nhg.nexthop, NULL); | |
648 | } | |
649 | ||
650 | if (resilience) | |
651 | nhg->nhgr = *resilience; | |
652 | ||
653 | return 0; | |
654 | } | |
655 | ||
656 | static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends, | |
657 | struct nexthop *nh, afi_t afi, int type) | |
658 | { | |
659 | struct nhg_hash_entry *depend = NULL; | |
660 | struct nexthop_group resolved_ng = {}; | |
661 | ||
662 | resolved_ng.nexthop = nh; | |
663 | ||
664 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
665 | zlog_debug("%s: head %p, nh %pNHv", | |
666 | __func__, nhg_depends, nh); | |
667 | ||
668 | depend = zebra_nhg_rib_find(0, &resolved_ng, afi, type); | |
669 | ||
670 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
671 | zlog_debug("%s: nh %pNHv => %p (%u)", | |
672 | __func__, nh, depend, | |
673 | depend ? depend->id : 0); | |
674 | ||
675 | if (depend) | |
676 | depends_add(nhg_depends, depend); | |
677 | } | |
678 | ||
/*
 * Lookup an nhe in the global hash, using data from another nhe. If 'lookup'
 * has an id value, that's used. Create a new global/shared nhe if not found.
 *
 * Returns true when a new nhe was created (mailed back via *nhe),
 * false when an existing entry was found.
 */
static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */
			   struct nhg_hash_entry *lookup,
			   struct nhg_connected_tree_head *nhg_depends,
			   afi_t afi, bool from_dplane)
{
	bool created = false;
	bool recursive = false;
	struct nhg_hash_entry *newnhe, *backup_nhe;
	struct nexthop *nh = NULL;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug(
			"%s: id %u, lookup %p, vrf %d, type %d, depends %p%s",
			__func__, lookup->id, lookup, lookup->vrf_id,
			lookup->type, nhg_depends,
			(from_dplane ? " (from dplane)" : ""));

	/* An explicit ID takes precedence over a content-based lookup */
	if (lookup->id)
		(*nhe) = zebra_nhg_lookup_id(lookup->id);
	else
		(*nhe) = hash_lookup(zrouter.nhgs, lookup);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: lookup => %p (%pNG)", __func__, *nhe, *nhe);

	/* If we found an existing object, we're done */
	if (*nhe)
		goto done;

	/* We're going to create/insert a new nhe:
	 * assign the next global id value if necessary.
	 */
	if (lookup->id == 0)
		lookup->id = nhg_get_next_id();

	if (!from_dplane && lookup->id < ZEBRA_NHG_PROTO_LOWER) {
		/*
		 * This is a zebra hashed/owned NHG.
		 *
		 * It goes in HASH and ID table.
		 */
		newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc);
		zebra_nhg_insert_id(newnhe);
	} else {
		/*
		 * This is upperproto owned NHG or one we read in from dataplane
		 * and should not be hashed to.
		 *
		 * It goes in ID table.
		 */
		newnhe =
			hash_get(zrouter.nhgs_id, lookup, zebra_nhg_hash_alloc);
	}

	created = true;

	/* Mail back the new object */
	*nhe = newnhe;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: => created %p (%pNG)", __func__, newnhe,
			   newnhe);

	/* Only hash/lookup the depends if the first lookup
	 * fails to find something. This should hopefully save a
	 * lot of cycles for larger ecmp sizes.
	 */
	if (nhg_depends) {
		/* If you don't want to hash on each nexthop in the
		 * nexthop group struct you can pass the depends
		 * directly. Kernel-side we do this since it just looks
		 * them up via IDs.
		 */
		zebra_nhg_connect_depends(newnhe, nhg_depends);
		goto done;
	}

	/* Prepare dependency relationships if this is not a
	 * singleton nexthop. There are two cases: a single
	 * recursive nexthop, where we need a relationship to the
	 * resolving nexthop; or a group of nexthops, where we need
	 * relationships with the corresponding singletons.
	 */
	zebra_nhg_depends_init(newnhe);

	nh = newnhe->nhg.nexthop;

	/* A singleton whose nexthop is active is immediately valid */
	if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
		SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID);

	if (nh->next == NULL && newnhe->id < ZEBRA_NHG_PROTO_LOWER) {
		if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
			/* Single recursive nexthop */
			handle_recursive_depend(&newnhe->nhg_depends,
						nh->resolved, afi,
						newnhe->type);
			recursive = true;
		}
	} else {
		/* Proto-owned are groups by default */
		/* List of nexthops */
		for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) {
			if (IS_ZEBRA_DEBUG_NHG_DETAIL)
				zlog_debug("%s: depends NH %pNHv %s",
					   __func__, nh,
					   CHECK_FLAG(nh->flags,
						      NEXTHOP_FLAG_RECURSIVE) ?
					   "(R)" : "");

			depends_find_add(&newnhe->nhg_depends, nh, afi,
					 newnhe->type, from_dplane);
		}
	}

	if (recursive)
		SET_FLAG(newnhe->flags, NEXTHOP_GROUP_RECURSIVE);

	/* Attach dependent backpointers to singletons */
	zebra_nhg_connect_depends(newnhe, &newnhe->nhg_depends);

	/**
	 * Backup Nexthops
	 */

	if (zebra_nhg_get_backup_nhg(newnhe) == NULL ||
	    zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL)
		goto done;

	/* If there are backup nexthops, add them to the backup
	 * depends tree. The rules here are a little different.
	 */
	recursive = false;
	backup_nhe = newnhe->backup_info->nhe;

	nh = backup_nhe->nhg.nexthop;

	/* Singleton recursive NH */
	if (nh->next == NULL &&
	    CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
		if (IS_ZEBRA_DEBUG_NHG_DETAIL)
			zlog_debug("%s: backup depend NH %pNHv (R)",
				   __func__, nh);

		/* Single recursive nexthop */
		handle_recursive_depend(&backup_nhe->nhg_depends, nh->resolved,
					afi, backup_nhe->type);
		recursive = true;
	} else {
		/* One or more backup NHs */
		for (; nh; nh = nh->next) {
			if (IS_ZEBRA_DEBUG_NHG_DETAIL)
				zlog_debug("%s: backup depend NH %pNHv %s",
					   __func__, nh,
					   CHECK_FLAG(nh->flags,
						      NEXTHOP_FLAG_RECURSIVE) ?
					   "(R)" : "");

			depends_find_add(&backup_nhe->nhg_depends, nh, afi,
					 backup_nhe->type, from_dplane);
		}
	}

	if (recursive)
		SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE);

done:
	/* Reset time since last update */
	(*nhe)->uptime = monotime(NULL);

	return created;
}
854 | ||
/*
 * Lookup or create an nhe, based on an nhg or an nhe id.
 *
 * Builds a temporary lookup nhe from the arguments and defers to
 * zebra_nhe_find(); returns true when a new entry was created.
 */
static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
			   struct nexthop_group *nhg,
			   struct nhg_connected_tree_head *nhg_depends,
			   vrf_id_t vrf_id, afi_t afi, int type,
			   bool from_dplane)
{
	struct nhg_hash_entry lookup = {};
	bool created = false;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p",
			   __func__, id, nhg, vrf_id, type,
			   nhg_depends);

	/* Use a temporary nhe and call into the superset/common code */
	lookup.id = id;
	lookup.type = type ? type : ZEBRA_ROUTE_NHG;
	lookup.nhg = *nhg;

	lookup.vrf_id = vrf_id;
	/* NOTE(review): nhg->nexthop is dereferenced unconditionally below —
	 * callers appear to always pass a non-empty group; confirm.
	 */
	if (nhg_depends || lookup.nhg.nexthop->next) {
		/* Groups can have all vrfs and AF's in them */
		lookup.afi = AFI_UNSPEC;
	} else {
		/* Singleton: derive the afi from the nexthop type */
		switch (lookup.nhg.nexthop->type) {
		case (NEXTHOP_TYPE_IFINDEX):
		case (NEXTHOP_TYPE_BLACKHOLE):
			/*
			 * This switch case handles setting the afi different
			 * for ipv4/v6 routes. Ifindex/blackhole nexthop
			 * objects cannot be ambiguous, they must be Address
			 * Family specific. If we get here, we will either use
			 * the AF of the route, or the one we got passed from
			 * here from the kernel.
			 */
			lookup.afi = afi;
			break;
		case (NEXTHOP_TYPE_IPV4_IFINDEX):
		case (NEXTHOP_TYPE_IPV4):
			lookup.afi = AFI_IP;
			break;
		case (NEXTHOP_TYPE_IPV6_IFINDEX):
		case (NEXTHOP_TYPE_IPV6):
			lookup.afi = AFI_IP6;
			break;
		}
	}

	created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi, from_dplane);

	return created;
}
910 | ||
911 | /* Find/create a single nexthop */ | |
912 | static struct nhg_hash_entry *zebra_nhg_find_nexthop(uint32_t id, | |
913 | struct nexthop *nh, | |
914 | afi_t afi, int type, | |
915 | bool from_dplane) | |
916 | { | |
917 | struct nhg_hash_entry *nhe = NULL; | |
918 | struct nexthop_group nhg = {}; | |
919 | vrf_id_t vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nh->vrf_id; | |
920 | ||
921 | nexthop_group_add_sorted(&nhg, nh); | |
922 | ||
923 | zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type, from_dplane); | |
924 | ||
925 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
926 | zlog_debug("%s: nh %pNHv => %p (%pNG)", __func__, nh, nhe, nhe); | |
927 | ||
928 | return nhe; | |
929 | } | |
930 | ||
/* Accessor: NHG ID carried by the context. */
static uint32_t nhg_ctx_get_id(const struct nhg_ctx *ctx)
{
	return ctx->id;
}
935 | ||
/* Record the processing status (queued/requeued/success/failure). */
static void nhg_ctx_set_status(struct nhg_ctx *ctx, enum nhg_ctx_status status)
{
	ctx->status = status;
}
940 | ||
/* Accessor: current processing status of the context. */
static enum nhg_ctx_status nhg_ctx_get_status(const struct nhg_ctx *ctx)
{
	return ctx->status;
}
945 | ||
/* Record the requested operation (new/del). */
static void nhg_ctx_set_op(struct nhg_ctx *ctx, enum nhg_ctx_op_e op)
{
	ctx->op = op;
}
950 | ||
/* Accessor: requested operation of the context. */
static enum nhg_ctx_op_e nhg_ctx_get_op(const struct nhg_ctx *ctx)
{
	return ctx->op;
}
955 | ||
/* Accessor: vrf id the context applies to. */
static vrf_id_t nhg_ctx_get_vrf_id(const struct nhg_ctx *ctx)
{
	return ctx->vrf_id;
}
960 | ||
/* Accessor: zebra route type that owns this context. */
static int nhg_ctx_get_type(const struct nhg_ctx *ctx)
{
	return ctx->type;
}
965 | ||
/* Accessor: address family of the context. */
static int nhg_ctx_get_afi(const struct nhg_ctx *ctx)
{
	return ctx->afi;
}
970 | ||
/* Accessor: embedded single nexthop (valid only when count == 0). */
static struct nexthop *nhg_ctx_get_nh(struct nhg_ctx *ctx)
{
	return &ctx->u.nh;
}
975 | ||
/* Accessor: number of group members; 0 means a single nexthop ctx. */
static uint8_t nhg_ctx_get_count(const struct nhg_ctx *ctx)
{
	return ctx->count;
}
980 | ||
/* Accessor: member-id array (valid only when count != 0). */
static struct nh_grp *nhg_ctx_get_grp(struct nhg_ctx *ctx)
{
	return ctx->u.grp;
}
985 | ||
/* Accessor: resilient-group parameters carried by the context. */
static struct nhg_resilience *nhg_ctx_get_resilience(struct nhg_ctx *ctx)
{
	return &ctx->resilience;
}
990 | ||
991 | static struct nhg_ctx *nhg_ctx_new(void) | |
992 | { | |
993 | struct nhg_ctx *new; | |
994 | ||
995 | new = XCALLOC(MTYPE_NHG_CTX, sizeof(struct nhg_ctx)); | |
996 | ||
997 | return new; | |
998 | } | |
999 | ||
1000 | void nhg_ctx_free(struct nhg_ctx **ctx) | |
1001 | { | |
1002 | struct nexthop *nh; | |
1003 | ||
1004 | if (ctx == NULL) | |
1005 | return; | |
1006 | ||
1007 | assert((*ctx) != NULL); | |
1008 | ||
1009 | if (nhg_ctx_get_count(*ctx)) | |
1010 | goto done; | |
1011 | ||
1012 | nh = nhg_ctx_get_nh(*ctx); | |
1013 | ||
1014 | nexthop_del_labels(nh); | |
1015 | nexthop_del_srv6_seg6local(nh); | |
1016 | nexthop_del_srv6_seg6(nh); | |
1017 | ||
1018 | done: | |
1019 | XFREE(MTYPE_NHG_CTX, *ctx); | |
1020 | } | |
1021 | ||
1022 | static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, | |
1023 | struct nh_grp *grp, vrf_id_t vrf_id, | |
1024 | afi_t afi, int type, uint8_t count, | |
1025 | struct nhg_resilience *resilience) | |
1026 | { | |
1027 | struct nhg_ctx *ctx = NULL; | |
1028 | ||
1029 | ctx = nhg_ctx_new(); | |
1030 | ||
1031 | ctx->id = id; | |
1032 | ctx->vrf_id = vrf_id; | |
1033 | ctx->afi = afi; | |
1034 | ctx->type = type; | |
1035 | ctx->count = count; | |
1036 | ||
1037 | if (resilience) | |
1038 | ctx->resilience = *resilience; | |
1039 | ||
1040 | if (count) | |
1041 | /* Copy over the array */ | |
1042 | memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp)); | |
1043 | else if (nh) | |
1044 | ctx->u.nh = *nh; | |
1045 | ||
1046 | return ctx; | |
1047 | } | |
1048 | ||
/* Mark an nhe valid and recursively propagate validity to every group
 * that depends on it (a group is valid once any member is valid).
 */
static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe)
{
	struct nhg_connected *rb_node_dep;

	SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);

	frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
		zebra_nhg_set_valid(rb_node_dep->nhe);
}
1058 | ||
/* Mark an nhe invalid and re-evaluate every group depending on it
 * (dependents may still be valid through another member).
 */
static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
{
	struct nhg_connected *rb_node_dep;

	UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);

	/* If we're in shutdown, this interface event needs to clean
	 * up installed NHGs, so don't clear that flag directly.
	 */
	if (!zebra_router_in_shutdown())
		UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);

	/* Update validity of nexthops depending on it */
	frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
		zebra_nhg_check_valid(rb_node_dep->nhe);
}
1075 | ||
1076 | void zebra_nhg_check_valid(struct nhg_hash_entry *nhe) | |
1077 | { | |
1078 | struct nhg_connected *rb_node_dep = NULL; | |
1079 | bool valid = false; | |
1080 | ||
1081 | /* If anthing else in the group is valid, the group is valid */ | |
1082 | frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { | |
1083 | if (CHECK_FLAG(rb_node_dep->nhe->flags, NEXTHOP_GROUP_VALID)) { | |
1084 | valid = true; | |
1085 | goto done; | |
1086 | } | |
1087 | } | |
1088 | ||
1089 | done: | |
1090 | if (valid) | |
1091 | zebra_nhg_set_valid(nhe); | |
1092 | else | |
1093 | zebra_nhg_set_invalid(nhe); | |
1094 | } | |
1095 | ||
/* Detach an nhe from both dependency trees and from its interface's
 * dependents list, ahead of release/free.
 */
static void zebra_nhg_release_all_deps(struct nhg_hash_entry *nhe)
{
	/* Remove it from any lists it may be on */
	zebra_nhg_depends_release(nhe);
	zebra_nhg_dependents_release(nhe);
	if (nhe->ifp)
		if_nhg_dependents_del(nhe->ifp, nhe);
}
1104 | ||
/* Remove an nhe from zebra's hash tables (but do not free it). */
static void zebra_nhg_release(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG)", __func__, nhe, nhe);

	zebra_nhg_release_all_deps(nhe);

	/*
	 * If its not zebra owned, we didn't store it here and have to be
	 * sure we don't clear one thats actually being used.
	 */
	if (nhe->id < ZEBRA_NHG_PROTO_LOWER)
		hash_release(zrouter.nhgs, nhe);

	/* The id-keyed table holds everything, zebra- or proto-owned */
	hash_release(zrouter.nhgs_id, nhe);
}
1121 | ||
/* Tear an nhe fully down: unhash it, then free it. */
static void zebra_nhg_handle_uninstall(struct nhg_hash_entry *nhe)
{
	zebra_nhg_release(nhe);
	zebra_nhg_free(nhe);
}
1127 | ||
/* After an nhe was installed: mark all dependents valid and, when
 * requested, push each dependent group down to the kernel as well.
 */
static void zebra_nhg_handle_install(struct nhg_hash_entry *nhe, bool install)
{
	/* Update validity of groups depending on it */
	struct nhg_connected *rb_node_dep;

	frr_each_safe (nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
		zebra_nhg_set_valid(rb_node_dep->nhe);
		/* install dependent NHG into kernel */
		if (install) {
			if (IS_ZEBRA_DEBUG_NHG_DETAIL)
				zlog_debug(
					"%s nh id %u (flags 0x%x) associated dependent NHG %pNG install",
					__func__, nhe->id, nhe->flags,
					rb_node_dep->nhe);
			zebra_nhg_install_kernel(rb_node_dep->nhe);
		}
	}
}
1146 | ||
/*
 * The kernel/other program has changed the state of a nexthop object we are
 * using.  If routes still reference it, push our state back down;
 * otherwise drop the entry entirely.
 */
static void zebra_nhg_handle_kernel_state_change(struct nhg_hash_entry *nhe,
						 bool is_delete)
{
	if (nhe->refcnt) {
		flog_err(
			EC_ZEBRA_NHG_SYNC,
			"Kernel %s a nexthop group with ID (%pNG) that we are still using for a route, sending it back down",
			(is_delete ? "deleted" : "updated"), nhe);

		/* Reinstall: clear the flag so install is not a no-op */
		UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
		zebra_nhg_install_kernel(nhe);
	} else
		zebra_nhg_handle_uninstall(nhe);
}
1165 | ||
/* Process a kernel/dataplane "new nexthop object" context: create or
 * update the matching nhg_hash_entry.
 *
 * Returns 0 on success, -ENOENT when a group references a member id we
 * have not processed yet (caller may requeue), -1 on find/create failure.
 */
static int nhg_ctx_process_new(struct nhg_ctx *ctx)
{
	struct nexthop_group *nhg = NULL;
	struct nhg_connected_tree_head nhg_depends = {};
	struct nhg_hash_entry *lookup = NULL;
	struct nhg_hash_entry *nhe = NULL;

	uint32_t id = nhg_ctx_get_id(ctx);
	uint8_t count = nhg_ctx_get_count(ctx);
	vrf_id_t vrf_id = nhg_ctx_get_vrf_id(ctx);
	int type = nhg_ctx_get_type(ctx);
	afi_t afi = nhg_ctx_get_afi(ctx);

	lookup = zebra_nhg_lookup_id(id);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: id %u, count %d, lookup => %p",
			   __func__, id, count, lookup);

	if (lookup) {
		/* This is already present in our table, hence an update
		 * that we did not initiate.
		 */
		zebra_nhg_handle_kernel_state_change(lookup, false);
		return 0;
	}

	if (nhg_ctx_get_count(ctx)) {
		/* Group case: resolve each member id into a depend */
		nhg = nexthop_group_new();
		if (zebra_nhg_process_grp(nhg, &nhg_depends,
					  nhg_ctx_get_grp(ctx), count,
					  nhg_ctx_get_resilience(ctx))) {
			depends_decrement_free(&nhg_depends);
			nexthop_group_delete(&nhg);
			return -ENOENT;
		}

		if (!zebra_nhg_find(&nhe, id, nhg, &nhg_depends, vrf_id, afi,
				    type, true))
			depends_decrement_free(&nhg_depends);

		/* These got copied over in zebra_nhg_alloc() */
		nexthop_group_delete(&nhg);
	} else
		nhe = zebra_nhg_find_nexthop(id, nhg_ctx_get_nh(ctx), afi, type,
					     true);

	if (!nhe) {
		flog_err(
			EC_ZEBRA_TABLE_LOOKUP_FAILED,
			"Zebra failed to find or create a nexthop hash entry for ID (%u)",
			id);
		return -1;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) is new", __func__, nhe, nhe);

	/*
	 * If daemon nhg from the kernel, add a refcnt here to indicate the
	 * daemon owns it.
	 */
	if (PROTO_OWNED(nhe))
		zebra_nhg_increment_ref(nhe);

	/* It came from the kernel, so it is already installed there */
	SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
	SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);

	return 0;
}
1236 | ||
1237 | static int nhg_ctx_process_del(struct nhg_ctx *ctx) | |
1238 | { | |
1239 | struct nhg_hash_entry *nhe = NULL; | |
1240 | uint32_t id = nhg_ctx_get_id(ctx); | |
1241 | ||
1242 | nhe = zebra_nhg_lookup_id(id); | |
1243 | ||
1244 | if (!nhe) { | |
1245 | flog_warn( | |
1246 | EC_ZEBRA_BAD_NHG_MESSAGE, | |
1247 | "Kernel delete message received for nexthop group ID (%u) that we do not have in our ID table", | |
1248 | id); | |
1249 | return -1; | |
1250 | } | |
1251 | ||
1252 | zebra_nhg_handle_kernel_state_change(nhe, true); | |
1253 | ||
1254 | return 0; | |
1255 | } | |
1256 | ||
/* Finish with a context after processing. */
static void nhg_ctx_fini(struct nhg_ctx **ctx)
{
	/*
	 * Just freeing for now, maybe do something more in the future
	 * based on flag.
	 */

	nhg_ctx_free(ctx);
}
1266 | ||
1267 | static int queue_add(struct nhg_ctx *ctx) | |
1268 | { | |
1269 | /* If its queued or already processed do nothing */ | |
1270 | if (nhg_ctx_get_status(ctx) == NHG_CTX_QUEUED) | |
1271 | return 0; | |
1272 | ||
1273 | if (rib_queue_nhg_ctx_add(ctx)) { | |
1274 | nhg_ctx_set_status(ctx, NHG_CTX_FAILURE); | |
1275 | return -1; | |
1276 | } | |
1277 | ||
1278 | nhg_ctx_set_status(ctx, NHG_CTX_QUEUED); | |
1279 | ||
1280 | return 0; | |
1281 | } | |
1282 | ||
1283 | int nhg_ctx_process(struct nhg_ctx *ctx) | |
1284 | { | |
1285 | int ret = 0; | |
1286 | ||
1287 | switch (nhg_ctx_get_op(ctx)) { | |
1288 | case NHG_CTX_OP_NEW: | |
1289 | ret = nhg_ctx_process_new(ctx); | |
1290 | if (nhg_ctx_get_count(ctx) && ret == -ENOENT | |
1291 | && nhg_ctx_get_status(ctx) != NHG_CTX_REQUEUED) { | |
1292 | /** | |
1293 | * We have entered a situation where we are | |
1294 | * processing a group from the kernel | |
1295 | * that has a contained nexthop which | |
1296 | * we have not yet processed. | |
1297 | * | |
1298 | * Re-enqueue this ctx to be handled exactly one | |
1299 | * more time (indicated by the flag). | |
1300 | * | |
1301 | * By the time we get back to it, we | |
1302 | * should have processed its depends. | |
1303 | */ | |
1304 | nhg_ctx_set_status(ctx, NHG_CTX_NONE); | |
1305 | if (queue_add(ctx) == 0) { | |
1306 | nhg_ctx_set_status(ctx, NHG_CTX_REQUEUED); | |
1307 | return 0; | |
1308 | } | |
1309 | } | |
1310 | break; | |
1311 | case NHG_CTX_OP_DEL: | |
1312 | ret = nhg_ctx_process_del(ctx); | |
1313 | case NHG_CTX_OP_NONE: | |
1314 | break; | |
1315 | } | |
1316 | ||
1317 | nhg_ctx_set_status(ctx, (ret ? NHG_CTX_FAILURE : NHG_CTX_SUCCESS)); | |
1318 | ||
1319 | nhg_ctx_fini(&ctx); | |
1320 | ||
1321 | return ret; | |
1322 | } | |
1323 | ||
/* Kernel-side, you either get a single new nexthop or an array of ID's */
int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
			  uint8_t count, vrf_id_t vrf_id, afi_t afi, int type,
			  int startup, struct nhg_resilience *nhgr)
{
	struct nhg_ctx *ctx = NULL;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nh %pNHv, id %u, count %d",
			   __func__, nh, id, (int)count);

	/* Only kernel-range ids advance our counter; proto-owned ids
	 * live in their own range at/above ZEBRA_NHG_PROTO_LOWER.
	 */
	if (id > id_counter && id < ZEBRA_NHG_PROTO_LOWER)
		/* Increase our counter so we don't try to create
		 * an ID that already exists
		 */
		id_counter = id;

	ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count, nhgr);
	nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);

	/* Under startup conditions, we need to handle them immediately
	 * like we do for routes. Otherwise, we are going to get a route
	 * with a nhe_id that we have not handled.
	 */
	if (startup)
		return nhg_ctx_process(ctx);

	if (queue_add(ctx)) {
		nhg_ctx_fini(&ctx);
		return -1;
	}

	return 0;
}
1358 | ||
1359 | /* Kernel-side, received delete message */ | |
1360 | int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id) | |
1361 | { | |
1362 | struct nhg_ctx *ctx = NULL; | |
1363 | ||
1364 | ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0, NULL); | |
1365 | ||
1366 | nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL); | |
1367 | ||
1368 | if (queue_add(ctx)) { | |
1369 | nhg_ctx_fini(&ctx); | |
1370 | return -1; | |
1371 | } | |
1372 | ||
1373 | return 0; | |
1374 | } | |
1375 | ||
1376 | /* Some dependency helper functions */ | |
1377 | static struct nhg_hash_entry *depends_find_recursive(const struct nexthop *nh, | |
1378 | afi_t afi, int type) | |
1379 | { | |
1380 | struct nhg_hash_entry *nhe; | |
1381 | struct nexthop *lookup = NULL; | |
1382 | ||
1383 | lookup = nexthop_dup(nh, NULL); | |
1384 | ||
1385 | nhe = zebra_nhg_find_nexthop(0, lookup, afi, type, false); | |
1386 | ||
1387 | nexthops_free(lookup); | |
1388 | ||
1389 | return nhe; | |
1390 | } | |
1391 | ||
/* Find/create the nhe for a non-recursive nexthop using a cheap
 * stack-allocated snapshot (no heap alloc for the common case).
 */
static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
						     afi_t afi, int type,
						     bool from_dplane)
{
	struct nhg_hash_entry *nhe;
	struct nexthop lookup = {};

	/* Capture a snapshot of this single nh; it might be part of a list,
	 * so we need to make a standalone copy.
	 */
	nexthop_copy_no_recurse(&lookup, nh, NULL);

	nhe = zebra_nhg_find_nexthop(0, &lookup, afi, type, from_dplane);

	/* The copy may have allocated labels; free them if necessary. */
	nexthop_del_labels(&lookup);
	nexthop_del_srv6_seg6local(&lookup);
	nexthop_del_srv6_seg6(&lookup);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nh %pNHv => %p (%pNG)", __func__, nh, nhe, nhe);

	return nhe;
}
1416 | ||
1417 | static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi, | |
1418 | int type, bool from_dplane) | |
1419 | { | |
1420 | struct nhg_hash_entry *nhe = NULL; | |
1421 | ||
1422 | if (!nh) | |
1423 | goto done; | |
1424 | ||
1425 | /* We are separating these functions out to increase handling speed | |
1426 | * in the non-recursive case (by not alloc/freeing) | |
1427 | */ | |
1428 | if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) | |
1429 | nhe = depends_find_recursive(nh, afi, type); | |
1430 | else | |
1431 | nhe = depends_find_singleton(nh, afi, type, from_dplane); | |
1432 | ||
1433 | ||
1434 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) { | |
1435 | zlog_debug("%s: nh %pNHv %s => %p (%pNG)", __func__, nh, | |
1436 | CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE) ? "(R)" | |
1437 | : "", | |
1438 | nhe, nhe); | |
1439 | } | |
1440 | ||
1441 | done: | |
1442 | return nhe; | |
1443 | } | |
1444 | ||
/* Insert a depend nhe into a connected tree, taking a reference on
 * first insertion only.
 */
static void depends_add(struct nhg_connected_tree_head *head,
			struct nhg_hash_entry *depend)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: head %p nh %pNHv",
			   __func__, head, depend->nhg.nexthop);

	/* If NULL is returned, it was successfully added and
	 * needs to have its refcnt incremented.
	 *
	 * Else the NHE is already present in the tree and doesn't
	 * need to increment the refcnt.
	 */
	if (nhg_connected_tree_add_nhe(head, depend) == NULL)
		zebra_nhg_increment_ref(depend);
}
1461 | ||
1462 | static struct nhg_hash_entry * | |
1463 | depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh, | |
1464 | afi_t afi, int type, bool from_dplane) | |
1465 | { | |
1466 | struct nhg_hash_entry *depend = NULL; | |
1467 | ||
1468 | depend = depends_find(nh, afi, type, from_dplane); | |
1469 | ||
1470 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
1471 | zlog_debug("%s: nh %pNHv => %p", | |
1472 | __func__, nh, depend); | |
1473 | ||
1474 | if (depend) | |
1475 | depends_add(head, depend); | |
1476 | ||
1477 | return depend; | |
1478 | } | |
1479 | ||
1480 | static struct nhg_hash_entry * | |
1481 | depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id) | |
1482 | { | |
1483 | struct nhg_hash_entry *depend = NULL; | |
1484 | ||
1485 | depend = zebra_nhg_lookup_id(id); | |
1486 | ||
1487 | if (depend) | |
1488 | depends_add(head, depend); | |
1489 | ||
1490 | return depend; | |
1491 | } | |
1492 | ||
/* Drop the references a connected tree holds, then free the tree. */
static void depends_decrement_free(struct nhg_connected_tree_head *head)
{
	nhg_connected_tree_decrement_ref(head);
	nhg_connected_tree_free(head);
}
1498 | ||
/* Find an nhe based on a list of nexthops.
 *
 * 'nhg' must contain at least one nexthop.  Returns the found/created
 * hash entry via zebra_nhg_find().
 */
struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
					  struct nexthop_group *nhg,
					  afi_t rt_afi, int type)
{
	struct nhg_hash_entry *nhe = NULL;
	vrf_id_t vrf_id;

	/*
	 * CLANG SA is complaining that nexthop may be NULL
	 * Make it happy but this is ridonc
	 */
	assert(nhg->nexthop);
	/* vrf-lite keys everything in the default vrf */
	vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nhg->nexthop->vrf_id;

	zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, type, false);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: => nhe %p (%pNG)", __func__, nhe, nhe);

	return nhe;
}
1521 | ||
/* Find an nhe based on a route's nhe.
 *
 * Returns NULL (with an error log) when the template nhe is missing
 * or carries no nexthops.
 */
struct nhg_hash_entry *
zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi)
{
	struct nhg_hash_entry *nhe = NULL;

	if (!(rt_nhe && rt_nhe->nhg.nexthop)) {
		flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
			 "No nexthop passed to %s", __func__);
		return NULL;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: rt_nhe %p (%pNG)", __func__, rt_nhe, rt_nhe);

	zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi, false);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: => nhe %p (%pNG)", __func__, nhe, nhe);

	return nhe;
}
1544 | ||
1545 | /* | |
1546 | * Allocate backup nexthop info object. Typically these are embedded in | |
1547 | * nhg_hash_entry objects. | |
1548 | */ | |
1549 | struct nhg_backup_info *zebra_nhg_backup_alloc(void) | |
1550 | { | |
1551 | struct nhg_backup_info *p; | |
1552 | ||
1553 | p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info)); | |
1554 | ||
1555 | p->nhe = zebra_nhg_alloc(); | |
1556 | ||
1557 | /* Identify the embedded group used to hold the list of backups */ | |
1558 | SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP); | |
1559 | ||
1560 | return p; | |
1561 | } | |
1562 | ||
1563 | /* | |
1564 | * Free backup nexthop info object, deal with any embedded allocations | |
1565 | */ | |
1566 | void zebra_nhg_backup_free(struct nhg_backup_info **p) | |
1567 | { | |
1568 | if (p && *p) { | |
1569 | if ((*p)->nhe) | |
1570 | zebra_nhg_free((*p)->nhe); | |
1571 | ||
1572 | XFREE(MTYPE_NHG, (*p)); | |
1573 | } | |
1574 | } | |
1575 | ||
1576 | /* Accessor for backup nexthop group */ | |
1577 | struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe) | |
1578 | { | |
1579 | struct nexthop_group *p = NULL; | |
1580 | ||
1581 | if (nhe) { | |
1582 | if (nhe->backup_info && nhe->backup_info->nhe) | |
1583 | p = &(nhe->backup_info->nhe->nhg); | |
1584 | } | |
1585 | ||
1586 | return p; | |
1587 | } | |
1588 | ||
1589 | /* | |
1590 | * Helper to return a copy of a backup_info - note that this is a shallow | |
1591 | * copy, meant to be used when creating a new nhe from info passed in with | |
1592 | * a route e.g. | |
1593 | */ | |
1594 | static struct nhg_backup_info * | |
1595 | nhg_backup_copy(const struct nhg_backup_info *orig) | |
1596 | { | |
1597 | struct nhg_backup_info *b; | |
1598 | ||
1599 | b = zebra_nhg_backup_alloc(); | |
1600 | ||
1601 | /* Copy list of nexthops */ | |
1602 | nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg)); | |
1603 | ||
1604 | return b; | |
1605 | } | |
1606 | ||
/* Release everything an nhe owns: its nexthop list, backup info, and
 * its references into/out of the dependency trees.
 */
static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
{
	nexthops_free(nhe->nhg.nexthop);

	zebra_nhg_backup_free(&nhe->backup_info);

	/* Decrement to remove connection ref */
	nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
	nhg_connected_tree_free(&nhe->nhg_depends);
	nhg_connected_tree_free(&nhe->nhg_dependents);
}
1618 | ||
/* Free an nhe and everything it owns (caller must have released it
 * from the hash tables first where applicable).
 */
void zebra_nhg_free(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
		/* Group or singleton? */
		if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
			zlog_debug("%s: nhe %p (%pNG), refcnt %d", __func__,
				   nhe, nhe, nhe->refcnt);
		else
			zlog_debug("%s: nhe %p (%pNG), refcnt %d, NH %pNHv",
				   __func__, nhe, nhe, nhe->refcnt,
				   nhe->nhg.nexthop);
	}

	/* Cancel any pending keep-around timer before freeing */
	EVENT_OFF(nhe->timer);

	zebra_nhg_free_members(nhe);

	XFREE(MTYPE_NHG, nhe);
}
1638 | ||
1639 | /* | |
1640 | * Let's just drop the memory associated with each item | |
1641 | */ | |
1642 | void zebra_nhg_hash_free(void *p) | |
1643 | { | |
1644 | struct nhg_hash_entry *nhe = p; | |
1645 | ||
1646 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) { | |
1647 | /* Group or singleton? */ | |
1648 | if (nhe->nhg.nexthop && nhe->nhg.nexthop->next) | |
1649 | zlog_debug("%s: nhe %p (%u), refcnt %d", __func__, nhe, | |
1650 | nhe->id, nhe->refcnt); | |
1651 | else | |
1652 | zlog_debug("%s: nhe %p (%pNG), refcnt %d, NH %pNHv", | |
1653 | __func__, nhe, nhe, nhe->refcnt, | |
1654 | nhe->nhg.nexthop); | |
1655 | } | |
1656 | ||
1657 | EVENT_OFF(nhe->timer); | |
1658 | ||
1659 | nexthops_free(nhe->nhg.nexthop); | |
1660 | ||
1661 | XFREE(MTYPE_NHG, nhe); | |
1662 | } | |
1663 | ||
/*
 * On cleanup there are nexthop groups that have not
 * been resolved at all( a nhe->id of 0 ). As such
 * zebra needs to clean up the memory associated with
 * those entries.
 */
void zebra_nhg_hash_free_zero_id(struct hash_bucket *b, void *arg)
{
	struct nhg_hash_entry *nhe = b->data;
	struct nhg_connected *dep;

	/* Free un-hashed (id == 0) depends; hashed ones are freed by
	 * the table's own destructor.
	 */
	while ((dep = nhg_connected_tree_pop(&nhe->nhg_depends))) {
		if (dep->nhe->id == 0)
			zebra_nhg_hash_free(dep->nhe);

		nhg_connected_free(dep);
	}

	while ((dep = nhg_connected_tree_pop(&nhe->nhg_dependents)))
		nhg_connected_free(dep);

	/* Backup nhe embedded here is never hashed; free it too */
	if (nhe->backup_info && nhe->backup_info->nhe->id == 0) {
		while ((dep = nhg_connected_tree_pop(
				&nhe->backup_info->nhe->nhg_depends)))
			nhg_connected_free(dep);

		zebra_nhg_hash_free(nhe->backup_info->nhe);

		XFREE(MTYPE_NHG, nhe->backup_info);
	}
}
1695 | ||
/* Keep-around timer expiry: if only the artificial reference taken in
 * zebra_nhg_decrement_ref() remains, release it now so the nhe can be
 * uninstalled/freed.
 */
static void zebra_nhg_timer(struct event *thread)
{
	struct nhg_hash_entry *nhe = EVENT_ARG(thread);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("Nexthop Timer for nhe: %pNG", nhe);

	if (nhe->refcnt == 1)
		zebra_nhg_decrement_ref(nhe);
}
1706 | ||
/* Drop one reference from an nhe.
 *
 * When the last reference goes away while the group is still installed,
 * hold it briefly on a keep-around timer (with one artificial ref) so a
 * quickly re-used group is not churned in the kernel.
 */
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) %d => %d", __func__, nhe, nhe,
			   nhe->refcnt, nhe->refcnt - 1);

	nhe->refcnt--;

	if (!zebra_router_in_shutdown() && nhe->refcnt <= 0 &&
	    CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
	    !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
		/* Artificial ref held while the timer runs */
		nhe->refcnt = 1;
		SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
		event_add_timer(zrouter.master, zebra_nhg_timer, nhe,
				zrouter.nhg_keep, &nhe->timer);
		return;
	}

	if (!zebra_nhg_depends_is_empty(nhe))
		nhg_connected_tree_decrement_ref(&nhe->nhg_depends);

	if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0)
		zebra_nhg_uninstall_kernel(nhe);
}
1731 | ||
/* Take one reference on an nhe; a pending keep-around timer is
 * cancelled and its artificial reference returned.
 */
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
{
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: nhe %p (%pNG) %d => %d", __func__, nhe, nhe,
			   nhe->refcnt, nhe->refcnt + 1);

	nhe->refcnt++;

	if (event_is_scheduled(nhe->timer)) {
		EVENT_OFF(nhe->timer);
		/* Give back the artificial keep-around reference */
		nhe->refcnt--;
		UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
	}

	if (!zebra_nhg_depends_is_empty(nhe))
		nhg_connected_tree_increment_ref(&nhe->nhg_depends);
}
1749 | ||
/* Build and attach a "resolved" nexthop underneath a recursive one.
 *
 * 'newhop' is the nexthop from the resolving route; 'nexthop' is the
 * recursive nexthop being resolved; 'policy', if set, is an SR-TE
 * policy whose segment list supplies the outer label stack.  The new
 * nexthop is appended to nexthop->resolved and returned.
 */
static struct nexthop *nexthop_set_resolved(afi_t afi,
					    const struct nexthop *newhop,
					    struct nexthop *nexthop,
					    struct zebra_sr_policy *policy)
{
	struct nexthop *resolved_hop;
	uint8_t num_labels = 0;
	mpls_label_t labels[MPLS_MAX_LABELS];
	enum lsp_types_t label_type = ZEBRA_LSP_NONE;
	int i = 0;

	resolved_hop = nexthop_new();
	SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);

	resolved_hop->vrf_id = nexthop->vrf_id;
	switch (newhop->type) {
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		/* If the resolving route specifies a gateway, use it */
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv4 = newhop->gate.ipv4;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IPV6:
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv6 = newhop->gate.ipv6;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IFINDEX:
		/* If the resolving route is an interface route,
		 * it means the gateway we are looking up is connected
		 * to that interface. (The actual network is _not_ onlink).
		 * Therefore, the resolved route should have the original
		 * gateway as nexthop as it is directly connected.
		 *
		 * On Linux, we have to set the onlink netlink flag because
		 * otherwise, the kernel won't accept the route.
		 */
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
		if (afi == AFI_IP) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
		} else if (afi == AFI_IP6) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
		}
		resolved_hop->ifindex = newhop->ifindex;
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
		resolved_hop->bh_type = newhop->bh_type;
		break;
	}

	if (newhop->flags & NEXTHOP_FLAG_ONLINK)
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;

	/* Copy labels of the resolved route and the parent resolving to it */
	if (policy) {
		int label_num = 0;

		/*
		 * Don't push the first SID if the corresponding action in the
		 * LFIB is POP.
		 */
		if (!newhop->nh_label || !newhop->nh_label->num_labels
		    || newhop->nh_label->label[0] == MPLS_LABEL_IMPLICIT_NULL)
			label_num = 1;

		for (; label_num < policy->segment_list.label_num; label_num++)
			labels[num_labels++] =
				policy->segment_list.labels[label_num];
		label_type = policy->segment_list.type;
	} else if (newhop->nh_label) {
		for (i = 0; i < newhop->nh_label->num_labels; i++) {
			/* Be a bit picky about overrunning the local array */
			if (num_labels >= MPLS_MAX_LABELS) {
				if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
					zlog_debug("%s: too many labels in newhop %pNHv",
						   __func__, newhop);
				break;
			}
			labels[num_labels++] = newhop->nh_label->label[i];
		}
		/* Use the "outer" type */
		label_type = newhop->nh_label_type;
	}

	/* Parent (recursive) nexthop labels go inside the stack, after
	 * the resolving route's labels collected above.
	 */
	if (nexthop->nh_label) {
		for (i = 0; i < nexthop->nh_label->num_labels; i++) {
			/* Be a bit picky about overrunning the local array */
			if (num_labels >= MPLS_MAX_LABELS) {
				if (IS_ZEBRA_DEBUG_NHG || IS_ZEBRA_DEBUG_RIB)
					zlog_debug("%s: too many labels in nexthop %pNHv",
						   __func__, nexthop);
				break;
			}
			labels[num_labels++] = nexthop->nh_label->label[i];
		}

		/* If the parent has labels, use its type if
		 * we don't already have one.
		 */
		if (label_type == ZEBRA_LSP_NONE)
			label_type = nexthop->nh_label_type;
	}

	if (num_labels)
		nexthop_add_labels(resolved_hop, label_type, num_labels,
				   labels);

	/* Carry the parent's SRv6 data onto the resolved hop */
	if (nexthop->nh_srv6) {
		nexthop_add_srv6_seg6local(resolved_hop,
					   nexthop->nh_srv6->seg6local_action,
					   &nexthop->nh_srv6->seg6local_ctx);
		nexthop_add_srv6_seg6(resolved_hop,
				      &nexthop->nh_srv6->seg6_segs);
	}

	resolved_hop->rparent = nexthop;
	_nexthop_add(&nexthop->resolved, resolved_hop);

	return resolved_hop;
}
1883 | ||
1884 | /* Checks if nexthop we are trying to resolve to is valid */ | |
1885 | static bool nexthop_valid_resolve(const struct nexthop *nexthop, | |
1886 | const struct nexthop *resolved) | |
1887 | { | |
1888 | /* Can't resolve to a recursive nexthop */ | |
1889 | if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE)) | |
1890 | return false; | |
1891 | ||
1892 | /* Must be ACTIVE */ | |
1893 | if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE)) | |
1894 | return false; | |
1895 | ||
1896 | /* Must not be duplicate */ | |
1897 | if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_DUPLICATE)) | |
1898 | return false; | |
1899 | ||
1900 | switch (nexthop->type) { | |
1901 | case NEXTHOP_TYPE_IPV4_IFINDEX: | |
1902 | case NEXTHOP_TYPE_IPV6_IFINDEX: | |
1903 | /* If the nexthop we are resolving to does not match the | |
1904 | * ifindex for the nexthop the route wanted, its not valid. | |
1905 | */ | |
1906 | if (nexthop->ifindex != resolved->ifindex) | |
1907 | return false; | |
1908 | break; | |
1909 | case NEXTHOP_TYPE_IPV4: | |
1910 | case NEXTHOP_TYPE_IPV6: | |
1911 | case NEXTHOP_TYPE_IFINDEX: | |
1912 | case NEXTHOP_TYPE_BLACKHOLE: | |
1913 | break; | |
1914 | } | |
1915 | ||
1916 | return true; | |
1917 | } | |
1918 | ||
1919 | /* | |
1920 | * Downstream VNI and Single VXlan device check. | |
1921 | * | |
1922 | * If it has nexthop VNI labels at this point it must be D-VNI allocated | |
1923 | * and all the nexthops have to be on an SVD. | |
1924 | * | |
1925 | * If SVD is not available, mark as inactive. | |
1926 | */ | |
1927 | static bool nexthop_set_evpn_dvni_svd(vrf_id_t re_vrf_id, | |
1928 | struct nexthop *nexthop) | |
1929 | { | |
1930 | if (!is_vrf_l3vni_svd_backed(re_vrf_id)) { | |
1931 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) { | |
1932 | struct vrf *vrf = vrf_lookup_by_id(re_vrf_id); | |
1933 | ||
1934 | zlog_debug( | |
1935 | "nexthop %pNHv D-VNI but route's vrf %s(%u) doesn't use SVD", | |
1936 | nexthop, VRF_LOGNAME(vrf), re_vrf_id); | |
1937 | } | |
1938 | ||
1939 | return false; | |
1940 | } | |
1941 | ||
1942 | nexthop->ifindex = get_l3vni_vxlan_ifindex(re_vrf_id); | |
1943 | nexthop->vrf_id = 0; | |
1944 | ||
1945 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
1946 | zlog_debug("nexthop %pNHv using SVD", nexthop); | |
1947 | ||
1948 | return true; | |
1949 | } | |
1950 | ||
/*
 * Carry backup-nexthop info across a recursive resolution step.
 *
 * 'nexthop' is the (installed) nexthop that resolved a recursive nexthop;
 * it may reference backup nexthops by index into its nhe's backup group.
 * Copy those backups into the resolving nhe ('resolve_nhe'), and record
 * the corresponding (new) backup indexes in 'resolved', the resolving
 * nexthop. 'map' caches orig-index -> new-index translations so the same
 * backup is only copied once per resolution pass.
 *
 * Returns 0 unconditionally.
 */
static int resolve_backup_nexthops(const struct nexthop *nexthop,
				   const struct nhg_hash_entry *nhe,
				   struct nexthop *resolved,
				   struct nhg_hash_entry *resolve_nhe,
				   struct backup_nh_map_s *map)
{
	int i, j, idx;
	const struct nexthop *bnh;
	struct nexthop *nh, *newnh;
	mpls_label_t labels[MPLS_MAX_LABELS];
	uint8_t num_labels;

	assert(nexthop->backup_num <= NEXTHOP_MAX_BACKUPS);

	/* Locate backups from the original nexthop's backup index and nhe */
	for (i = 0; i < nexthop->backup_num; i++) {
		idx = nexthop->backup_idx[i];

		/* Do we already know about this particular backup? */
		for (j = 0; j < map->map_count; j++) {
			if (map->map[j].orig_idx == idx)
				break;
		}

		if (j < map->map_count) {
			/* Cache hit: reuse the previously-computed new index */
			resolved->backup_idx[resolved->backup_num] =
				map->map[j].new_idx;
			resolved->backup_num++;

			SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);

			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug("%s: found map idx orig %d, new %d",
					   __func__, map->map[j].orig_idx,
					   map->map[j].new_idx);

			continue;
		}

		/* We can't handle any new map entries at this point. */
		if (map->map_count == MULTIPATH_NUM)
			break;

		/* Need to create/copy a new backup: walk to the idx'th
		 * entry of the source nhe's backup nexthop list.
		 */
		bnh = nhe->backup_info->nhe->nhg.nexthop;
		for (j = 0; j < idx; j++) {
			if (bnh == NULL)
				break;
			bnh = bnh->next;
		}

		/* Whoops - bad index in the nexthop? */
		if (bnh == NULL)
			continue;

		if (resolve_nhe->backup_info == NULL)
			resolve_nhe->backup_info = zebra_nhg_backup_alloc();

		/* Update backup info in the resolving nexthop and its nhe */
		newnh = nexthop_dup_no_recurse(bnh, NULL);

		/* We may need some special handling for mpls labels: the new
		 * backup needs to carry the recursive nexthop's labels,
		 * if any: they may be vrf labels e.g.
		 * The original/inner labels are in the stack of 'resolve_nhe',
		 * if that is longer than the stack in 'nexthop'.
		 */
		if (newnh->nh_label && resolved->nh_label &&
		    nexthop->nh_label) {
			if (resolved->nh_label->num_labels >
			    nexthop->nh_label->num_labels) {
				/* Prepare new label stack: start with the
				 * backup's own (outer) labels...
				 */
				num_labels = 0;
				for (j = 0; j < newnh->nh_label->num_labels;
				     j++) {
					labels[j] = newnh->nh_label->label[j];
					num_labels++;
				}

				/* ...then append the inner labels that the
				 * resolving nexthop gained during resolution.
				 * NOTE(review): no explicit MPLS_MAX_LABELS
				 * bound check here — presumably the two
				 * stacks always fit; confirm.
				 */
				for (j = nexthop->nh_label->num_labels;
				     j < resolved->nh_label->num_labels;
				     j++) {
					labels[num_labels] =
						resolved->nh_label->label[j];
					num_labels++;
				}

				/* Replace existing label stack in the backup */
				nexthop_del_labels(newnh);
				nexthop_add_labels(newnh, bnh->nh_label_type,
						   num_labels, labels);
			}
		}

		/* Need to compute the new backup index in the new
		 * backup list, and add to map struct. 'j' counts the
		 * position at which 'newnh' is appended.
		 */
		j = 0;
		nh = resolve_nhe->backup_info->nhe->nhg.nexthop;
		if (nh) {
			while (nh->next) {
				nh = nh->next;
				j++;
			}

			nh->next = newnh;
			j++;

		} else /* First one */
			resolve_nhe->backup_info->nhe->nhg.nexthop = newnh;

		/* Capture index */
		resolved->backup_idx[resolved->backup_num] = j;
		resolved->backup_num++;

		SET_FLAG(resolved->flags, NEXTHOP_FLAG_HAS_BACKUP);

		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("%s: added idx orig %d, new %d",
				   __func__, idx, j);

		/* Update map/cache */
		map->map[map->map_count].orig_idx = idx;
		map->map[map->map_count].new_idx = j;
		map->map_count++;
	}

	return 0;
}
2087 | ||
2088 | /* | |
2089 | * So this nexthop resolution has decided that a connected route | |
2090 | * is the correct choice. At this point in time if FRR has multiple | |
2091 | * connected routes that all point to the same prefix one will be | |
2092 | * selected, *but* the particular interface may not be the one | |
2093 | * that the nexthop points at. Let's look at all the available | |
2094 | * connected routes on this node and if any of them auto match | |
2095 | * the routes nexthops ifindex that is good enough for a match | |
2096 | * | |
2097 | * This code is depending on the fact that a nexthop->ifindex is 0 | |
2098 | * if it is not known, if this assumption changes, yummy! | |
2099 | * Additionally a ifindx of 0 means figure it out for us. | |
2100 | */ | |
2101 | static struct route_entry * | |
2102 | zebra_nhg_connected_ifindex(struct route_node *rn, struct route_entry *match, | |
2103 | int32_t curr_ifindex) | |
2104 | { | |
2105 | struct nexthop *newhop = match->nhe->nhg.nexthop; | |
2106 | struct route_entry *re; | |
2107 | ||
2108 | assert(newhop); /* What a kick in the patooey */ | |
2109 | ||
2110 | if (curr_ifindex == 0) | |
2111 | return match; | |
2112 | ||
2113 | if (curr_ifindex == newhop->ifindex) | |
2114 | return match; | |
2115 | ||
2116 | /* | |
2117 | * At this point we know that this route is matching a connected | |
2118 | * but there are possibly a bunch of connected routes that are | |
2119 | * alive that should be considered as well. So let's iterate over | |
2120 | * all the re's and see if they are connected as well and maybe one | |
2121 | * of those ifindexes match as well. | |
2122 | */ | |
2123 | RNODE_FOREACH_RE (rn, re) { | |
2124 | if (re->type != ZEBRA_ROUTE_CONNECT) | |
2125 | continue; | |
2126 | ||
2127 | if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) | |
2128 | continue; | |
2129 | ||
2130 | /* | |
2131 | * zebra has a connected route that is not removed | |
2132 | * let's test if it is good | |
2133 | */ | |
2134 | newhop = re->nhe->nhg.nexthop; | |
2135 | assert(newhop); | |
2136 | if (curr_ifindex == newhop->ifindex) | |
2137 | return re; | |
2138 | } | |
2139 | ||
2140 | return match; | |
2141 | } | |
2142 | ||
/*
 * Given a nexthop we need to properly recursively resolve,
 * do a table lookup to find and match if at all possible.
 * Set the nexthop->ifindex and resolution info as appropriate.
 *
 * nexthop - nexthop to validate/resolve; may be updated (ifindex,
 *           RECURSIVE flag, resolved list)
 * nhe     - containing nhe; receives backup info for recursive resolution
 * top     - prefix of the owning route, used to detect self-resolution
 * type    - owning route's type (ZEBRA_ROUTE_*)
 * flags   - owning route's flags (e.g. ZEBRA_FLAG_ALLOW_RECURSION)
 * pmtu    - out-param: resolving route's mtu, set on recursive success
 * vrf_id  - owning route's vrf
 *
 * Returns 1 if the nexthop is usable/active, 0 otherwise.
 */
static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
			  const struct prefix *top, int type, uint32_t flags,
			  uint32_t *pmtu, vrf_id_t vrf_id)
{
	struct prefix p;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *match = NULL;
	int resolved;
	struct zebra_nhlfe *nhlfe;
	struct nexthop *newhop;
	struct interface *ifp;
	rib_dest_t *dest;
	struct zebra_vrf *zvrf;
	struct in_addr local_ipv4;
	struct in_addr *ipv4;
	afi_t afi = AFI_IP;

	/* Reset some nexthop attributes that we'll recompute if necessary */
	if ((nexthop->type == NEXTHOP_TYPE_IPV4)
	    || (nexthop->type == NEXTHOP_TYPE_IPV6))
		nexthop->ifindex = 0;

	UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
	nexthops_free(nexthop->resolved);
	nexthop->resolved = NULL;

	/*
	 * Set afi based on nexthop type.
	 * Some nexthop types get special handling, possibly skipping
	 * the normal processing.
	 */
	switch (nexthop->type) {
	case NEXTHOP_TYPE_IFINDEX:

		ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
		/*
		 * If the interface exists and its operative or its a kernel
		 * route and interface is up, its active. We trust kernel routes
		 * to be good.
		 */
		if (ifp && (if_is_operative(ifp)))
			return 1;
		else
			return 0;
		break;

	case NEXTHOP_TYPE_IPV6_IFINDEX:
		afi = AFI_IP6;

		/* Link-local gateways only need the interface to be up;
		 * no table lookup applies.
		 */
		if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
			ifp = if_lookup_by_index(nexthop->ifindex,
						 nexthop->vrf_id);
			if (ifp && if_is_operative(ifp))
				return 1;
			else
				return 0;
		}
		break;

	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		afi = AFI_IP;
		break;
	case NEXTHOP_TYPE_IPV6:
		afi = AFI_IP6;
		break;

	case NEXTHOP_TYPE_BLACKHOLE:
		return 1;
	}

	/*
	 * If the nexthop has been marked as 'onlink' we just need to make
	 * sure the nexthop's interface is known and is operational.
	 */
	if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
		/* DVNI/SVD Checks for EVPN routes */
		if (nexthop->nh_label &&
		    nexthop->nh_label_type == ZEBRA_LSP_EVPN &&
		    !nexthop_set_evpn_dvni_svd(vrf_id, nexthop))
			return 0;

		ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
		if (!ifp) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug("nexthop %pNHv marked onlink but nhif %u doesn't exist",
					   nexthop, nexthop->ifindex);
			return 0;
		}
		if (!if_is_operative(ifp)) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug("nexthop %pNHv marked onlink but nhif %s is not operational",
					   nexthop, ifp->name);
			return 0;
		}
		return 1;
	}

	/* Reject a host route whose gateway is its own prefix: it could
	 * only ever resolve through itself.
	 */
	if (top &&
	    ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN &&
	      nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr) ||
	     (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN &&
	      memcmp(&nexthop->gate.ipv6, &top->u.prefix6, IPV6_MAX_BYTELEN) ==
		      0)) &&
	    nexthop->vrf_id == vrf_id) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug(
				"        :%s: Attempting to install a max prefixlength route through itself",
				__func__);
		return 0;
	}

	/* Validation for ipv4 mapped ipv6 nexthop. */
	if (IS_MAPPED_IPV6(&nexthop->gate.ipv6)) {
		afi = AFI_IP;
		ipv4 = &local_ipv4;
		ipv4_mapped_ipv6_to_ipv4(&nexthop->gate.ipv6, ipv4);
	} else {
		ipv4 = &nexthop->gate.ipv4;
	}

	/* Processing for nexthops with SR 'color' attribute, using
	 * the corresponding SR policy object.
	 */
	if (nexthop->srte_color) {
		struct ipaddr endpoint = {0};
		struct zebra_sr_policy *policy;

		switch (afi) {
		case AFI_IP:
			endpoint.ipa_type = IPADDR_V4;
			endpoint.ipaddr_v4 = *ipv4;
			break;
		case AFI_IP6:
			endpoint.ipa_type = IPADDR_V6;
			endpoint.ipaddr_v6 = nexthop->gate.ipv6;
			break;
		case AFI_UNSPEC:
		case AFI_L2VPN:
		case AFI_MAX:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown address-family: %u", __func__,
				 afi);
			exit(1);
		}

		policy = zebra_sr_policy_find(nexthop->srte_color, &endpoint);
		if (policy && policy->status == ZEBRA_SR_POLICY_UP) {
			resolved = 0;
			/* Resolve via each selected, non-deleted nhlfe of
			 * the policy's LSP.
			 */
			frr_each_safe (nhlfe_list, &policy->lsp->nhlfe_list,
				       nhlfe) {
				if (!CHECK_FLAG(nhlfe->flags,
						NHLFE_FLAG_SELECTED)
				    || CHECK_FLAG(nhlfe->flags,
						  NHLFE_FLAG_DELETED))
					continue;
				SET_FLAG(nexthop->flags,
					 NEXTHOP_FLAG_RECURSIVE);
				nexthop_set_resolved(afi, nhlfe->nexthop,
						     nexthop, policy);
				resolved = 1;
			}
			if (resolved)
				return 1;
		}
	}

	/* Make lookup prefix. */
	memset(&p, 0, sizeof(struct prefix));
	switch (afi) {
	case AFI_IP:
		p.family = AF_INET;
		p.prefixlen = IPV4_MAX_BITLEN;
		p.u.prefix4 = *ipv4;
		break;
	case AFI_IP6:
		p.family = AF_INET6;
		p.prefixlen = IPV6_MAX_BITLEN;
		p.u.prefix6 = nexthop->gate.ipv6;
		break;
	case AFI_UNSPEC:
	case AFI_L2VPN:
	case AFI_MAX:
		assert(afi != AFI_IP && afi != AFI_IP6);
		break;
	}
	/* Lookup table. */
	table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
	/* get zvrf */
	zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
	if (!table || !zvrf) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("        %s: Table not found", __func__);
		return 0;
	}

	/* Walk up the tree from the longest match until a usable route
	 * is found or the tree is exhausted.
	 */
	rn = route_node_match(table, (struct prefix *)&p);
	while (rn) {
		route_unlock_node(rn);

		/* Lookup should halt if we've matched against ourselves ('top',
		 * if specified) - i.e., we cannot have a nexthop NH1 is
		 * resolved by a route NH1. The exception is if the route is a
		 * host route.
		 */
		if (prefix_same(&rn->p, top))
			if (((afi == AFI_IP)
			     && (rn->p.prefixlen != IPV4_MAX_BITLEN))
			    || ((afi == AFI_IP6)
				&& (rn->p.prefixlen != IPV6_MAX_BITLEN))) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED)
					zlog_debug(
						"        %s: Matched against ourself and prefix length is not max bit length",
						__func__);
				return 0;
			}

		/* Pick up selected route. */
		/* However, do not resolve over default route unless explicitly
		 * allowed.
		 */
		if (is_default_prefix(&rn->p)
		    && !rnh_resolve_via_default(zvrf, p.family)) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"        :%s: Resolved against default route",
					__func__);
			return 0;
		}

		dest = rib_dest_from_rnode(rn);
		if (dest && dest->selected_fib
		    && !CHECK_FLAG(dest->selected_fib->status,
				   ROUTE_ENTRY_REMOVED)
		    && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
			match = dest->selected_fib;

		/* If there is no selected route or matched route is EGP, go up
		 * tree.
		 */
		if (!match) {
			do {
				rn = rn->parent;
			} while (rn && rn->info == NULL);
			if (rn)
				route_lock_node(rn);

			continue;
		}

		if ((match->type == ZEBRA_ROUTE_CONNECT) ||
		    (RIB_SYSTEM_ROUTE(match) && RSYSTEM_ROUTE(type))) {
			/* Directly-connected resolution: pick the connected
			 * route whose interface matches, if any.
			 */
			match = zebra_nhg_connected_ifindex(rn, match,
							    nexthop->ifindex);

			newhop = match->nhe->nhg.nexthop;
			if (nexthop->type == NEXTHOP_TYPE_IPV4 ||
			    nexthop->type == NEXTHOP_TYPE_IPV6)
				nexthop->ifindex = newhop->ifindex;
			else if (nexthop->ifindex != newhop->ifindex) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED)
					zlog_debug(
						"%s: %pNHv given ifindex does not match nexthops ifindex found: %pNHv",
						__func__, nexthop, newhop);
				/*
				 * NEXTHOP_TYPE_*_IFINDEX but ifindex
				 * doesn't match what we found.
				 */
				return 0;
			}

			if (IS_ZEBRA_DEBUG_NHG_DETAIL)
				zlog_debug(
					"%s: CONNECT match %p (%pNG), newhop %pNHv",
					__func__, match, match->nhe, newhop);

			return 1;
		} else if (CHECK_FLAG(flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
			struct nexthop_group *nhg;
			struct nexthop *resolver;
			struct backup_nh_map_s map = {};

			resolved = 0;

			/*
			 * Only useful if installed or being Route Replacing
			 * Why Being Route Replaced as well?
			 * Imagine a route A and route B( that depends on A )
			 * for recursive resolution and A already exists in the
			 * zebra rib. If zebra receives the routes
			 * for resolution at aproximately the same time in the [
			 * B, A ] order on the workQ. If this happens then
			 * normal route resolution will happen and B will be
			 * resolved successfully and then A will be resolved
			 * successfully. Now imagine the reversed order [A, B].
			 * A will be resolved and then scheduled for installed
			 * (Thus not having the ROUTE_ENTRY_INSTALLED flag ). B
			 * will then get resolved and fail to be installed
			 * because the original below test. Let's `loosen` this
			 * up a tiny bit and allow the
			 * ROUTE_ENTRY_ROUTE_REPLACING flag ( that is set when a
			 * Route Replace operation is being initiated on A now )
			 * to now satisfy this situation. This will allow
			 * either order in the workQ to work properly.
			 */
			if (!CHECK_FLAG(match->status, ROUTE_ENTRY_INSTALLED) &&
			    !CHECK_FLAG(match->status,
					ROUTE_ENTRY_ROUTE_REPLACING)) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED)
					zlog_debug(
						"%s: match %p (%pNG) not installed or being Route Replaced",
						__func__, match, match->nhe);

				goto done_with_match;
			}

			/* Examine installed nexthops; note that there
			 * may not be any installed primary nexthops if
			 * only backups are installed.
			 */
			nhg = rib_get_fib_nhg(match);
			for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
				if (!nexthop_valid_resolve(nexthop, newhop))
					continue;

				if (IS_ZEBRA_DEBUG_NHG_DETAIL)
					zlog_debug(
						"%s: RECURSIVE match %p (%pNG), newhop %pNHv",
						__func__, match, match->nhe,
						newhop);

				SET_FLAG(nexthop->flags,
					 NEXTHOP_FLAG_RECURSIVE);
				resolver = nexthop_set_resolved(afi, newhop,
								nexthop, NULL);
				resolved = 1;

				/* If there are backup nexthops, capture
				 * that info with the resolving nexthop.
				 */
				if (resolver && newhop->backup_num > 0) {
					resolve_backup_nexthops(newhop,
								match->nhe,
								resolver, nhe,
								&map);
				}
			}

			/* Examine installed backup nexthops, if any. There
			 * are only installed backups *if* there is a
			 * dedicated fib list. The UI can also control use
			 * of backups for resolution.
			 */
			nhg = rib_get_fib_backup_nhg(match);
			if (!use_recursive_backups ||
			    nhg == NULL || nhg->nexthop == NULL)
				goto done_with_match;

			for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
				if (!nexthop_valid_resolve(nexthop, newhop))
					continue;

				if (IS_ZEBRA_DEBUG_NHG_DETAIL)
					zlog_debug(
						"%s: RECURSIVE match backup %p (%pNG), newhop %pNHv",
						__func__, match, match->nhe,
						newhop);

				SET_FLAG(nexthop->flags,
					 NEXTHOP_FLAG_RECURSIVE);
				nexthop_set_resolved(afi, newhop, nexthop,
						     NULL);
				resolved = 1;
			}

		done_with_match:
			/* Capture resolving mtu */
			if (resolved) {
				if (pmtu)
					*pmtu = match->mtu;

			} else if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"        %s: Recursion failed to find",
					__func__);

			return resolved;
		} else {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
				zlog_debug(
					"        %s: Route Type %s has not turned on recursion",
					__func__, zebra_route_string(type));
				if (type == ZEBRA_ROUTE_BGP
				    && !CHECK_FLAG(flags, ZEBRA_FLAG_IBGP))
					zlog_debug(
						"        EBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
			}
			return 0;
		}
	}
	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
		zlog_debug("        %s: Nexthop did not lookup in table",
			   __func__);
	return 0;
}
2554 | ||
/* This function verifies reachability of one given nexthop, which can be
 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
 * in nexthop->flags field. The nexthop->ifindex will be updated
 * appropriately as well.
 *
 * An existing route map can turn an otherwise active nexthop into inactive,
 * but not vice versa.
 *
 * The return value is the final value of 'ACTIVE' flag.
 */
static unsigned nexthop_active_check(struct route_node *rn,
				     struct route_entry *re,
				     struct nexthop *nexthop,
				     struct nhg_hash_entry *nhe)
{
	route_map_result_t ret = RMAP_PERMITMATCH;
	afi_t family;
	const struct prefix *p, *src_p;
	struct zebra_vrf *zvrf;
	uint32_t mtu = 0;
	vrf_id_t vrf_id;

	srcdest_rnode_prefixes(rn, &p, &src_p);

	if (rn->p.family == AF_INET)
		family = AFI_IP;
	else if (rn->p.family == AF_INET6)
		family = AFI_IP6;
	else
		/* NOTE(review): AF_UNSPEC stored in an afi_t; both are 0,
		 * and the 'family == 0' check below relies on that.
		 */
		family = AF_UNSPEC;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p, nexthop %pNHv", __func__, re, nexthop);

	/*
	 * If this is a kernel route, then if the interface is *up* then
	 * by golly gee whiz it's a good route.
	 */
	if (re->type == ZEBRA_ROUTE_KERNEL || re->type == ZEBRA_ROUTE_SYSTEM) {
		struct interface *ifp;

		ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);

		if (ifp && (if_is_operative(ifp) || if_is_up(ifp))) {
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
			goto skip_check;
		}
	}

	vrf_id = zvrf_id(rib_dest_vrf(rib_dest_from_rnode(rn)));
	switch (nexthop->type) {
	case NEXTHOP_TYPE_IFINDEX:
		if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
				   &mtu, vrf_id))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		family = AFI_IP;
		if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
				   &mtu, vrf_id))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV6:
		family = AFI_IP6;
		if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
				   &mtu, vrf_id))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		/* RFC 5549, v4 prefix with v6 NH */
		if (rn->p.family != AF_INET)
			family = AFI_IP6;

		if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
				   &mtu, vrf_id))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		/* Blackhole nexthops are always usable */
		SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	default:
		break;
	}

skip_check:

	if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("        %s: Unable to find active nexthop",
				   __func__);
		return 0;
	}

	/* Capture recursive nexthop mtu.
	 * TODO -- the code used to just reset the re's value to zero
	 * for each nexthop, and then jam any resolving route's mtu value in,
	 * whether or not that was zero, or lt/gt any existing value? The
	 * way this is used appears to be as a floor value, so let's try
	 * using it that way here.
	 */
	if (mtu > 0) {
		if (re->nexthop_mtu == 0 || re->nexthop_mtu > mtu)
			re->nexthop_mtu = mtu;
	}

	/* XXX: What exactly do those checks do? Do we support
	 * e.g. IPv4 routes with IPv6 nexthops or vice versa?
	 */
	if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
	    || (family == AFI_IP6 && p->family != AF_INET6))
		return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);

	/* The original code didn't determine the family correctly
	 * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
	 * from the rib_table_info in those cases.
	 * Possibly it may be better to use only the rib_table_info
	 * in every case.
	 */
	if (family == 0) {
		struct rib_table_info *info;

		info = srcdest_rnode_table_info(rn);
		family = info->afi;
	}

	memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));

	zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
	if (!zvrf) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("        %s: zvrf is NULL", __func__);
		return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
	}

	/* It'll get set if required inside */
	ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
				    zvrf, re->tag);
	if (ret == RMAP_DENYMATCH) {
		if (IS_ZEBRA_DEBUG_RIB) {
			zlog_debug(
				"%u:%pRN: Filtering out with NH %pNHv due to route map",
				re->vrf_id, rn, nexthop);
		}
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
	}
	return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}
2711 | ||
2712 | /* Helper function called after resolution to walk nhg rb trees | |
2713 | * and toggle the NEXTHOP_GROUP_VALID flag if the nexthop | |
2714 | * is active on singleton NHEs. | |
2715 | */ | |
2716 | static bool zebra_nhg_set_valid_if_active(struct nhg_hash_entry *nhe) | |
2717 | { | |
2718 | struct nhg_connected *rb_node_dep = NULL; | |
2719 | bool valid = false; | |
2720 | ||
2721 | if (!zebra_nhg_depends_is_empty(nhe)) { | |
2722 | /* Is at least one depend valid? */ | |
2723 | frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { | |
2724 | if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe)) | |
2725 | valid = true; | |
2726 | } | |
2727 | ||
2728 | goto done; | |
2729 | } | |
2730 | ||
2731 | /* should be fully resolved singleton at this point */ | |
2732 | if (CHECK_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE)) | |
2733 | valid = true; | |
2734 | ||
2735 | done: | |
2736 | if (valid) | |
2737 | SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); | |
2738 | ||
2739 | return valid; | |
2740 | } | |
2741 | ||
2742 | /* Checks if the first nexthop is EVPN. If not, early return. | |
2743 | * | |
2744 | * This is used to determine if there is a mismatch between l3VNI | |
2745 | * of the route's vrf and the nexthops in use's VNI labels. | |
2746 | * | |
2747 | * If there is a mismatch, we keep the labels as these MUST be DVNI nexthops. | |
2748 | * | |
2749 | * IF there is no mismatch, we remove the labels and handle the routes as | |
2750 | * we have traditionally with evpn. | |
2751 | */ | |
static bool nexthop_list_set_evpn_dvni(struct route_entry *re,
				       struct nexthop_group *nhg)
{
	struct nexthop *nexthop;
	vni_t re_vrf_vni;
	vni_t nh_vni;
	bool use_dvni = false;

	nexthop = nhg->nexthop;

	/* Only the first nexthop is examined to decide whether this group
	 * is EVPN at all; non-EVPN groups are left untouched.
	 */
	if (!nexthop->nh_label || nexthop->nh_label_type != ZEBRA_LSP_EVPN)
		return false;

	/* L3VNI of the route's vrf, the "traditional" reference value */
	re_vrf_vni = get_l3vni_vni(re->vrf_id);

	/* Any nexthop whose VNI label differs from the vrf's L3VNI forces
	 * DVNI handling for the whole group (labels are kept).
	 */
	for (; nexthop; nexthop = nexthop->next) {
		if (!nexthop->nh_label ||
		    nexthop->nh_label_type != ZEBRA_LSP_EVPN)
			continue;

		nh_vni = label2vni(&nexthop->nh_label->label[0]);

		if (nh_vni != re_vrf_vni)
			use_dvni = true;
	}

	/* Using traditional way, no VNI encap - remove labels */
	if (!use_dvni) {
		for (nexthop = nhg->nexthop; nexthop; nexthop = nexthop->next)
			nexthop_del_labels(nexthop);
	}

	/* NOTE(review): returns false both when the group is not EVPN
	 * (nothing touched) and when labels were removed; callers cannot
	 * distinguish the two cases from the return value alone.
	 */
	return use_dvni;
}
2786 | ||
2787 | /* | |
2788 | * Process a list of nexthops, given an nhe, determining | |
2789 | * whether each one is ACTIVE/installable at this time. | |
2790 | */ | |
2791 | static uint32_t nexthop_list_active_update(struct route_node *rn, | |
2792 | struct route_entry *re, | |
2793 | struct nhg_hash_entry *nhe, | |
2794 | bool is_backup) | |
2795 | { | |
2796 | union g_addr prev_src; | |
2797 | unsigned int prev_active, new_active; | |
2798 | ifindex_t prev_index; | |
2799 | uint32_t counter = 0; | |
2800 | struct nexthop *nexthop; | |
2801 | struct nexthop_group *nhg = &nhe->nhg; | |
2802 | bool vni_removed = false; | |
2803 | ||
2804 | nexthop = nhg->nexthop; | |
2805 | ||
2806 | /* Init recursive nh mtu */ | |
2807 | re->nexthop_mtu = 0; | |
2808 | ||
2809 | /* Handler for dvni evpn nexthops. Has to be done at nhg level */ | |
2810 | vni_removed = !nexthop_list_set_evpn_dvni(re, nhg); | |
2811 | ||
2812 | /* Process nexthops one-by-one */ | |
2813 | for ( ; nexthop; nexthop = nexthop->next) { | |
2814 | ||
2815 | /* No protocol daemon provides src and so we're skipping | |
2816 | * tracking it | |
2817 | */ | |
2818 | prev_src = nexthop->rmap_src; | |
2819 | prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); | |
2820 | prev_index = nexthop->ifindex; | |
2821 | ||
2822 | /* Include the containing nhe for primary nexthops: if there's | |
2823 | * recursive resolution, we capture the backup info also. | |
2824 | */ | |
2825 | new_active = | |
2826 | nexthop_active_check(rn, re, nexthop, | |
2827 | (is_backup ? NULL : nhe)); | |
2828 | ||
2829 | /* | |
2830 | * We need to respect the multipath_num here | |
2831 | * as that what we should be able to install from | |
2832 | * a multipath perspective should not be a data plane | |
2833 | * decision point. | |
2834 | */ | |
2835 | if (new_active && counter >= zrouter.multipath_num) { | |
2836 | struct nexthop *nh; | |
2837 | ||
2838 | /* Set it and its resolved nexthop as inactive. */ | |
2839 | for (nh = nexthop; nh; nh = nh->resolved) | |
2840 | UNSET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE); | |
2841 | ||
2842 | new_active = 0; | |
2843 | } | |
2844 | ||
2845 | if (new_active) | |
2846 | counter++; | |
2847 | ||
2848 | /* Check for changes to the nexthop - set ROUTE_ENTRY_CHANGED */ | |
2849 | if (prev_active != new_active || | |
2850 | prev_index != nexthop->ifindex || | |
2851 | ((nexthop->type >= NEXTHOP_TYPE_IFINDEX && | |
2852 | nexthop->type < NEXTHOP_TYPE_IPV6) && | |
2853 | prev_src.ipv4.s_addr != nexthop->rmap_src.ipv4.s_addr) || | |
2854 | ((nexthop->type >= NEXTHOP_TYPE_IPV6 && | |
2855 | nexthop->type < NEXTHOP_TYPE_BLACKHOLE) && | |
2856 | !(IPV6_ADDR_SAME(&prev_src.ipv6, | |
2857 | &nexthop->rmap_src.ipv6))) || | |
2858 | CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED) || | |
2859 | vni_removed) | |
2860 | SET_FLAG(re->status, ROUTE_ENTRY_CHANGED); | |
2861 | } | |
2862 | ||
2863 | return counter; | |
2864 | } | |
2865 | ||
2866 | ||
2867 | static uint32_t proto_nhg_nexthop_active_update(struct nexthop_group *nhg) | |
2868 | { | |
2869 | struct nexthop *nh; | |
2870 | uint32_t curr_active = 0; | |
2871 | ||
2872 | /* Assume all active for now */ | |
2873 | ||
2874 | for (nh = nhg->nexthop; nh; nh = nh->next) { | |
2875 | SET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE); | |
2876 | curr_active++; | |
2877 | } | |
2878 | ||
2879 | return curr_active; | |
2880 | } | |
2881 | ||
2882 | /* | |
2883 | * Iterate over all nexthops of the given RIB entry and refresh their | |
2884 | * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag, | |
2885 | * the whole re structure is flagged with ROUTE_ENTRY_CHANGED. | |
2886 | * | |
2887 | * Return value is the new number of active nexthops. | |
2888 | */ | |
int nexthop_active_update(struct route_node *rn, struct route_entry *re)
{
	struct nhg_hash_entry *curr_nhe;
	uint32_t curr_active = 0, backup_active = 0;

	/* Proto-owned NHGs don't go through zebra's resolution; all their
	 * nexthops are simply marked active.
	 */
	if (PROTO_OWNED(re->nhe))
		return proto_nhg_nexthop_active_update(&re->nhe->nhg);

	afi_t rt_afi = family2afi(rn->p.family);

	UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);

	/* Make a local copy of the existing nhe, so we don't work on/modify
	 * the shared nhe.
	 */
	curr_nhe = zebra_nhe_copy(re->nhe, re->nhe->id);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p nhe %p (%pNG), curr_nhe %p", __func__, re,
			   re->nhe, re->nhe, curr_nhe);

	/* Clear the existing id, if any: this will avoid any confusion
	 * if the id exists, and will also force the creation
	 * of a new nhe reflecting the changes we may make in this local copy.
	 */
	curr_nhe->id = 0;

	/* Process primary nexthops; sets ROUTE_ENTRY_CHANGED on re if any
	 * nexthop's state differs from the previous pass.
	 */
	curr_active = nexthop_list_active_update(rn, re, curr_nhe, false);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p curr_active %u", __func__, re,
			   curr_active);

	/* If there are no backup nexthops, we are done */
	if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL)
		goto backups_done;

	backup_active = nexthop_list_active_update(
		rn, re, curr_nhe->backup_info->nhe, true /*is_backup*/);

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: re %p backup_active %u", __func__, re,
			   backup_active);

backups_done:

	/*
	 * Ref or create an nhe that matches the current state of the
	 * nexthop(s).
	 */
	if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
		struct nhg_hash_entry *new_nhe = NULL;

		new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);

		if (IS_ZEBRA_DEBUG_NHG_DETAIL)
			zlog_debug(
				"%s: re %p CHANGED: nhe %p (%pNG) => new_nhe %p (%pNG)",
				__func__, re, re->nhe, re->nhe, new_nhe,
				new_nhe);

		route_entry_update_nhe(re, new_nhe);
	}


	/* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID
	 * flag where appropriate.
	 */
	if (curr_active)
		zebra_nhg_set_valid_if_active(re->nhe);

	/*
	 * Do not need the old / copied nhe anymore since it
	 * was either copied over into a new nhe or not
	 * used at all.
	 */
	zebra_nhg_free(curr_nhe);
	return curr_active;
}
2969 | ||
2970 | /* Recursively construct a grp array of fully resolved IDs. | |
2971 | * | |
2972 | * This function allows us to account for groups within groups, | |
2973 | * by converting them into a flat array of IDs. | |
2974 | * | |
2975 | * nh_grp is modified at every level of recursion to append | |
2976 | * to it the next unique, fully resolved ID from the entire tree. | |
2977 | * | |
2978 | * | |
2979 | * Note: | |
2980 | * I'm pretty sure we only allow ONE level of group within group currently. | |
2981 | * But making this recursive just in case that ever changes. | |
2982 | */ | |
static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp,
					  uint8_t curr_index,
					  struct nhg_hash_entry *nhe,
					  int max_num)
{
	struct nhg_connected *rb_node_dep = NULL;
	struct nhg_hash_entry *depend = NULL;
	uint8_t i = curr_index; /* next free slot in grp[] */

	frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
		bool duplicate = false;

		/* Stop once the caller-provided array is full */
		if (i >= max_num)
			goto done;

		depend = rb_node_dep->nhe;

		/*
		 * If its recursive, use its resolved nhe in the group
		 */
		if (CHECK_FLAG(depend->flags, NEXTHOP_GROUP_RECURSIVE)) {
			depend = zebra_nhg_resolve(depend);
			if (!depend) {
				flog_err(
					EC_ZEBRA_NHG_FIB_UPDATE,
					"Failed to recursively resolve Nexthop Hash Entry in the group id=%pNG",
					nhe);
				continue;
			}
		}

		if (!zebra_nhg_depends_is_empty(depend)) {
			/* This is a group within a group: flatten it by
			 * recursing; i advances past whatever was appended.
			 */
			i = zebra_nhg_nhe2grp_internal(grp, i, depend, max_num);
		} else {
			/* Singleton: only valid entries go into the array */
			if (!CHECK_FLAG(depend->flags, NEXTHOP_GROUP_VALID)) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) not valid, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* If the nexthop not installed/queued for install don't
			 * put in the ID array.
			 */
			if (!(CHECK_FLAG(depend->flags, NEXTHOP_GROUP_INSTALLED)
			      || CHECK_FLAG(depend->flags,
					    NEXTHOP_GROUP_QUEUED))) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) not installed or queued for install, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* Check for duplicate IDs, ignore if found. */
			for (int j = 0; j < i; j++) {
				if (depend->id == grp[j].id) {
					duplicate = true;
					break;
				}
			}

			if (duplicate) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED
				    || IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s: Nexthop ID (%u) is duplicate, not appending to dataplane install group",
						__func__, depend->id);
				continue;
			}

			/* Singleton's weight comes from its lone nexthop */
			grp[i].id = depend->id;
			grp[i].weight = depend->nhg.nexthop->weight;
			i++;
		}
	}

	if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL)
		goto done;

	/* TODO -- For now, we are not trying to use or install any
	 * backup info in this nexthop-id path: we aren't prepared
	 * to use the backups here yet. We're just debugging what we find.
	 */
	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: skipping backup nhe", __func__);

done:
	return i;
}
3077 | ||
3078 | /* Convert a nhe into a group array */ | |
uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe,
			  int max_num)
{
	/* Public entry point: flatten the nhe's depend tree into grp[]
	 * starting at index 0; returns the number of slots filled.
	 */
	return zebra_nhg_nhe2grp_internal(grp, 0, nhe, max_num);
}
3085 | ||
void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe)
{
	struct nhg_connected *rb_node_dep = NULL;

	/* Resolve it first */
	nhe = zebra_nhg_resolve(nhe);

	/* Refresh VALID from the current ACTIVE state of the underlying
	 * singletons before deciding whether to install.
	 */
	if (zebra_nhg_set_valid_if_active(nhe)) {
		if (IS_ZEBRA_DEBUG_NHG_DETAIL)
			zlog_debug("%s: valid flag set for nh %pNG", __func__,
				   nhe);
	}

	/* Make sure all depends are installed/queued */
	frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
		zebra_nhg_install_kernel(rb_node_dep->nhe);
	}

	/* Only push to the dataplane when valid and not already installed
	 * or in flight.
	 */
	if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)
	    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
	    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
		/* Change its type to us since we are installing it */
		if (!ZEBRA_NHG_CREATED(nhe))
			nhe->type = ZEBRA_ROUTE_NHG;

		int ret = dplane_nexthop_add(nhe);

		switch (ret) {
		case ZEBRA_DPLANE_REQUEST_QUEUED:
			SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED);
			break;
		case ZEBRA_DPLANE_REQUEST_FAILURE:
			flog_err(
				EC_ZEBRA_DP_INSTALL_FAIL,
				"Failed to install Nexthop ID (%pNG) into the kernel",
				nhe);
			break;
		case ZEBRA_DPLANE_REQUEST_SUCCESS:
			SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
			zebra_nhg_handle_install(nhe, false);
			break;
		}
	}
}
3130 | ||
3131 | void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe) | |
3132 | { | |
3133 | if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) { | |
3134 | int ret = dplane_nexthop_delete(nhe); | |
3135 | ||
3136 | switch (ret) { | |
3137 | case ZEBRA_DPLANE_REQUEST_QUEUED: | |
3138 | SET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED); | |
3139 | break; | |
3140 | case ZEBRA_DPLANE_REQUEST_FAILURE: | |
3141 | flog_err( | |
3142 | EC_ZEBRA_DP_DELETE_FAIL, | |
3143 | "Failed to uninstall Nexthop ID (%pNG) from the kernel", | |
3144 | nhe); | |
3145 | break; | |
3146 | case ZEBRA_DPLANE_REQUEST_SUCCESS: | |
3147 | UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); | |
3148 | break; | |
3149 | } | |
3150 | } | |
3151 | ||
3152 | zebra_nhg_handle_uninstall(nhe); | |
3153 | } | |
3154 | ||
3155 | void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) | |
3156 | { | |
3157 | enum dplane_op_e op; | |
3158 | enum zebra_dplane_result status; | |
3159 | uint32_t id = 0; | |
3160 | struct nhg_hash_entry *nhe = NULL; | |
3161 | ||
3162 | op = dplane_ctx_get_op(ctx); | |
3163 | status = dplane_ctx_get_status(ctx); | |
3164 | ||
3165 | id = dplane_ctx_get_nhe_id(ctx); | |
3166 | ||
3167 | if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL) | |
3168 | zlog_debug( | |
3169 | "Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s", | |
3170 | ctx, dplane_op2str(op), id, dplane_res2str(status)); | |
3171 | ||
3172 | switch (op) { | |
3173 | case DPLANE_OP_NH_DELETE: | |
3174 | if (status != ZEBRA_DPLANE_REQUEST_SUCCESS) | |
3175 | flog_err( | |
3176 | EC_ZEBRA_DP_DELETE_FAIL, | |
3177 | "Failed to uninstall Nexthop ID (%u) from the kernel", | |
3178 | id); | |
3179 | ||
3180 | /* We already free'd the data, nothing to do */ | |
3181 | break; | |
3182 | case DPLANE_OP_NH_INSTALL: | |
3183 | case DPLANE_OP_NH_UPDATE: | |
3184 | nhe = zebra_nhg_lookup_id(id); | |
3185 | ||
3186 | if (!nhe) { | |
3187 | if (IS_ZEBRA_DEBUG_NHG) | |
3188 | zlog_debug( | |
3189 | "%s operation preformed on Nexthop ID (%u) in the kernel, that we no longer have in our table", | |
3190 | dplane_op2str(op), id); | |
3191 | ||
3192 | break; | |
3193 | } | |
3194 | ||
3195 | UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED); | |
3196 | if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) { | |
3197 | SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); | |
3198 | SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); | |
3199 | zebra_nhg_handle_install(nhe, true); | |
3200 | ||
3201 | /* If daemon nhg, send it an update */ | |
3202 | if (PROTO_OWNED(nhe)) | |
3203 | zsend_nhg_notify(nhe->type, nhe->zapi_instance, | |
3204 | nhe->zapi_session, nhe->id, | |
3205 | ZAPI_NHG_INSTALLED); | |
3206 | } else { | |
3207 | /* If daemon nhg, send it an update */ | |
3208 | if (PROTO_OWNED(nhe)) | |
3209 | zsend_nhg_notify(nhe->type, nhe->zapi_instance, | |
3210 | nhe->zapi_session, nhe->id, | |
3211 | ZAPI_NHG_FAIL_INSTALL); | |
3212 | ||
3213 | if (!(zebra_nhg_proto_nexthops_only() && | |
3214 | !PROTO_OWNED(nhe))) | |
3215 | flog_err( | |
3216 | EC_ZEBRA_DP_INSTALL_FAIL, | |
3217 | "Failed to install Nexthop (%pNG) into the kernel", | |
3218 | nhe); | |
3219 | } | |
3220 | break; | |
3221 | ||
3222 | case DPLANE_OP_ROUTE_INSTALL: | |
3223 | case DPLANE_OP_ROUTE_UPDATE: | |
3224 | case DPLANE_OP_ROUTE_DELETE: | |
3225 | case DPLANE_OP_ROUTE_NOTIFY: | |
3226 | case DPLANE_OP_LSP_INSTALL: | |
3227 | case DPLANE_OP_LSP_UPDATE: | |
3228 | case DPLANE_OP_LSP_DELETE: | |
3229 | case DPLANE_OP_LSP_NOTIFY: | |
3230 | case DPLANE_OP_PW_INSTALL: | |
3231 | case DPLANE_OP_PW_UNINSTALL: | |
3232 | case DPLANE_OP_SYS_ROUTE_ADD: | |
3233 | case DPLANE_OP_SYS_ROUTE_DELETE: | |
3234 | case DPLANE_OP_ADDR_INSTALL: | |
3235 | case DPLANE_OP_ADDR_UNINSTALL: | |
3236 | case DPLANE_OP_MAC_INSTALL: | |
3237 | case DPLANE_OP_MAC_DELETE: | |
3238 | case DPLANE_OP_NEIGH_INSTALL: | |
3239 | case DPLANE_OP_NEIGH_UPDATE: | |
3240 | case DPLANE_OP_NEIGH_DELETE: | |
3241 | case DPLANE_OP_NEIGH_IP_INSTALL: | |
3242 | case DPLANE_OP_NEIGH_IP_DELETE: | |
3243 | case DPLANE_OP_VTEP_ADD: | |
3244 | case DPLANE_OP_VTEP_DELETE: | |
3245 | case DPLANE_OP_RULE_ADD: | |
3246 | case DPLANE_OP_RULE_DELETE: | |
3247 | case DPLANE_OP_RULE_UPDATE: | |
3248 | case DPLANE_OP_NEIGH_DISCOVER: | |
3249 | case DPLANE_OP_BR_PORT_UPDATE: | |
3250 | case DPLANE_OP_NONE: | |
3251 | case DPLANE_OP_IPTABLE_ADD: | |
3252 | case DPLANE_OP_IPTABLE_DELETE: | |
3253 | case DPLANE_OP_IPSET_ADD: | |
3254 | case DPLANE_OP_IPSET_DELETE: | |
3255 | case DPLANE_OP_IPSET_ENTRY_ADD: | |
3256 | case DPLANE_OP_IPSET_ENTRY_DELETE: | |
3257 | case DPLANE_OP_NEIGH_TABLE_UPDATE: | |
3258 | case DPLANE_OP_GRE_SET: | |
3259 | case DPLANE_OP_INTF_ADDR_ADD: | |
3260 | case DPLANE_OP_INTF_ADDR_DEL: | |
3261 | case DPLANE_OP_INTF_NETCONFIG: | |
3262 | case DPLANE_OP_INTF_INSTALL: | |
3263 | case DPLANE_OP_INTF_UPDATE: | |
3264 | case DPLANE_OP_INTF_DELETE: | |
3265 | case DPLANE_OP_TC_QDISC_INSTALL: | |
3266 | case DPLANE_OP_TC_QDISC_UNINSTALL: | |
3267 | case DPLANE_OP_TC_CLASS_ADD: | |
3268 | case DPLANE_OP_TC_CLASS_DELETE: | |
3269 | case DPLANE_OP_TC_CLASS_UPDATE: | |
3270 | case DPLANE_OP_TC_FILTER_ADD: | |
3271 | case DPLANE_OP_TC_FILTER_DELETE: | |
3272 | case DPLANE_OP_TC_FILTER_UPDATE: | |
3273 | break; | |
3274 | } | |
3275 | } | |
3276 | ||
static int zebra_nhg_sweep_entry(struct hash_bucket *bucket, void *arg)
{
	struct nhg_hash_entry *nhe = NULL;

	nhe = (struct nhg_hash_entry *)bucket->data;

	/*
	 * same logic as with routes.
	 *
	 * If older than startup time, we know we read them in from the
	 * kernel and have not gotten an update for them since startup
	 * from an upper level proto.
	 */
	if (zrouter.startup_time < nhe->uptime)
		return HASHWALK_CONTINUE;

	/*
	 * If it's proto-owned and not being used by a route, remove it since
	 * we haven't gotten an update about it from the proto since startup.
	 * This means that either the config for it was removed or the daemon
	 * didn't get started. This handles graceful restart & retain scenario.
	 */
	if (PROTO_OWNED(nhe) && nhe->refcnt == 1) {
		zebra_nhg_decrement_ref(nhe);
		/* Deletion may cascade; abort so the caller re-walks */
		return HASHWALK_ABORT;
	}

	/*
	 * If its being ref'd by routes, just let it be uninstalled via a route
	 * removal; only zebra-created entries with no remaining refs get
	 * uninstalled here.
	 */
	if (ZEBRA_NHG_CREATED(nhe) && nhe->refcnt <= 0) {
		zebra_nhg_uninstall_kernel(nhe);
		return HASHWALK_ABORT;
	}

	return HASHWALK_CONTINUE;
}
3315 | ||
3316 | void zebra_nhg_sweep_table(struct hash *hash) | |
3317 | { | |
3318 | uint32_t count; | |
3319 | ||
3320 | /* | |
3321 | * Yes this is extremely odd. Effectively nhg's have | |
3322 | * other nexthop groups that depend on them and when you | |
3323 | * remove them, you can have other entries blown up. | |
3324 | * our hash code does not work with deleting multiple | |
3325 | * entries at a time and will possibly cause crashes | |
3326 | * So what to do? Whenever zebra_nhg_sweep_entry | |
3327 | * deletes an entry it will return HASHWALK_ABORT, | |
3328 | * cause that deletion might have triggered more. | |
3329 | * then we can just keep sweeping this table | |
3330 | * until nothing more is found to do. | |
3331 | */ | |
3332 | do { | |
3333 | count = hashcount(hash); | |
3334 | hash_walk(hash, zebra_nhg_sweep_entry, NULL); | |
3335 | } while (count != hashcount(hash)); | |
3336 | } | |
3337 | ||
/* hash_iterate callback: clear INSTALLED so a later ref-drop skips the
 * kernel delete (see zebra_nhg_mark_keep below).
 */
static void zebra_nhg_mark_keep_entry(struct hash_bucket *bucket, void *arg)
{
	struct nhg_hash_entry *nhe = bucket->data;

	UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
}
3344 | ||
/*
 * When we are shutting down with retain mode enabled in zebra, the
 * process is to mark each vrf so that its routes are not deleted. The
 * problem is that shutdown actually frees memory, which drives the
 * nexthop groups' ref counts to zero, so we need a way to subtly tell
 * the system not to remove the nexthop groups from the kernel at the
 * same time. The easiest approach is to stop marking the nhgs as
 * installed; then, when a ref count reaches zero, the delete attempt
 * does nothing.
 */
void zebra_nhg_mark_keep(void)
{
	/* Clear INSTALLED on every known nhe; see comment above */
	hash_iterate(zrouter.nhgs_id, zebra_nhg_mark_keep_entry, NULL);
}
3361 | ||
3362 | /* Global control to disable use of kernel nexthops, if available. We can't | |
3363 | * force the kernel to support nexthop ids, of course, but we can disable | |
3364 | * zebra's use of them, for testing e.g. By default, if the kernel supports | |
3365 | * nexthop ids, zebra uses them. | |
3366 | */ | |
/* UI hook: enable/disable zebra's use of kernel nexthop objects */
void zebra_nhg_enable_kernel_nexthops(bool set)
{
	g_nexthops_enabled = set;
}
3371 | ||
/* Whether zebra currently uses kernel nexthop objects (default: true) */
bool zebra_nhg_kernel_nexthops_enabled(void)
{
	return g_nexthops_enabled;
}
3376 | ||
3377 | /* Global control for use of activated backups for recursive resolution. */ | |
/* UI hook: allow/disallow activated backups for recursive resolution */
void zebra_nhg_set_recursive_use_backups(bool set)
{
	use_recursive_backups = set;
}
3382 | ||
/* Whether recursive resolution may use activated backups (default: true) */
bool zebra_nhg_recursive_use_backups(void)
{
	return use_recursive_backups;
}
3387 | ||
3388 | /* | |
3389 | * Global control to only use kernel nexthops for protocol created NHGs. | |
3390 | * There are some use cases where you may not want zebra to implicitly | |
3391 | * create kernel nexthops for all routes and only create them for NHGs | |
3392 | * passed down by upper level protos. | |
3393 | * | |
3394 | * Default is off. | |
3395 | */ | |
/* UI hook: restrict kernel nexthops to proto-created NHGs only */
void zebra_nhg_set_proto_nexthops_only(bool set)
{
	proto_nexthops_only = set;
}
3400 | ||
/* Whether only proto-created NHGs go to the kernel (default: false) */
bool zebra_nhg_proto_nexthops_only(void)
{
	return proto_nexthops_only;
}
3405 | ||
3406 | /* Add NHE from upper level proto */ | |
struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
					   uint16_t instance, uint32_t session,
					   struct nexthop_group *nhg, afi_t afi)
{
	struct nhg_hash_entry lookup;
	struct nhg_hash_entry *new, *old;
	struct nhg_connected *rb_node_dep = NULL;
	struct nexthop *newhop;
	bool replace = false;

	/* An NHG with no nexthops is rejected outright */
	if (!nhg->nexthop) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug("%s: id %u, no nexthops passed to add",
				   __func__, id);
		return NULL;
	}


	/* Set nexthop list as active, since they wont go through rib
	 * processing.
	 *
	 * Assuming valid/onlink for now.
	 *
	 * Once resolution is figured out, we won't need this!
	 */
	for (ALL_NEXTHOPS_PTR(nhg, newhop)) {
		/* Reject unsupported nexthop kinds up front: backups,
		 * blackholes, interface-only, and missing ifindex.
		 */
		if (CHECK_FLAG(newhop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, backup nexthops not supported",
					__func__, id);
			return NULL;
		}

		if (newhop->type == NEXTHOP_TYPE_BLACKHOLE) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, blackhole nexthop not supported",
					__func__, id);
			return NULL;
		}

		if (newhop->type == NEXTHOP_TYPE_IFINDEX) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, nexthop without gateway not supported",
					__func__, id);
			return NULL;
		}

		if (!newhop->ifindex) {
			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s: id %u, nexthop without ifindex is not supported",
					__func__, id);
			return NULL;
		}
		SET_FLAG(newhop->flags, NEXTHOP_FLAG_ACTIVE);
	}

	/* Build a stack-local lookup key for find-or-create */
	zebra_nhe_init(&lookup, afi, nhg->nexthop);
	lookup.nhg.nexthop = nhg->nexthop;
	lookup.nhg.nhgr = nhg->nhgr;
	lookup.id = id;
	lookup.type = type;

	old = zebra_nhg_lookup_id(id);

	if (old) {
		/*
		 * This is a replace, just release NHE from ID for now, The
		 * depends/dependents may still be used in the replacement so
		 * we don't touch them other than to remove their refs to their
		 * old parent.
		 */
		replace = true;
		hash_release(zrouter.nhgs_id, old);

		/* Free all the things */
		zebra_nhg_release_all_deps(old);
	}

	new = zebra_nhg_rib_find_nhe(&lookup, afi);

	zebra_nhg_increment_ref(new);

	/* Capture zapi client info */
	new->zapi_instance = instance;
	new->zapi_session = session;

	zebra_nhg_set_valid_if_active(new);

	zebra_nhg_install_kernel(new);

	if (old) {
		/*
		 * Check to handle recving DEL while routes still in use then
		 * a replace.
		 *
		 * In this case we would have decremented the refcnt already
		 * but set the FLAG here. Go ahead and increment once to fix
		 * the misordering we have been sent.
		 */
		if (CHECK_FLAG(old->flags, NEXTHOP_GROUP_PROTO_RELEASED))
			zebra_nhg_increment_ref(old);

		/* Move routes using OLD over to NEW */
		rib_handle_nhg_replace(old, new);

		/* We have to decrement its singletons
		 * because some might not exist in NEW.
		 */
		if (!zebra_nhg_depends_is_empty(old)) {
			frr_each (nhg_connected_tree, &old->nhg_depends,
				  rb_node_dep)
				zebra_nhg_decrement_ref(rb_node_dep->nhe);
		}

		/* Dont call the dec API, we dont want to uninstall the ID */
		old->refcnt = 0;
		EVENT_OFF(old->timer);
		zebra_nhg_free(old);
		old = NULL;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: %s nhe %p (%u), vrf %d, type %s", __func__,
			   (replace ? "replaced" : "added"), new, new->id,
			   new->vrf_id, zebra_route_string(new->type));

	return new;
}
3538 | ||
3539 | /* Delete NHE from upper level proto, caller must decrement ref */ | |
struct nhg_hash_entry *zebra_nhg_proto_del(uint32_t id, int type)
{
	struct nhg_hash_entry *nhe;

	nhe = zebra_nhg_lookup_id(id);

	if (!nhe) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug("%s: id %u, lookup failed", __func__, id);

		return NULL;
	}

	/* Only the owning protocol may delete its entry */
	if (type != nhe->type) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug(
				"%s: id %u, type %s mismatch, sent by %s, ignoring",
				__func__, id, zebra_route_string(nhe->type),
				zebra_route_string(type));
		return NULL;
	}

	/* Ignore duplicate DELs */
	if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED)) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug("%s: id %u, already released", __func__, id);

		return NULL;
	}

	SET_FLAG(nhe->flags, NEXTHOP_GROUP_PROTO_RELEASED);

	/* Routes still reference it: leave it; the final route removal
	 * drops the remaining refs (caller decrements one ref).
	 */
	if (nhe->refcnt > 1) {
		if (IS_ZEBRA_DEBUG_NHG)
			zlog_debug(
				"%s: %pNG, still being used by routes refcnt %u",
				__func__, nhe, nhe->refcnt);
		return nhe;
	}

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug("%s: deleted nhe %p (%pNG), vrf %d, type %s",
			   __func__, nhe, nhe, nhe->vrf_id,
			   zebra_route_string(nhe->type));

	return nhe;
}
3586 | ||
/* Iterator state for collecting proto-owned NHGs of a given type */
struct nhg_score_proto_iter {
	int type;		/* zebra route type to match */
	struct list *found;	/* accumulated matching nhe pointers */
};
3591 | ||
3592 | static void zebra_nhg_score_proto_entry(struct hash_bucket *bucket, void *arg) | |
3593 | { | |
3594 | struct nhg_hash_entry *nhe; | |
3595 | struct nhg_score_proto_iter *iter; | |
3596 | ||
3597 | nhe = (struct nhg_hash_entry *)bucket->data; | |
3598 | iter = arg; | |
3599 | ||
3600 | /* Needs to match type and outside zebra ID space */ | |
3601 | if (nhe->type == iter->type && PROTO_OWNED(nhe)) { | |
3602 | if (IS_ZEBRA_DEBUG_NHG_DETAIL) | |
3603 | zlog_debug( | |
3604 | "%s: found nhe %p (%pNG), vrf %d, type %s after client disconnect", | |
3605 | __func__, nhe, nhe, nhe->vrf_id, | |
3606 | zebra_route_string(nhe->type)); | |
3607 | ||
3608 | /* Add to removal list */ | |
3609 | listnode_add(iter->found, nhe); | |
3610 | } | |
3611 | } | |
3612 | ||
3613 | /* Remove specific by proto NHGs */ | |
3614 | unsigned long zebra_nhg_score_proto(int type) | |
3615 | { | |
3616 | struct nhg_hash_entry *nhe; | |
3617 | struct nhg_score_proto_iter iter = {}; | |
3618 | struct listnode *ln; | |
3619 | unsigned long count; | |
3620 | ||
3621 | iter.type = type; | |
3622 | iter.found = list_new(); | |
3623 | ||
3624 | /* Find matching entries to remove */ | |
3625 | hash_iterate(zrouter.nhgs_id, zebra_nhg_score_proto_entry, &iter); | |
3626 | ||
3627 | /* Now remove them */ | |
3628 | for (ALL_LIST_ELEMENTS_RO(iter.found, ln, nhe)) { | |
3629 | /* | |
3630 | * This should be the last ref if we remove client routes too, | |
3631 | * and thus should remove and free them. | |
3632 | */ | |
3633 | zebra_nhg_decrement_ref(nhe); | |
3634 | } | |
3635 | ||
3636 | count = iter.found->count; | |
3637 | list_delete(&iter.found); | |
3638 | ||
3639 | return count; | |
3640 | } | |
3641 | ||
/* printfrr extension %pNG: renders an nhe as "<id>[...]" — the nexthops
 * themselves for an interface-backed entry, otherwise the '/'-separated
 * IDs of its depends.
 */
printfrr_ext_autoreg_p("NG", printfrr_nhghe);
static ssize_t printfrr_nhghe(struct fbuf *buf, struct printfrr_eargs *ea,
			      const void *ptr)
{
	const struct nhg_hash_entry *nhe = ptr;
	const struct nhg_connected *dep;
	ssize_t ret = 0;

	if (!nhe)
		return bputs(buf, "[NULL]");

	ret += bprintfrr(buf, "%u[", nhe->id);
	if (nhe->ifp)
		ret += printfrr_nhs(buf, nhe->nhg.nexthop);
	else {
		/* count is used to omit the separator after the last ID */
		int count = zebra_nhg_depends_count(nhe);

		frr_each (nhg_connected_tree_const, &nhe->nhg_depends, dep) {
			ret += bprintfrr(buf, "%u", dep->nhe->id);
			if (count > 1)
				ret += bputs(buf, "/");
			count--;
		}
	}

	ret += bputs(buf, "]");
	return ret;
}
3670 | ||
/*
 * On interface add, any nexthop that resolves to this interface needs
 * a re-install. The nexthop group update gets skipped in the following
 * scenarios:
 * 1. When an upper-level protocol removes an NHG, a timer keeps the NHG
 * around for 180 seconds. If, during this interval, the same route with
 * the same set of nexthops is installed again, the same NHG is reused;
 * but since the NHG is not reinstalled on interface address add, the
 * dataplane/kernel is not aware of it.
 * 2. A quick port flap can cause the interface add and delete to be
 * processed in the same queue, one after another. Zebra then believes
 * there is no change in the NHG. This re-install makes sure the nexthop
 * group gets updated in the dataplane/kernel.
 */
void zebra_interface_nhg_reinstall(struct interface *ifp)
{
	struct nhg_connected *rb_node_dep = NULL;
	struct zebra_if *zif = ifp->info;
	struct nexthop *nh;

	if (IS_ZEBRA_DEBUG_NHG_DETAIL)
		zlog_debug(
			"%s: Installing interface %s associated NHGs into kernel",
			__func__, ifp->name);

	/* Walk every NHG that depends on this interface */
	frr_each (nhg_connected_tree, &zif->nhg_dependents, rb_node_dep) {
		/* NOTE(review): nh is dereferenced below without a NULL
		 * check — presumably every dependent nhe has a non-empty
		 * nexthop list; confirm this invariant holds.
		 */
		nh = rb_node_dep->nhe->nhg.nexthop;
		if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe)) {
			if (IS_ZEBRA_DEBUG_NHG_DETAIL)
				zlog_debug(
					"%s: Setting the valid flag for nhe %pNG, interface: %s",
					__func__, rb_node_dep->nhe, ifp->name);
		}
		/* Check for singleton NHG associated to interface */
		if (nexthop_is_ifindex_type(nh) &&
		    zebra_nhg_depends_is_empty(rb_node_dep->nhe)) {
			struct nhg_connected *rb_node_dependent;

			if (IS_ZEBRA_DEBUG_NHG)
				zlog_debug(
					"%s install nhe %pNG nh type %u flags 0x%x",
					__func__, rb_node_dep->nhe, nh->type,
					rb_node_dep->nhe->flags);
			/* Push the singleton down to the dataplane/kernel */
			zebra_nhg_install_kernel(rb_node_dep->nhe);

			/* Mark each dependent as uninstalled; when the
			 * interface-associated singleton is installed, the
			 * dependents get (re)installed as well.
			 */
			frr_each_safe (nhg_connected_tree,
				       &rb_node_dep->nhe->nhg_dependents,
				       rb_node_dependent) {
				if (IS_ZEBRA_DEBUG_NHG)
					zlog_debug(
						"%s dependent nhe %pNG unset installed flag",
						__func__,
						rb_node_dependent->nhe);
				UNSET_FLAG(rb_node_dependent->nhe->flags,
					   NEXTHOP_GROUP_INSTALLED);
			}
		}
	}
}