/* id counter to keep in sync with kernel */
uint32_t id_counter;
-static struct nhg_hash_entry *depends_find(struct nexthop *nh, afi_t afi);
+/* */
+static bool g_nexthops_enabled = true;
+
+static struct nhg_hash_entry *depends_find(const struct nexthop *nh,
+ afi_t afi);
static void depends_add(struct nhg_connected_tree_head *head,
struct nhg_hash_entry *depend);
static struct nhg_hash_entry *
return nhg_connected_tree_first(head);
}
-void nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head,
- struct nhg_hash_entry *depend)
+/* Remove 'depend' from the connected tree 'head'.
+ *
+ * Returns the nhg_hash_entry that was removed, or NULL if it was not
+ * present in the tree.  The wrapping 'nhg_connected' node is freed on
+ * successful removal.
+ */
+struct nhg_hash_entry *
+nhg_connected_tree_del_nhe(struct nhg_connected_tree_head *head,
+ struct nhg_hash_entry *depend)
{
struct nhg_connected lookup = {};
struct nhg_connected *remove = NULL;
+ struct nhg_hash_entry *removed_nhe;
lookup.nhe = depend;
/* Lookup to find the element, then remove it */
remove = nhg_connected_tree_find(head, &lookup);
- remove = nhg_connected_tree_del(head, remove);
-
if (remove)
+ /* Re-assigning the _del return here just in case this API
+ * changes; the _del tree APIs are a bit undefined at the
+ * moment.
+ *
+ * So hopefully re-assigning here will make it fail if the API
+ * changes to something different than currently expected.
+ */
+ remove = nhg_connected_tree_del(head, remove);
+
+ /* If the entry was successfully removed, free the 'connected' struct */
+ if (remove) {
+ removed_nhe = remove->nhe;
nhg_connected_free(remove);
+ return removed_nhe;
+ }
+
+ return NULL;
}
-void nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head,
- struct nhg_hash_entry *depend)
+/* Assuming UNIQUE RB tree. If this changes, assumptions here about
+ * insertion need to change.
+ *
+ * Returns NULL if 'depend' was newly inserted, or 'depend' itself if
+ * an equal entry was already present (duplicate), so the caller can
+ * distinguish the two cases.
+ */
+struct nhg_hash_entry *
+nhg_connected_tree_add_nhe(struct nhg_connected_tree_head *head,
+ struct nhg_hash_entry *depend)
{
struct nhg_connected *new = NULL;
new = nhg_connected_new(depend);
- if (new)
- nhg_connected_tree_add(head, new);
+ /* On success, NULL will be returned from the
+ * RB code.
+ */
+ if (new && (nhg_connected_tree_add(head, new) == NULL))
+ return NULL;
+
+ /* If it wasn't successful, it must be a duplicate. We enforce the
+ * unique property for the `nhg_connected` tree.
+ */
+ nhg_connected_free(new);
+
+ return depend;
}
static void
struct interface *ifp = NULL;
ifp = if_lookup_by_index(nhe->nhg->nexthop->ifindex,
- nhe->vrf_id);
+ nhe->nhg->nexthop->vrf_id);
if (ifp)
zebra_nhg_set_if(nhe, ifp);
else
flog_err(
EC_ZEBRA_IF_LOOKUP_FAILED,
"Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
- nhe->nhg->nexthop->ifindex, nhe->vrf_id,
- nhe->id);
+ nhe->nhg->nexthop->ifindex,
+ nhe->nhg->nexthop->vrf_id, nhe->id);
}
}
struct nhg_hash_entry *depend = NULL;
struct nexthop_group resolved_ng = {};
- nexthop_group_add_sorted(&resolved_ng, nh);
+ resolved_ng.nexthop = nh;
depend = zebra_nhg_rib_find(0, &resolved_ng, afi);
- depends_add(nhg_depends, depend);
+
+ if (depend)
+ depends_add(nhg_depends, depend);
}
static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
lookup.type = type ? type : ZEBRA_ROUTE_NHG;
lookup.nhg = nhg;
+ lookup.vrf_id = vrf_id;
if (lookup.nhg->nexthop->next) {
/* Groups can have all vrfs and AF's in them */
lookup.afi = AFI_UNSPEC;
- lookup.vrf_id = 0;
} else {
switch (lookup.nhg->nexthop->type) {
case (NEXTHOP_TYPE_IFINDEX):
lookup.afi = AFI_IP6;
break;
}
-
- lookup.vrf_id = vrf_id;
}
if (id)
{
struct nhg_hash_entry *nhe = NULL;
struct nexthop_group nhg = {};
+ vrf_id_t vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nh->vrf_id;
nexthop_group_add_sorted(&nhg, nh);
- zebra_nhg_find(&nhe, id, &nhg, NULL, nh->vrf_id, afi, 0);
+ zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type);
return nhe;
}
done:
XFREE(MTYPE_NHG_CTX, *ctx);
- *ctx = NULL;
}
static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh,
}
/* Kernel-side, received delete message */
-int zebra_nhg_kernel_del(uint32_t id)
+int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id)
{
struct nhg_ctx *ctx = NULL;
- ctx = nhg_ctx_init(id, NULL, NULL, 0, 0, 0, 0);
+ ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0);
nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL);
}
/* Some dependency helper functions */
-static struct nhg_hash_entry *depends_find(struct nexthop *nh, afi_t afi)
+/* Find the NHE matching a recursively-resolved nexthop.  Works on a
+ * heap-allocated duplicate of 'nh' so the caller's nexthop is never
+ * modified; the duplicate is freed before returning.
+ */
+static struct nhg_hash_entry *depends_find_recursive(const struct nexthop *nh,
+ afi_t afi)
{
+ struct nhg_hash_entry *nhe;
struct nexthop *lookup = NULL;
- struct nhg_hash_entry *nhe = NULL;
- if (!nh)
- goto done;
-
- copy_nexthops(&lookup, nh, NULL);
-
- /* Clear it, in case its a group */
- nexthops_free(lookup->next);
- nexthops_free(lookup->prev);
- lookup->next = NULL;
- lookup->prev = NULL;
+ lookup = nexthop_dup(nh, NULL);
nhe = zebra_nhg_find_nexthop(0, lookup, afi, 0);
nexthops_free(lookup);
+ return nhe;
+}
+
+/* Find the NHE matching a single, non-recursive nexthop.  Uses a
+ * stack-allocated snapshot of 'nh' to avoid heap alloc/free on this
+ * path (see depends_find() for the rationale).
+ */
+static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
+ afi_t afi)
+{
+ struct nhg_hash_entry *nhe;
+ struct nexthop lookup = {};
+
+ /* Capture a snapshot of this single nh; it might be part of a list,
+ * so we need to make a standalone copy.
+ */
+ nexthop_copy_no_recurse(&lookup, nh, NULL);
+
+ nhe = zebra_nhg_find_nexthop(0, &lookup, afi, 0);
+
+ /* The copy may have allocated labels; free them if necessary. */
+ nexthop_del_labels(&lookup);
+
+ return nhe;
+}
+
+/* Resolve 'nh' to the nhg_hash_entry it depends on, dispatching to the
+ * recursive or singleton helper as appropriate.  Returns NULL when 'nh'
+ * is NULL.
+ */
+static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
+{
+ struct nhg_hash_entry *nhe = NULL;
+
+ if (!nh)
+ goto done;
+
+ /* We are separating these functions out to increase handling speed
+ * in the non-recursive case (by not alloc/freeing)
+ */
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
+ nhe = depends_find_recursive(nh, afi);
+ else
+ nhe = depends_find_singleton(nh, afi);
+
done:
return nhe;
}
+/* Add 'depend' to the connected tree 'head', taking a refcount on it
+ * only when it was not already present.
+ */
static void depends_add(struct nhg_connected_tree_head *head,
struct nhg_hash_entry *depend)
{
- nhg_connected_tree_add_nhe(head, depend);
- zebra_nhg_increment_ref(depend);
+ /* If NULL is returned, it was successfully added and
+ * needs to have its refcnt incremented.
+ *
+ * Else the NHE is already present in the tree and doesn't
+ * need to increment the refcnt.
+ */
+ if (nhg_connected_tree_add_nhe(head, depend) == NULL)
+ zebra_nhg_increment_ref(depend);
}
static struct nhg_hash_entry *
zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
{
struct nhg_hash_entry *nhe = NULL;
+ vrf_id_t vrf_id;
+
+ /*
+ * Clang static analysis warns that nexthop may be NULL here;
+ * assert to make the non-NULL invariant explicit and silence it.
+ */
+ assert(nhg->nexthop);
+ vrf_id = !vrf_is_backend_netns() ? VRF_DEFAULT : nhg->nexthop->vrf_id;
if (!(nhg && nhg->nexthop)) {
flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
return NULL;
}
- zebra_nhg_find(&nhe, id, nhg, NULL, nhg->nexthop->vrf_id, rt_afi, 0);
+ zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0);
return nhe;
}
* resolved by a route NH1. The exception is if the route is a
* host route.
*/
- if (top && rn == top)
+ if (rn == top)
if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
|| ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
{
hash_iterate(hash, zebra_nhg_sweep_entry, NULL);
}
+
+/* Global control to disable use of kernel nexthops, if available. We can't
+ * force the kernel to support nexthop ids, of course, but we can disable
+ * zebra's use of them, e.g. for testing. By default, if the kernel supports
+ * nexthop ids, zebra uses them.
+ */
+void zebra_nhg_enable_kernel_nexthops(bool set)
+{
+ g_nexthops_enabled = set;
+}
+
+/* Whether zebra is currently permitted to use kernel nexthop ids. */
+bool zebra_nhg_kernel_nexthops_enabled(void)
+{
+ return g_nexthops_enabled;
+}