/*
 * Zebra dataplane layer.
 * Copyright (c) 2018 Volta Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib/libfrr.h"
#include "lib/debug.h"
#include "lib/frratomic.h"
#include "lib/frr_pthread.h"
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
#include "zebra/zebra_memory.h"
#include "zebra/zserv.h"
#include "zebra/zebra_dplane.h"
#include "zebra/rt.h"
#include "zebra/debug.h"
/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

#ifndef AOK
#  define AOK 0
#endif

/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */
/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

#  define DPLANE_CTX_VALID(p)	\
		assert((p) != NULL)

#else

#  define DPLANE_CTX_VALID(p)

#endif	/* DPLANE_DEBUG */
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	bool zd_is_update;

	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	afi_t zd_afi;
	safi_t zd_safi;

	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Namespace info */
	struct zebra_dplane_info zd_ns_info;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
/* Flag that can be set by a pre-kernel provider as a signal that an update
 * should bypass the kernel.
 */
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Flags */
	int dp_flags;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Provider work callback */
	int (*dp_fp)(struct zebra_dplane_provider *prov);

	/* Provider shutdown/fini callback */
	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p)   pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
/* Prototypes */
static int dplane_thread_loop(struct thread *event);
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns);
/* Obtain thread_master for dataplane thread */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}
/*
 * Allocate a dataplane update context
 */
static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
{
	struct zebra_dplane_ctx *p;

	/* TODO -- just alloc'ing memory, but would like to maintain
	 * a pool
	 */
	p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));

	return p;
}
/*
 * Free a dataplane results context.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx) {
		DPLANE_CTX_VALID(*pctx);

		/* TODO -- just freeing memory, but would like to maintain
		 * a pool
		 */

		/* Free embedded nexthops */
		if ((*pctx)->zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->zd_ng.nexthop);
		}

		if ((*pctx)->zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->zd_old_ng.nexthop);
		}

		XFREE(MTYPE_DP_CTX, *pctx);
		*pctx = NULL;
	}
}
/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
/* Enqueue a context block */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
/* Append a list of context blocks to another list */
void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
			    struct dplane_ctx_q *from_list)
{
	if (TAILQ_FIRST(from_list)) {
		TAILQ_CONCAT(to_list, from_list, zd_q_entries);

		/* And clear 'from' list */
		TAILQ_INIT(from_list);
	}
}
/* Dequeue a context block from the head of a list */
struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
{
	struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);

	if (ctx)
		TAILQ_REMOVE(q, ctx, zd_q_entries);

	return ctx;
}
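/*
 * Example (sketch only, not part of this module): a consumer in zebra
 * main might drain a returned list of contexts with the apis above; the
 * handler name here is hypothetical.
 *
 *	static int example_results_handler(struct dplane_ctx_q *ctxlist)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *
 *		while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
 *			... examine dplane_ctx_get_status(ctx) ...
 *			dplane_ctx_fini(&ctx);
 *		}
 *
 *		return 0;
 *	}
 */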
/*
 * Accessors for information from the context object
 */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}
/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_provider;
}
/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}
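/*
 * Example (sketch only): a hypothetical pre-kernel provider that has
 * already programmed an update into hardware could mark the context so
 * that kernel-facing code can skip it, e.g.:
 *
 *	dplane_ctx_set_skip_kernel(ctx);
 *	dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *	dplane_provider_enqueue_out_ctx(prov, ctx);
 *
 * Later consumers test the flag with dplane_ctx_is_skip_kernel().
 */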
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
const char *dplane_op2str(enum dplane_op_e op)
{
	const char *ret = "UNKNOWN";

	switch (op) {
	case DPLANE_OP_NONE:
		break;

	/* Route update */
	case DPLANE_OP_ROUTE_INSTALL:
		ret = "ROUTE_INSTALL";
		break;
	case DPLANE_OP_ROUTE_UPDATE:
		ret = "ROUTE_UPDATE";
		break;
	case DPLANE_OP_ROUTE_DELETE:
		ret = "ROUTE_DELETE";
		break;
	}

	return ret;
}
const char *dplane_res2str(enum zebra_dplane_result res)
{
	const char *ret = "<Unknown>";

	switch (res) {
	case ZEBRA_DPLANE_REQUEST_FAILURE:
		ret = "FAILURE";
		break;
	case ZEBRA_DPLANE_REQUEST_QUEUED:
		ret = "QUEUED";
		break;
	case ZEBRA_DPLANE_REQUEST_SUCCESS:
		ret = "SUCCESS";
		break;
	}

	return ret;
}
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_dest);
}
/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->zd_src);
	}
}
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_type;
}

int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_type;
}
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_afi;
}

safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_safi;
}
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_tag;
}

route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_tag;
}
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_instance;
}

uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_instance;
}
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_metric;
}

uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_metric;
}
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_mtu;
}

uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_nexthop_mtu;
}
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_distance;
}

uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_distance;
}
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ng);
}

const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_old_ng);
}

const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
/*
 * End of dplane context accessors
 */

/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}
/*
 * Configure limit on the number of pending, queued updates.
 */
void dplane_set_in_queue_limit(uint32_t limit, bool set)
{
	/* Reset to default on 'unset' */
	if (!set)
		limit = DPLANE_DEFAULT_MAX_QUEUED;

	atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
			      memory_order_relaxed);
}
/*
 * Retrieve the current queue depth of incoming, unprocessed updates
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
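/*
 * Example (sketch only): zebra code can combine the two queue apis
 * above for simple flow-control before generating more updates:
 *
 *	if (dplane_get_in_queue_len() >= dplane_get_in_queue_limit()) {
 *		... hold off and retry the update later ...
 *	}
 */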
/*
 * Initialize a context block for a route update from zebra data structs.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	ctx->zd_type = re->type;
	ctx->zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->zd_src), src_p);
	else
		memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));

	ctx->zd_table_id = re->table;

	ctx->zd_metric = re->metric;
	ctx->zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->zd_mtu = re->mtu;
	ctx->zd_nexthop_mtu = re->nexthop_mtu;
	ctx->zd_instance = re->instance;
	ctx->zd_tag = re->tag;
	ctx->zd_old_tag = re->tag;
	ctx->zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->zd_afi = info->afi;
	ctx->zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	/* Internal copy helper */
	dplane_info_from_zns(&(ctx->zd_ns_info), zns);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE)
		zns->netlink_dplane.seq += 2;
	else
		zns->netlink_dplane.seq++;
#endif	/* NETLINK */

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence++;
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane thread.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane thread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		/* The weak CAS may fail spuriously; on failure 'high' is
		 * refreshed with the current max and the loop re-tests.
		 */
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
/*
 * Utility that prepares a route update and enqueues it for processing
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence++;
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->zd_old_tag = old_re->tag;
			ctx->zd_old_type = old_re->type;
			ctx->zd_old_instance = old_re->instance;
			ctx->zd_old_distance = old_re->distance;
			ctx->zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else if (ctx) {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
/*
 * Enqueue a route 'add' for the dataplane.
 */
enum zebra_dplane_result dplane_route_add(struct route_node *rn,
					  struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_INSTALL);

done:
	return ret;
}
/*
 * Enqueue a route update for the dataplane.
 */
enum zebra_dplane_result dplane_route_update(struct route_node *rn,
					     struct route_entry *re,
					     struct route_entry *old_re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, old_re,
					   DPLANE_OP_ROUTE_UPDATE);

done:
	return ret;
}
/*
 * Enqueue a route removal for the dataplane.
 */
enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
					     struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_DELETE);

done:
	return ret;
}
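/*
 * Example (sketch only): a caller in the rib code would treat the
 * 'queued' result as success-in-progress; the final status arrives
 * asynchronously through the results callback registered at init.
 *
 *	enum zebra_dplane_result res = dplane_route_add(rn, re);
 *
 *	if (res == ZEBRA_DPLANE_REQUEST_FAILURE)
 *		... count the error, leave the route un-installed ...
 *	else
 *		... mark the route as queued, await the callback ...
 */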
/*
 * Handler for 'show dplane'
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);

	vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
	vty_out(vty, "Route update yields: %"PRIu64"\n", yields);

	return CMD_SUCCESS;
}
/*
 * Handler for 'show dplane providers'
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {

		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}
/*
 * Provider registration
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     int (*fp)(struct zebra_dplane_provider *),
			     int (*fini_fp)(struct zebra_dplane_provider *,
					    bool early),
			     void *data,
			     struct zebra_dplane_provider **prov_p)
{
	int ret = 0;
	struct zebra_dplane_provider *p = NULL, *last;

	/* Validate */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
	if (p == NULL) {
		ret = ENOMEM;
		goto done;
	}

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;
	p->dp_flags = flags;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	p->dp_id = ++zdplane_info.dg_provider_id;

	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	/* And unlock */
	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	if (prov_p)
		*prov_p = p;

	return ret;
}
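/*
 * Example (sketch only): a plugin would typically call the registration
 * api from its module init hook; the callback name is hypothetical.
 *
 *	static int example_process_func(struct zebra_dplane_provider *prov);
 *
 *	(void)dplane_provider_register("example",
 *				       DPLANE_PRIO_PRE_KERNEL,
 *				       DPLANE_PROV_FLAGS_DEFAULT,
 *				       example_process_func,
 *				       NULL, NULL, NULL);
 */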
/* Accessors for provider attributes */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}

uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}

void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}

int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}

void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
/*
 * Dequeue and maintain associated counter
 */
struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
	struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx = NULL;

	dplane_provider_lock(prov);

	ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
	if (ctx) {
		TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

		atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
					  memory_order_relaxed);
	}

	dplane_provider_unlock(prov);

	return ctx;
}
/*
 * Dequeue work to a list, return count
 */
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
				    struct dplane_ctx_q *listp)
{
	int limit, ret;
	struct zebra_dplane_ctx *ctx;

	limit = zdplane_info.dg_updates_per_cycle;

	dplane_provider_lock(prov);

	for (ret = 0; ret < limit; ret++) {
		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx) {
			TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

			TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
		} else
			break;
	}

	if (ret > 0)
		atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
					  memory_order_relaxed);

	dplane_provider_unlock(prov);

	return ret;
}
/*
 * Enqueue and maintain associated counter
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}
/*
 * Accessor for provider object
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
/*
 * Provider api to signal that work/events are available
 * for the dataplane pthread.
 */
int dplane_provider_work_ready(void)
{
	/* Note that during zebra startup, we may be offered work before
	 * the dataplane pthread (and thread-master) are ready. We want to
	 * enqueue the work, but the event-scheduling machinery may not be
	 * available.
	 */
	if (zdplane_info.dg_run) {
		thread_add_event(zdplane_info.dg_master,
				 dplane_thread_loop, NULL, 0,
				 &zdplane_info.dg_t_update);
	}

	return AOK;
}
/*
 * Kernel dataplane provider
 */

/*
 * Kernel provider callback
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
			char dest_str[PREFIX_STRLEN];

			prefix2str(dplane_ctx_get_dest(ctx),
				   dest_str, sizeof(dest_str));

			zlog_debug("%u:%s Dplane route update ctx %p op %s",
				   dplane_ctx_get_vrf(ctx), dest_str,
				   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
		}

		/* Call into the synchronous kernel-facing code here */
		res = kernel_route_update(ctx);

		if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
			atomic_fetch_add_explicit(
				&zdplane_info.dg_route_errors, 1,
				memory_order_relaxed);

		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
#if DPLANE_TEST_PROVIDER

/*
 * Test dataplane provider plugin
 */

/*
 * Test provider process callback
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}

/*
 * Test provider shutdown/fini callback
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
#endif	/* DPLANE_TEST_PROVIDER */
/*
 * Register default kernel provider
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       kernel_dplane_process_func,
				       NULL, NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif	/* DPLANE_TEST_PROVIDER */
}
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing.
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra pthread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		prov = TAILQ_NEXT(prov, dp_prov_link);
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else
			break;
	}

	DPLANE_UNLOCK();

	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any new work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */
	if (TAILQ_FIRST(&error_list)) {
		/* Call through to zebra main */
		(zdplane_info.dg_results_cb)(&error_list);

		TAILQ_INIT(&error_list);
	}

	if (TAILQ_FIRST(&work_list)) {
		/* Call through to zebra main */
		(zdplane_info.dg_results_cb)(&work_list);

		TAILQ_INIT(&work_list);
	}

done:
	return 0;
}
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
/*
 * Initialize the dataplane module during startup, internal/private version
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* Register default kernel 'provider' during init */
	dplane_provider_init();
}
/*
 * Start the dataplane pthread. This step needs to be run later than the
 * 'init' step, in case zebra has fork-ed.
 */
void zebra_dplane_start(void)
{
	/* Start dataplane pthread */

	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "Zebra dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
/*
 * Initialize the dataplane module at startup; called by zebra rib_init()
 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	zebra_dplane_init_internal(&zebrad);
	zdplane_info.dg_results_cb = results_fp;
}
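/*
 * Example (sketch only): zebra's rib module wires the dataplane up
 * during startup roughly like this; the callback name is illustrative.
 *
 *	zebra_dplane_init(example_results_handler);
 *	...
 *	zebra_dplane_start();
 */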