/*
 * Zebra dataplane layer.
 * Copyright (c) 2018 Volta Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib/libfrr.h"
#include "lib/debug.h"
#include "lib/frratomic.h"
#include "lib/frr_pthread.h"
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
#include "zebra/zebra_memory.h"
#include "zebra/zserv.h"
#include "zebra/zebra_dplane.h"
#include "zebra/debug.h"
/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")
/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG
#  define DPLANE_CTX_VALID(p)	assert((p) != NULL)
#else
#  define DPLANE_CTX_VALID(p)
#endif	/* DPLANE_DEBUG */
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	bool zd_is_update;

	uint32_t zd_seq;
	uint32_t zd_old_seq;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	int zd_type;
	int zd_old_type;

	afi_t zd_afi;
	safi_t zd_safi;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;

	uint32_t zd_metric;
	uint32_t zd_old_metric;
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	struct zebra_dplane_info zd_ns_info;

	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
/* Flag that can be set by a pre-kernel provider as a signal that an update
 * should bypass the kernel.
 */
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	uint32_t dp_id;

	int dp_flags;

	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	dplane_provider_process_fp dp_fp;

	dplane_provider_fini_fp dp_fini;

	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	dplane_results_fp dg_results_cb;

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
static int dplane_thread_loop(struct thread *event);
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns);
/* Obtain thread_master for dataplane thread */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}
/*
 * Allocate a dataplane update context
 */
static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
{
	struct zebra_dplane_ctx *p;

	/* TODO -- just alloc'ing memory, but would like to maintain
	 * a pool
	 */
	p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));

	return p;
}
/*
 * Free a dataplane results context.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx) {
		DPLANE_CTX_VALID(*pctx);

		/* TODO -- just freeing memory, but would like to maintain
		 * a pool
		 */

		/* Free embedded nexthops */
		if ((*pctx)->zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->zd_ng.nexthop);
		}

		if ((*pctx)->zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->zd_old_ng.nexthop);
		}

		XFREE(MTYPE_DP_CTX, *pctx);
		*pctx = NULL;
	}
}
/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
/* Enqueue a context block */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
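	/* Note: the const qualifier is cast away below because the embedded
	 * TAILQ linkage inside the ctx must be written to link it into the
	 * list; callers otherwise treat the block as read-only.
	 */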
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
/* Append a list of context blocks to another list */
void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
			    struct dplane_ctx_q *from_list)
{
	if (TAILQ_FIRST(from_list)) {
		TAILQ_CONCAT(to_list, from_list, zd_q_entries);

		/* And clear 'from' list */
		TAILQ_INIT(from_list);
	}
}
/* Dequeue a context block from the head of a list */
struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
{
	struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);

	if (ctx)
		TAILQ_REMOVE(q, ctx, zd_q_entries);

	return ctx;
}
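/*
 * Illustrative sketch (not called anywhere in this module): a consumer
 * holding a list of finished contexts can drain it with the queue
 * accessors above, e.g.:
 *
 *	struct zebra_dplane_ctx *ctx;
 *
 *	while ((ctx = dplane_ctx_dequeue(&list)) != NULL) {
 *		... inspect dplane_ctx_get_status(ctx), etc. ...
 *		dplane_ctx_fini(&ctx);
 *	}
 */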
/*
 * Accessors for information from the context object
 */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}
/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}
/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}
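/*
 * Illustrative sketch: a provider registered ahead of the kernel could mark
 * selected updates to bypass the kernel from its process callback;
 * 'should_skip' is a hypothetical predicate, the other calls are the real
 * APIs defined in this file:
 *
 *	static int filter_process(struct zebra_dplane_provider *prov)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *
 *		while ((ctx = dplane_provider_dequeue_in_ctx(prov))) {
 *			if (should_skip(ctx))
 *				dplane_ctx_set_skip_kernel(ctx);
 *			dplane_provider_enqueue_out_ctx(prov, ctx);
 *		}
 *
 *		return 0;
 *	}
 */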
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}

const char *dplane_op2str(enum dplane_op_e op)
{
	const char *ret = "UNKNOWN";

	switch (op) {
	case DPLANE_OP_NONE:
		break;
	case DPLANE_OP_ROUTE_INSTALL:
		ret = "ROUTE_INSTALL";
		break;
	case DPLANE_OP_ROUTE_UPDATE:
		ret = "ROUTE_UPDATE";
		break;
	case DPLANE_OP_ROUTE_DELETE:
		ret = "ROUTE_DELETE";
		break;
	}

	return ret;
}
const char *dplane_res2str(enum zebra_dplane_result res)
{
	const char *ret = "<Unknown>";

	switch (res) {
	case ZEBRA_DPLANE_REQUEST_FAILURE:
		ret = "FAILURE";
		break;
	case ZEBRA_DPLANE_REQUEST_QUEUED:
		ret = "QUEUED";
		break;
	case ZEBRA_DPLANE_REQUEST_SUCCESS:
		ret = "SUCCESS";
		break;
	}

	return ret;
}
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_dest);
}
/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->zd_src);
	}
}
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_type;
}

int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_type;
}
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_afi;
}

safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_safi;
}
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_tag;
}

route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_tag;
}
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_instance;
}

uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_instance;
}
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_metric;
}

uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_metric;
}
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_mtu;
}

uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_nexthop_mtu;
}
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_distance;
}

uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_distance;
}
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ng);
}

const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_old_ng);
}
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}

/*
 * End of dplane context accessors
 */
/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}

/*
 * Configure limit on the number of pending, queued updates.
 */
void dplane_set_in_queue_limit(uint32_t limit, bool set)
{
	/* Reset to default on 'unset' */
	if (!set)
		limit = DPLANE_DEFAULT_MAX_QUEUED;

	atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
			      memory_order_relaxed);
}

/*
 * Retrieve the current queue depth of incoming, unprocessed updates
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
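/*
 * Illustrative usage: a config handler might apply an explicit limit with
 * dplane_set_in_queue_limit(1000, true), and restore the default with
 * dplane_set_in_queue_limit(0, false); the limit argument is ignored on
 * 'unset'.
 */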
/*
 * Initialize a context block for a route update from zebra data structs.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	ctx->zd_type = re->type;
	ctx->zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->zd_src), src_p);
	else
		memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));

	ctx->zd_table_id = re->table;

	ctx->zd_metric = re->metric;
	ctx->zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->zd_mtu = re->mtu;
	ctx->zd_nexthop_mtu = re->nexthop_mtu;
	ctx->zd_instance = re->instance;
	ctx->zd_tag = re->tag;
	ctx->zd_old_tag = re->tag;
	ctx->zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->zd_afi = info->afi;
	ctx->zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	/* Internal copy helper */
	dplane_info_from_zns(&(ctx->zd_ns_info), zns);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE)
		zns->netlink_dplane.seq += 2;
	else
		zns->netlink_dplane.seq++;
#endif	/* NETLINK */

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence++;
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane thread.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane thread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
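	/* Note: the weak compare-exchange below may fail spuriously, or
	 * because another pthread raised the max first; on failure 'high'
	 * is reloaded with the current value and the loop re-checks, so the
	 * high-water mark only ever moves upward.
	 */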
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
/*
 * Utility that prepares a route update and enqueues it for processing
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence++;
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->zd_old_tag = old_re->tag;
			ctx->zd_old_type = old_re->type;
			ctx->zd_old_instance = old_re->instance;
			ctx->zd_old_distance = old_re->distance;
			ctx->zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else if (ctx) {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
/*
 * Enqueue a route 'add' for the dataplane.
 */
enum zebra_dplane_result dplane_route_add(struct route_node *rn,
					  struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_INSTALL);

done:
	return ret;
}

/*
 * Enqueue a route update for the dataplane.
 */
enum zebra_dplane_result dplane_route_update(struct route_node *rn,
					     struct route_entry *re,
					     struct route_entry *old_re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, old_re,
					   DPLANE_OP_ROUTE_UPDATE);

done:
	return ret;
}

/*
 * Enqueue a route removal for the dataplane.
 */
enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
					     struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_DELETE);

done:
	return ret;
}
/*
 * Handler for 'show dplane'
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);

	vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
	vty_out(vty, "Route update yields: %"PRIu64"\n", yields);

	return CMD_SUCCESS;
}
/*
 * Handler for 'show dplane providers'
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {

		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}
/*
 * Provider registration
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     dplane_provider_process_fp fp,
			     dplane_provider_fini_fp fini_fp,
			     void *data)
{
	int ret = 0;
	struct zebra_dplane_provider *p, *last;

	/* Validate */
	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
	if (p == NULL) {
		ret = ENOMEM;
		goto done;
	}

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;
	p->dp_flags = flags;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	p->dp_id = ++zdplane_info.dg_provider_id;

	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	return ret;
}
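/*
 * Illustrative sketch: an external plugin might register itself roughly
 * like this; 'my_process' is a hypothetical callback, everything else is
 * the API above:
 *
 *	ret = dplane_provider_register("my-plugin",
 *				       DPLANE_PRIO_PRE_KERNEL,
 *				       DPLANE_PROV_FLAGS_DEFAULT,
 *				       my_process,
 *				       NULL, NULL);
 *	if (ret != AOK)
 *		... registration failed, e.g. invalid priority ...
 */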
/* Accessors for provider attributes */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}

uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}

void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}

int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}

void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}

/*
 * Dequeue and maintain associated counter
 */
struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
	struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx = NULL;

	dplane_provider_lock(prov);

	ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
	if (ctx) {
		TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

		atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
					  memory_order_relaxed);
	}

	dplane_provider_unlock(prov);

	return ctx;
}
/*
 * Dequeue work to a list, return count
 */
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
				    struct dplane_ctx_q *listp)
{
	int limit, ret;
	struct zebra_dplane_ctx *ctx;

	limit = zdplane_info.dg_updates_per_cycle;

	dplane_provider_lock(prov);

	for (ret = 0; ret < limit; ret++) {
		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx) {
			TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

			TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
				  memory_order_relaxed);

	dplane_provider_unlock(prov);

	return ret;
}
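/*
 * Illustrative sketch: a provider that prefers batches over single contexts
 * can drain its input queue into a local list in one locked pass:
 *
 *	struct dplane_ctx_q work;
 *	struct zebra_dplane_ctx *ctx;
 *
 *	TAILQ_INIT(&work);
 *	dplane_provider_dequeue_in_list(prov, &work);
 *
 *	while ((ctx = dplane_ctx_dequeue(&work)) != NULL) {
 *		... process ctx ...
 *		dplane_provider_enqueue_out_ctx(prov, ctx);
 *	}
 */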
/*
 * Enqueue and maintain associated counter
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}
/*
 * Accessor for provider object
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif	/* NETLINK */
}
/*
 * Provider api to signal that work/events are available
 * for the dataplane pthread.
 */
int dplane_provider_work_ready(void)
{
	/* Note that during zebra startup, we may be offered work before
	 * the dataplane pthread (and thread-master) are ready. We want to
	 * enqueue the work, but the event-scheduling machinery may not be
	 * available.
	 */
	if (zdplane_info.dg_run) {
		thread_add_event(zdplane_info.dg_master,
				 dplane_thread_loop, NULL, 0,
				 &zdplane_info.dg_t_update);
	}

	return AOK;
}
/*
 * Zebra registers a results callback with the dataplane system
 */
int dplane_results_register(dplane_results_fp fp)
{
	zdplane_info.dg_results_cb = fp;

	return AOK;
}
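/*
 * Note that the registered results callback is invoked from the dataplane
 * pthread (see dplane_thread_loop() below), so a typical implementation
 * should hand the ctx off to the zebra main pthread (e.g. via an event)
 * rather than touch shared rib state directly.
 */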
/*
 * Kernel dataplane provider
 */

/*
 * Kernel provider callback
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
			char dest_str[PREFIX_STRLEN];

			prefix2str(dplane_ctx_get_dest(ctx),
				   dest_str, sizeof(dest_str));

			zlog_debug("%u:%s Dplane route update ctx %p op %s",
				   dplane_ctx_get_vrf(ctx), dest_str,
				   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
		}

		/* Call into the synchronous kernel-facing code here */
		res = kernel_route_update(ctx);

		if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
			atomic_fetch_add_explicit(
				&zdplane_info.dg_route_errors, 1,
				memory_order_relaxed);

		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
#if DPLANE_TEST_PROVIDER

/*
 * Test dataplane provider plugin
 */

/*
 * Test provider process callback
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}

/*
 * Test provider shutdown/fini callback
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
#endif	/* DPLANE_TEST_PROVIDER */
/*
 * Register default kernel provider
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       kernel_dplane_process_func,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif	/* DPLANE_TEST_PROVIDER */
}
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing.
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}

/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra pthread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
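/*
 * Shutdown sequencing, as implemented below: zebra calls
 * zebra_dplane_pre_finish() early (stop accepting new work), then
 * zebra_dplane_finish() once its cleanup updates are enqueued; the dplane
 * pthread polls dplane_check_shutdown_status() until no work remains, and
 * zebra_dplane_shutdown() finally stops and destroys the pthread.
 */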
/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		prov = TAILQ_NEXT(prov, dp_prov_link);
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * to zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any new work for it.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * TODO -- I'd rather hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */
	for (ctx = TAILQ_FIRST(&error_list); ctx; ) {
		TAILQ_REMOVE(&error_list, ctx, zd_q_entries);

		/* Call through to zebra main */
		(*zdplane_info.dg_results_cb)(ctx);

		ctx = TAILQ_FIRST(&error_list);
	}

	for (ctx = TAILQ_FIRST(&work_list); ctx; ) {
		TAILQ_REMOVE(&work_list, ctx, zd_q_entries);

		/* Call through to zebra main */
		(*zdplane_info.dg_results_cb)(ctx);

		ctx = TAILQ_FIRST(&work_list);
	}

done:
	return 0;
}
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
/*
 * Initialize the dataplane module during startup, internal/private version
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* Register default kernel 'provider' during init */
	dplane_provider_init();
}
/*
 * Start the dataplane pthread. This step needs to be run later than the
 * 'init' step, in case zebra has fork-ed.
 */
void zebra_dplane_start(void)
{
	/* Start dataplane pthread */

	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "zebra_dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
/*
 * Initialize the dataplane module at startup; called by zebra rib_init()
 */
void zebra_dplane_init(void)
{
	zebra_dplane_init_internal(&zebrad);
}