/*
 * Zebra dataplane layer.
 * Copyright (c) 2018 Volta Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/libfrr.h"
#include "lib/debug.h"
#include "lib/frratomic.h"
#include "lib/frr_pthread.h"
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_memory.h"
#include "zebra/zserv.h"
#include "zebra/zebra_dplane.h"
#include "zebra/rt.h"
#include "zebra/debug.h"

/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

#ifndef AOK
#  define AOK 0
#endif

/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;

/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

#  define DPLANE_CTX_VALID(p) \
		assert((p) != NULL)

#else

#  define DPLANE_CTX_VALID(p)

#endif	/* DPLANE_DEBUG */

/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	afi_t zd_afi;
	safi_t zd_safi;

	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

97 /* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};

/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	char ifname[IF_NAMESIZE];
	ifindex_t ifindex;
	int type;
	int af;
	int status;
	uint32_t flags;
	union g_addr nexthop;
	mpls_label_t local_label;
	mpls_label_t remote_label;

	union pw_protocol_fields fields;
};

/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. the skip-kernel flag */
	int zd_flags;

	bool zd_is_update;

	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Support info for route, LSP, or pseudowire update */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
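
/*
 * Lifecycle sketch for a context block, as implemented in this file
 * (illustrative, not a normative contract): the zebra main pthread
 * allocates and fills a ctx and enqueues it toward the dataplane; the
 * dplane pthread walks it through the providers; completed work comes
 * back to zebra main through the registered results callback, whose
 * consumer releases the ctx:
 *
 *	ctx = dplane_ctx_alloc();
 *	dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_INSTALL, rn, re);
 *	dplane_route_enqueue(ctx);
 *	...providers process the ctx and set its status...
 *	(zdplane_info.dg_results_cb)(&list);	// back to zebra main
 *	dplane_ctx_fini(&ctx);			// consumer, when done
 */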

/* Flag that can be set by a pre-kernel provider as a signal that an update
 * should bypass the kernel.
 */
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01


/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};

/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsps_queued;
	_Atomic uint32_t dg_lsps_queued_max;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)


/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p)   pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))

/* Prototypes */
static int dplane_thread_loop(struct thread *event);
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns);
static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
						    enum dplane_op_e op);

/*
 * Public APIs
 */

/* Obtain thread_master for dataplane thread */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}

/*
 * Allocate a dataplane update context
 */
static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
{
	struct zebra_dplane_ctx *p;

	/* TODO -- just alloc'ing memory, but would like to maintain
	 * a pool
	 */
	p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));

	return p;
}

/*
 * Free a dataplane results context.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx == NULL)
		return;

	DPLANE_CTX_VALID(*pctx);

	/* TODO -- just freeing memory, but would like to maintain
	 * a pool
	 */

	/* Some internal allocations may need to be freed, depending on
	 * the type of info captured in the ctx.
	 */
	switch ((*pctx)->zd_op) {
	case DPLANE_OP_ROUTE_INSTALL:
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:

		/* Free allocated nexthops */
		if ((*pctx)->u.rinfo.zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);

			(*pctx)->u.rinfo.zd_ng.nexthop = NULL;
		}

		if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);

			(*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
		}

		break;

	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
	{
		zebra_nhlfe_t *nhlfe, *next;

		/* Free allocated NHLFEs */
		for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
			next = nhlfe->next;

			zebra_mpls_nhlfe_del(nhlfe);
		}

		/* Clear pointers in lsp struct, in case we're caching
		 * free context structs.
		 */
		(*pctx)->u.lsp.nhlfe_list = NULL;
		(*pctx)->u.lsp.best_nhlfe = NULL;

		break;
	}

	case DPLANE_OP_NONE:
		break;
	}

	XFREE(MTYPE_DP_CTX, *pctx);
	*pctx = NULL;
}

/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}

/* Enqueue a context block */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}

/* Append a list of context blocks to another list */
void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
			    struct dplane_ctx_q *from_list)
{
	if (TAILQ_FIRST(from_list)) {
		TAILQ_CONCAT(to_list, from_list, zd_q_entries);

		/* And clear 'from' list */
		TAILQ_INIT(from_list);
	}
}

/* Dequeue a context block from the head of a list */
struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
{
	struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);

	if (ctx)
		TAILQ_REMOVE(q, ctx, zd_q_entries);

	return ctx;
}
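
/*
 * Minimal sketch of a results consumer using the queue helpers above;
 * the function name here is hypothetical - the real callback is whatever
 * zebra registers via zebra_dplane_init():
 *
 *	static int sample_results_handler(struct dplane_ctx_q *ctxlist)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *
 *		while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
 *			if (dplane_ctx_get_status(ctx) !=
 *			    ZEBRA_DPLANE_REQUEST_SUCCESS)
 *				...handle the failed update...
 *			dplane_ctx_fini(&ctx);
 *		}
 *
 *		return 0;
 *	}
 */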

/*
 * Accessors for information from the context object
 */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers that run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}

const char *dplane_op2str(enum dplane_op_e op)
{
	const char *ret = "UNKNOWN";

	switch (op) {
	case DPLANE_OP_NONE:
		ret = "NONE";
		break;

	/* Route update */
	case DPLANE_OP_ROUTE_INSTALL:
		ret = "ROUTE_INSTALL";
		break;
	case DPLANE_OP_ROUTE_UPDATE:
		ret = "ROUTE_UPDATE";
		break;
	case DPLANE_OP_ROUTE_DELETE:
		ret = "ROUTE_DELETE";
		break;

	case DPLANE_OP_LSP_INSTALL:
		ret = "LSP_INSTALL";
		break;
	case DPLANE_OP_LSP_UPDATE:
		ret = "LSP_UPDATE";
		break;
	case DPLANE_OP_LSP_DELETE:
		ret = "LSP_DELETE";
		break;
	}

	return ret;
}

const char *dplane_res2str(enum zebra_dplane_result res)
{
	const char *ret = "<Unknown>";

	switch (res) {
	case ZEBRA_DPLANE_REQUEST_FAILURE:
		ret = "FAILURE";
		break;
	case ZEBRA_DPLANE_REQUEST_QUEUED:
		ret = "QUEUED";
		break;
	case ZEBRA_DPLANE_REQUEST_SUCCESS:
		ret = "SUCCESS";
		break;
	}

	return ret;
}

const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}

/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->u.rinfo.zd_src);
	}
}

bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}

uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}

const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}

/* Accessors for LSP information */

mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

zebra_nhlfe_t *dplane_ctx_get_nhlfe(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}

zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}

const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.ifname;
}

mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

const union g_addr *dplane_ctx_get_pw_nexthop(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nexthop);
}

const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/*
 * End of dplane context accessors
 */


/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}

/*
 * Configure limit on the number of pending, queued updates.
 */
void dplane_set_in_queue_limit(uint32_t limit, bool set)
{
	/* Reset to default on 'unset' */
	if (!set)
		limit = DPLANE_DEFAULT_MAX_QUEUED;

	atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
			      memory_order_relaxed);
}

/*
 * Retrieve the current queue depth of incoming, unprocessed updates
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}

/*
 * Common dataplane context init with zebra namespace info.
 */
static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
			      struct zebra_ns *zns,
			      bool is_update)
{
	dplane_info_from_zns(&(ctx->zd_ns_info), zns);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (is_update)
		zns->netlink_dplane.seq += 2;
	else
		zns->netlink_dplane.seq++;
#endif /* HAVE_NETLINK */

	return AOK;
}

/*
 * Initialize a context block for a route update from zebra data structs.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthop flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}

/*
 * Capture information for an LSP update in a dplane context.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->label[0]);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}

/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane pthread.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}

/*
 * Utility that prepares a route update and enqueues it for processing
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}

/*
 * Enqueue a route 'add' for the dataplane.
 */
enum zebra_dplane_result dplane_route_add(struct route_node *rn,
					  struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_INSTALL);

done:
	return ret;
}

/*
 * Enqueue a route update for the dataplane.
 */
enum zebra_dplane_result dplane_route_update(struct route_node *rn,
					     struct route_entry *re,
					     struct route_entry *old_re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, old_re,
					   DPLANE_OP_ROUTE_UPDATE);
done:
	return ret;
}

/*
 * Enqueue a route removal for the dataplane.
 */
enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
					     struct route_entry *re)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;

	if (rn == NULL || re == NULL)
		goto done;

	ret = dplane_route_update_internal(rn, re, NULL,
					   DPLANE_OP_ROUTE_DELETE);

done:
	return ret;
}
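
/*
 * Usage sketch for the route apis above (illustrative): a QUEUED result
 * means the update was accepted and its final status will arrive later
 * through the results callback; FAILURE means nothing was enqueued.
 *
 *	enum zebra_dplane_result res = dplane_route_add(rn, re);
 *
 *	if (res == ZEBRA_DPLANE_REQUEST_FAILURE)
 *		...count the error; nothing further will be reported...
 *	else
 *		...ZEBRA_DPLANE_REQUEST_QUEUED: await the callback...
 */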

/*
 * Enqueue LSP add for the dataplane.
 */
enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
{
	enum zebra_dplane_result ret =
		lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);

	return ret;
}

/*
 * Enqueue LSP update for the dataplane.
 */
enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
{
	enum zebra_dplane_result ret =
		lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);

	return ret;
}

/*
 * Enqueue LSP delete for the dataplane.
 */
enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
{
	enum zebra_dplane_result ret =
		lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);

	return ret;
}

/*
 * Common internal LSP update utility
 */
static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
						    enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	ret = dplane_route_enqueue(ctx);

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}

/*
 * Handler for 'show dplane'
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields,
		other_errs;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);
	other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
					  memory_order_relaxed);

	vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
	vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
	vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);

	return CMD_SUCCESS;
}

/*
 * Handler for 'show dplane providers'
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {

		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}

/*
 * Provider registration
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     int (*fp)(struct zebra_dplane_provider *),
			     int (*fini_fp)(struct zebra_dplane_provider *,
					    bool early),
			     void *data,
			     struct zebra_dplane_provider **prov_p)
{
	int ret = 0;
	struct zebra_dplane_provider *p = NULL, *last;

	/* Validate */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
	if (p == NULL) {
		ret = ENOMEM;
		goto done;
	}

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	p->dp_id = ++zdplane_info.dg_provider_id;

	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	/* And unlock */
	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	if (prov_p)
		*prov_p = p;

	return ret;
}
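
/*
 * Registration sketch for a plugin provider ('my_process' and 'my_fini'
 * are hypothetical callbacks, not defined here):
 *
 *	static struct zebra_dplane_provider *my_prov;
 *
 *	ret = dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
 *				       DPLANE_PROV_FLAGS_DEFAULT,
 *				       my_process, my_fini,
 *				       NULL, &my_prov);
 *	if (ret != AOK)
 *		...registration failed...
 */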

/* Accessors for provider attributes */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}

uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}

void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}

int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}

/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}

void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}

/*
 * Dequeue and maintain associated counter
 */
struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
	struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx = NULL;

	dplane_provider_lock(prov);

	ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
	if (ctx) {
		TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

		atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
					  memory_order_relaxed);
	}

	dplane_provider_unlock(prov);

	return ctx;
}

/*
 * Dequeue work to a list, return count
 */
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
				    struct dplane_ctx_q *listp)
{
	int limit, ret;
	struct zebra_dplane_ctx *ctx;

	limit = zdplane_info.dg_updates_per_cycle;

	dplane_provider_lock(prov);

	for (ret = 0; ret < limit; ret++) {
		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx) {
			TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

			TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	if (ret > 0)
		atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
					  memory_order_relaxed);

	dplane_provider_unlock(prov);

	return ret;
}
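
/*
 * A provider callback can also drain its work in one batch with the
 * list helper above, instead of one ctx at a time (sketch of a trivial
 * provider that marks everything successful):
 *
 *	struct dplane_ctx_q work;
 *	struct zebra_dplane_ctx *ctx;
 *
 *	TAILQ_INIT(&work);
 *	dplane_provider_dequeue_in_list(prov, &work);
 *
 *	while ((ctx = dplane_ctx_dequeue(&work)) != NULL) {
 *		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *		dplane_provider_enqueue_out_ctx(prov, ctx);
 *	}
 */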

/*
 * Enqueue and maintain associated counter
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}

/*
 * Accessor for provider object
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}

/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}

/*
 * Provider api to signal that work/events are available
 * for the dataplane pthread.
 */
int dplane_provider_work_ready(void)
{
	/* Note that during zebra startup, we may be offered work before
	 * the dataplane pthread (and thread-master) are ready. We want to
	 * enqueue the work, but the event-scheduling machinery may not be
	 * available.
	 */
	if (zdplane_info.dg_run) {
		thread_add_event(zdplane_info.dg_master,
				 dplane_thread_loop, NULL, 0,
				 &zdplane_info.dg_t_update);
	}

	return AOK;
}

/*
 * Kernel dataplane provider
 */

/*
 * Handler for kernel LSP updates
 */
static enum zebra_dplane_result
kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result res;

	/* Call into the synchronous kernel-facing code here */
	res = kernel_lsp_update(ctx);

	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
		atomic_fetch_add_explicit(
			&zdplane_info.dg_lsp_errors, 1,
			memory_order_relaxed);

	return res;
}

/*
 * Handler for kernel route updates
 */
static enum zebra_dplane_result
kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result res;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char dest_str[PREFIX_STRLEN];

		prefix2str(dplane_ctx_get_dest(ctx),
			   dest_str, sizeof(dest_str));

		zlog_debug("%u:%s Dplane route update ctx %p op %s",
			   dplane_ctx_get_vrf(ctx), dest_str,
			   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
	}

	/* Call into the synchronous kernel-facing code here */
	res = kernel_route_update(ctx);

	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
		atomic_fetch_add_explicit(
			&zdplane_info.dg_route_errors, 1,
			memory_order_relaxed);

	return res;
}

/*
 * Kernel provider callback
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* Dispatch to appropriate kernel-facing apis */
		switch (dplane_ctx_get_op(ctx)) {

		case DPLANE_OP_ROUTE_INSTALL:
		case DPLANE_OP_ROUTE_UPDATE:
		case DPLANE_OP_ROUTE_DELETE:
			res = kernel_dplane_route_update(ctx);
			break;

		case DPLANE_OP_LSP_INSTALL:
		case DPLANE_OP_LSP_UPDATE:
		case DPLANE_OP_LSP_DELETE:
			res = kernel_dplane_lsp_update(ctx);
			break;

		default:
			atomic_fetch_add_explicit(
				&zdplane_info.dg_other_errors, 1,
				memory_order_relaxed);

			res = ZEBRA_DPLANE_REQUEST_FAILURE;
			break;
		}

		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}

#if DPLANE_TEST_PROVIDER

/*
 * Test dataplane provider plugin
 */

/*
 * Test provider process callback
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}

/*
 * Test provider shutdown/fini callback
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
#endif	/* DPLANE_TEST_PROVIDER */

/*
 * Register default kernel provider
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       kernel_dplane_process_func,
				       NULL,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif	/* DPLANE_TEST_PROVIDER */
}

/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing.
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}

/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra pthread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}

/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}

/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}

/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}

/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If a provider has outgoing work in its queue, take that
	 * work along for the next provider.
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */
				/* Move to error list; will be returned
				 * to zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);


	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}

/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}

/*
 * Initialize the dataplane module during startup, internal/private version
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* Register default kernel 'provider' during init */
	dplane_provider_init();
}

/*
 * Start the dataplane pthread. This step needs to be run later than the
 * 'init' step, in case zebra has forked.
 */
void zebra_dplane_start(void)
{
	/* Start dataplane pthread */

	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "Zebra dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}

/*
 * Initialize the dataplane module at startup; called by zebra rib_init()
 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	zebra_dplane_init_internal(&zebrad);
	zdplane_info.dg_results_cb = results_fp;
}
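
/*
 * Startup-ordering sketch, per the comments above (the callback name is
 * illustrative; zebra supplies its own results handler):
 *
 *	zebra_dplane_init(my_results_handler);	// from rib_init()
 *	...fork/daemonize, if configured...
 *	zebra_dplane_start();			// pthread starts here
 */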