]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
Merge pull request #3723 from slrz/zebra-rtadv-add-rfc8106-support
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "lib/libfrr.h"
21 #include "lib/debug.h"
22 #include "lib/frratomic.h"
23 #include "lib/frr_pthread.h"
24 #include "lib/memory.h"
25 #include "lib/queue.h"
26 #include "lib/zebra.h"
27 #include "zebra/zebra_router.h"
28 #include "zebra/zebra_memory.h"
29 #include "zebra/zebra_router.h"
30 #include "zebra/zebra_dplane.h"
31 #include "zebra/rt.h"
32 #include "zebra/debug.h"
33
34 /* Memory type for context blocks */
35 DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
36 DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")
37
38 #ifndef AOK
39 # define AOK 0
40 #endif
41
42 /* Enable test dataplane provider */
43 /*#define DPLANE_TEST_PROVIDER 1 */
44
45 /* Default value for max queued incoming updates */
46 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
47
48 /* Default value for new work per cycle */
49 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
50
51 /* Validation check macro for context blocks */
52 /* #define DPLANE_DEBUG 1 */
53
54 #ifdef DPLANE_DEBUG
55
56 # define DPLANE_CTX_VALID(p) \
57 assert((p) != NULL)
58
59 #else
60
61 # define DPLANE_CTX_VALID(p)
62
63 #endif /* DPLANE_DEBUG */
64
/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* Address-family and sub-address-family of the route */
	afi_t zd_afi;
	safi_t zd_safi;

	/* Owning zebra route type; the 'old' variants below capture the
	 * previous route's attributes during replace/update operations.
	 */
	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	/* Protocol instance ids */
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	/* Admin distances */
	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
103
/*
 * Pseudowire info for the dataplane; a flat copy of the zebra_pw
 * attributes, so the dplane pthread never touches the 'core' struct.
 */
struct dplane_pw_info {
	char ifname[IF_NAMESIZE];
	ifindex_t ifindex;
	int type;
	int af;
	int status;
	uint32_t flags;
	union g_addr nexthop;
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Protocol-specific fields copied from the zebra_pw 'data' member */
	union pw_protocol_fields fields;
};
120
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True when this ctx carries both a new and an 'old' route */
	bool zd_is_update;

	/* Sequence numbers, used to detect stale results */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Support info for either route or LSP update; which member is
	 * valid is determined by zd_op.
	 */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
165
166 /* Flag that can be set by a pre-kernel provider as a signal that an update
167 * should bypass the kernel.
168 */
169 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
170
171
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex guarding the provider's in/out context queues */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Work callback invoked to let the provider process contexts */
	int (*dp_fp)(struct zebra_dplane_provider *prov);

	/* Shutdown/cleanup callback; 'early_p' distinguishes the early
	 * shutdown pass from final cleanup.
	 */
	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters: inbound/outbound totals, current queue depths,
	 * high-water marks, and errors.
	 */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
217
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Route counters: total in, current queued, queued high-water,
	 * and error totals.
	 */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	/* LSP counters */
	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	/* Pseudowire counters */
	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
278
279 /*
280 * Lock and unlock for interactions with the zebra 'core' pthread
281 */
282 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
283 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
284
285
286 /*
287 * Lock and unlock for individual providers
288 */
289 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
290 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
291
292 /* Prototypes */
293 static int dplane_thread_loop(struct thread *event);
294 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
295 struct zebra_ns *zns);
296 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
297 enum dplane_op_e op);
298 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
299 enum dplane_op_e op);
300
301 /*
302 * Public APIs
303 */
304
305 /* Obtain thread_master for dataplane thread */
306 struct thread_master *dplane_get_thread_master(void)
307 {
308 return zdplane_info.dg_master;
309 }
310
311 /*
312 * Allocate a dataplane update context
313 */
314 static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
315 {
316 struct zebra_dplane_ctx *p;
317
318 /* TODO -- just alloc'ing memory, but would like to maintain
319 * a pool
320 */
321 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
322
323 return p;
324 }
325
/*
 * Free a dataplane results context: release any per-operation
 * allocations (nexthops, NHLFEs) first, then the block itself.
 * '*pctx' is set to NULL on return so the caller cannot reuse
 * the freed context.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx == NULL)
		return;

	DPLANE_CTX_VALID(*pctx);

	/* TODO -- just freeing memory, but would like to maintain
	 * a pool
	 */

	/* Some internal allocations may need to be freed, depending on
	 * the type of info captured in the ctx.
	 */
	switch ((*pctx)->zd_op) {
	case DPLANE_OP_ROUTE_INSTALL:
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:

		/* Free allocated nexthops */
		if ((*pctx)->u.rinfo.zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);

			(*pctx)->u.rinfo.zd_ng.nexthop = NULL;
		}

		if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);

			(*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
		}

		break;

	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
	{
		zebra_nhlfe_t *nhlfe, *next;

		/* Free allocated NHLFEs */
		for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
			next = nhlfe->next;

			zebra_mpls_nhlfe_del(nhlfe);
		}

		/* Clear pointers in lsp struct, in case we're cacheing
		 * free context structs.
		 */
		(*pctx)->u.lsp.nhlfe_list = NULL;
		(*pctx)->u.lsp.best_nhlfe = NULL;

		break;
	}

	/* PW and NONE ops carry no separately-allocated data */
	case DPLANE_OP_PW_INSTALL:
	case DPLANE_OP_PW_UNINSTALL:
	case DPLANE_OP_NONE:
		break;
	}

	XFREE(MTYPE_DP_CTX, *pctx);
	*pctx = NULL;
}
396
/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain a context pool; just free for now */
	dplane_ctx_free(pctx);
}
405
/* Enqueue a context block at the tail of a caller-supplied list.
 * The const is cast away because the TAILQ linkage lives inside the
 * ctx itself; the ctx's payload is not modified.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
412
413 /* Append a list of context blocks to another list */
414 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
415 struct dplane_ctx_q *from_list)
416 {
417 if (TAILQ_FIRST(from_list)) {
418 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
419
420 /* And clear 'from' list */
421 TAILQ_INIT(from_list);
422 }
423 }
424
425 /* Dequeue a context block from the head of a list */
426 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
427 {
428 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
429
430 if (ctx)
431 TAILQ_REMOVE(q, ctx, zd_q_entries);
432
433 return ctx;
434 }
435
/*
 * Accessors for information from the context object
 */

/* Retrieve the result status recorded in a context */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Record a result status in a context */
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Test whether the kernel-bypass flag has been set on a context */
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Retrieve the operation code from a context */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
485
486 const char *dplane_op2str(enum dplane_op_e op)
487 {
488 const char *ret = "UNKNOWN";
489
490 switch (op) {
491 case DPLANE_OP_NONE:
492 ret = "NONE";
493 break;
494
495 /* Route update */
496 case DPLANE_OP_ROUTE_INSTALL:
497 ret = "ROUTE_INSTALL";
498 break;
499 case DPLANE_OP_ROUTE_UPDATE:
500 ret = "ROUTE_UPDATE";
501 break;
502 case DPLANE_OP_ROUTE_DELETE:
503 ret = "ROUTE_DELETE";
504 break;
505
506 case DPLANE_OP_LSP_INSTALL:
507 ret = "LSP_INSTALL";
508 break;
509 case DPLANE_OP_LSP_UPDATE:
510 ret = "LSP_UPDATE";
511 break;
512 case DPLANE_OP_LSP_DELETE:
513 ret = "LSP_DELETE";
514 break;
515
516 case DPLANE_OP_PW_INSTALL:
517 ret = "PW_INSTALL";
518 break;
519 case DPLANE_OP_PW_UNINSTALL:
520 ret = "PW_UNINSTALL";
521 break;
522
523 };
524
525 return ret;
526 }
527
528 const char *dplane_res2str(enum zebra_dplane_result res)
529 {
530 const char *ret = "<Unknown>";
531
532 switch (res) {
533 case ZEBRA_DPLANE_REQUEST_FAILURE:
534 ret = "FAILURE";
535 break;
536 case ZEBRA_DPLANE_REQUEST_QUEUED:
537 ret = "QUEUED";
538 break;
539 case ZEBRA_DPLANE_REQUEST_SUCCESS:
540 ret = "SUCCESS";
541 break;
542 };
543
544 return ret;
545 }
546
/* Retrieve the destination prefix captured in a route context */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}

/* Source prefix is a little special - return NULL for "no src prefix".
 * zd_src is zeroed at init when no source was supplied, so a zero
 * prefixlen plus an all-zero address means "never set"; the v6 check
 * also covers v4 since prefix4 overlays the start of prefix6.
 */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->u.rinfo.zd_src);
	}
}
566
/* True if the context carries both a new and an 'old' route */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Retrieve the sequence number assigned when the ctx was created */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Retrieve the 'old' route's sequence number (update case) */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* Retrieve the vrf id captured in the context */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* Retrieve the route's zebra route type */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

/* Retrieve the 'old' route's zebra route type (update case) */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

/* Retrieve the route's address family */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

/* Retrieve the route's sub-address family */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

/* Retrieve the kernel table id for the route */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}
629
/* Retrieve the route's tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

/* Retrieve the 'old' route's tag (update case) */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

/* Retrieve the route's protocol instance id */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

/* Retrieve the 'old' route's protocol instance id (update case) */
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}

/* Retrieve the route's metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

/* Retrieve the 'old' route's metric (update case) */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

/* Retrieve the route's mtu */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

/* Retrieve the route's nexthop mtu */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

/* Retrieve the route's admin distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

/* Retrieve the 'old' route's admin distance (update case) */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}
699
/* Retrieve the route's nexthop group (owned by the ctx) */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

/* Retrieve the 'old' route's nexthop group (non-netlink update case) */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

/* Retrieve the namespace info captured in the context */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
723
/* Accessors for LSP information */

/* Retrieve the LSP's incoming label */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

/* Retrieve the LSP's address family */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

/* Retrieve the LSP's flags */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

/* Retrieve the head of the LSP's NHLFE list (owned by the ctx) */
zebra_nhlfe_t *dplane_ctx_get_nhlfe(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}

/* Retrieve the LSP's 'best' NHLFE, as captured at init time */
zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

/* Retrieve the LSP's ecmp count */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
767
/* Accessors for pseudowire information */

/* Retrieve the pseudowire's interface name */
const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.ifname;
}

/* Retrieve the pseudowire's local label */
mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

/* Retrieve the pseudowire's remote label */
mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

/* Retrieve the pseudowire's type */
int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Retrieve the pseudowire's address family */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

/* Retrieve the pseudowire's flags */
uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

/* Retrieve the pseudowire's status */
int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

/* Retrieve the pseudowire's nexthop address */
const union g_addr *dplane_ctx_get_pw_nexthop(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nexthop);
}

/* Retrieve the pseudowire's protocol-specific fields */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}
832
833 /*
834 * End of dplane context accessors
835 */
836
837
838 /*
839 * Retrieve the limit on the number of pending, unprocessed updates.
840 */
841 uint32_t dplane_get_in_queue_limit(void)
842 {
843 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
844 memory_order_relaxed);
845 }
846
847 /*
848 * Configure limit on the number of pending, queued updates.
849 */
850 void dplane_set_in_queue_limit(uint32_t limit, bool set)
851 {
852 /* Reset to default on 'unset' */
853 if (!set)
854 limit = DPLANE_DEFAULT_MAX_QUEUED;
855
856 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
857 memory_order_relaxed);
858 }
859
860 /*
861 * Retrieve the current queue depth of incoming, unprocessed updates
862 */
863 uint32_t dplane_get_in_queue_len(void)
864 {
865 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
866 memory_order_seq_cst);
867 }
868
/*
 * Common dataplane context init with zebra namespace info.
 * Snapshots the ns info into the ctx (no pointers back into 'core'
 * structs), and advances the netlink sequence counter. Always
 * returns AOK.
 */
static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
			      struct zebra_ns *zns,
			      bool is_update)
{
	dplane_info_from_zns(&(ctx->zd_ns_info), zns);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (is_update)
		zns->netlink_dplane.seq += 2;
	else
		zns->netlink_dplane.seq++;
#endif /* HAVE_NETLINK */

	return AOK;
}
890
891 /*
892 * Initialize a context block for a route update from zebra data structs.
893 */
894 static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
895 enum dplane_op_e op,
896 struct route_node *rn,
897 struct route_entry *re)
898 {
899 int ret = EINVAL;
900 const struct route_table *table = NULL;
901 const rib_table_info_t *info;
902 const struct prefix *p, *src_p;
903 struct zebra_ns *zns;
904 struct zebra_vrf *zvrf;
905 struct nexthop *nexthop;
906
907 if (!ctx || !rn || !re)
908 goto done;
909
910 ctx->zd_op = op;
911 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
912
913 ctx->u.rinfo.zd_type = re->type;
914 ctx->u.rinfo.zd_old_type = re->type;
915
916 /* Prefixes: dest, and optional source */
917 srcdest_rnode_prefixes(rn, &p, &src_p);
918
919 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
920
921 if (src_p)
922 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
923 else
924 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
925
926 ctx->zd_table_id = re->table;
927
928 ctx->u.rinfo.zd_metric = re->metric;
929 ctx->u.rinfo.zd_old_metric = re->metric;
930 ctx->zd_vrf_id = re->vrf_id;
931 ctx->u.rinfo.zd_mtu = re->mtu;
932 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
933 ctx->u.rinfo.zd_instance = re->instance;
934 ctx->u.rinfo.zd_tag = re->tag;
935 ctx->u.rinfo.zd_old_tag = re->tag;
936 ctx->u.rinfo.zd_distance = re->distance;
937
938 table = srcdest_rnode_table(rn);
939 info = table->info;
940
941 ctx->u.rinfo.zd_afi = info->afi;
942 ctx->u.rinfo.zd_safi = info->safi;
943
944 /* Extract ns info - can't use pointers to 'core' structs */
945 zvrf = vrf_info_lookup(re->vrf_id);
946 zns = zvrf->zns;
947
948 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
949
950 /* Copy nexthops; recursive info is included too */
951 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);
952
953 /* TODO -- maybe use array of nexthops to avoid allocs? */
954
955 /* Ensure that the dplane's nexthops flags are clear. */
956 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
957 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
958
959 /* Trying out the sequence number idea, so we can try to detect
960 * when a result is stale.
961 */
962 re->dplane_sequence = zebra_router_get_next_sequence();
963 ctx->zd_seq = re->dplane_sequence;
964
965 ret = AOK;
966
967 done:
968 return ret;
969 }
970
/*
 * Capture information for an LSP update in a dplane context.
 * Deep-copies the LSP's NHLFE list into the ctx so the dataplane
 * pthread never touches the 'core' LSP struct. Returns AOK on
 * success, ENOMEM if an NHLFE copy fails (caller frees the ctx,
 * which releases any NHLFEs copied so far).
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): assumes nhlfe->nexthop->nh_label is non-NULL
		 * here — confirm callers guarantee every LSP nexthop carries
		 * a label.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->label[0]);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1034
/*
 * Capture information for a pseudowire update in a dplane context.
 * Flat-copies the zebra_pw attributes into the ctx; always returns AOK.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	int ret = AOK;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->u.pw.ifname, pw->ifname, sizeof(ctx->u.pw.ifname));
	ctx->zd_vrf_id = pw->vrf_id;
	ctx->u.pw.ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.nexthop = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	return ret;
}
1075
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane pthread.
 * Returns the result of scheduling the dataplane work event.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	/* Bump the queued-updates counter */
	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also: weak CAS in a loop,
	 * retrying if another thread races us to raise the max.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1121
/*
 * Utility that prepares a route update and enqueues it for processing.
 * Allocates a ctx, captures 're' (and, for updates, extra info from
 * 'old_re'), and hands it to the dataplane queue. Returns QUEUED on
 * success, FAILURE otherwise (the ctx is freed on failure).
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			/* The old route gets its own sequence number too */
			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1191
1192 /*
1193 * Enqueue a route 'add' for the dataplane.
1194 */
1195 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1196 struct route_entry *re)
1197 {
1198 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1199
1200 if (rn == NULL || re == NULL)
1201 goto done;
1202
1203 ret = dplane_route_update_internal(rn, re, NULL,
1204 DPLANE_OP_ROUTE_INSTALL);
1205
1206 done:
1207 return ret;
1208 }
1209
1210 /*
1211 * Enqueue a route update for the dataplane.
1212 */
1213 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1214 struct route_entry *re,
1215 struct route_entry *old_re)
1216 {
1217 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1218
1219 if (rn == NULL || re == NULL)
1220 goto done;
1221
1222 ret = dplane_route_update_internal(rn, re, old_re,
1223 DPLANE_OP_ROUTE_UPDATE);
1224 done:
1225 return ret;
1226 }
1227
1228 /*
1229 * Enqueue a route removal for the dataplane.
1230 */
1231 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1232 struct route_entry *re)
1233 {
1234 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1235
1236 if (rn == NULL || re == NULL)
1237 goto done;
1238
1239 ret = dplane_route_update_internal(rn, re, NULL,
1240 DPLANE_OP_ROUTE_DELETE);
1241
1242 done:
1243 return ret;
1244 }
1245
1246 /*
1247 * Enqueue LSP add for the dataplane.
1248 */
1249 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
1250 {
1251 enum zebra_dplane_result ret =
1252 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
1253
1254 return ret;
1255 }
1256
1257 /*
1258 * Enqueue LSP update for the dataplane.
1259 */
1260 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
1261 {
1262 enum zebra_dplane_result ret =
1263 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
1264
1265 return ret;
1266 }
1267
1268 /*
1269 * Enqueue LSP delete for the dataplane.
1270 */
1271 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
1272 {
1273 enum zebra_dplane_result ret =
1274 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
1275
1276 return ret;
1277 }
1278
1279 /*
1280 * Enqueue pseudowire install for the dataplane.
1281 */
1282 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
1283 {
1284 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
1285 }
1286
1287 /*
1288 * Enqueue pseudowire un-install for the dataplane.
1289 */
1290 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
1291 {
1292 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
1293 }
1294
1295 /*
1296 * Common internal LSP update utility
1297 */
1298 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
1299 enum dplane_op_e op)
1300 {
1301 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1302 int ret = EINVAL;
1303 struct zebra_dplane_ctx *ctx = NULL;
1304
1305 /* Obtain context block */
1306 ctx = dplane_ctx_alloc();
1307 if (ctx == NULL) {
1308 ret = ENOMEM;
1309 goto done;
1310 }
1311
1312 ret = dplane_ctx_lsp_init(ctx, op, lsp);
1313 if (ret != AOK)
1314 goto done;
1315
1316 ret = dplane_route_enqueue(ctx);
1317
1318 done:
1319 /* Update counter */
1320 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
1321 memory_order_relaxed);
1322
1323 if (ret == AOK)
1324 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1325 else {
1326 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
1327 memory_order_relaxed);
1328 if (ctx)
1329 dplane_ctx_free(&ctx);
1330 }
1331
1332 return result;
1333 }
1334
1335 /*
1336 * Internal, common handler for pseudowire updates.
1337 */
1338 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
1339 enum dplane_op_e op)
1340 {
1341 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1342 int ret;
1343 struct zebra_dplane_ctx *ctx = NULL;
1344
1345 ctx = dplane_ctx_alloc();
1346 if (ctx == NULL) {
1347 ret = ENOMEM;
1348 goto done;
1349 }
1350
1351 ret = dplane_ctx_pw_init(ctx, op, pw);
1352 if (ret != AOK)
1353 goto done;
1354
1355 ret = dplane_route_enqueue(ctx);
1356
1357 done:
1358 /* Update counter */
1359 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
1360 memory_order_relaxed);
1361
1362 if (ret == AOK)
1363 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1364 else {
1365 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
1366 memory_order_relaxed);
1367 if (ctx)
1368 dplane_ctx_free(&ctx);
1369 }
1370
1371 return result;
1372 }
1373
1374 /*
1375 * Handler for 'show dplane'
1376 */
1377 int dplane_show_helper(struct vty *vty, bool detailed)
1378 {
1379 uint64_t queued, queue_max, limit, errs, incoming, yields,
1380 other_errs;
1381
1382 /* Using atomics because counters are being changed in different
1383 * pthread contexts.
1384 */
1385 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
1386 memory_order_relaxed);
1387 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
1388 memory_order_relaxed);
1389 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
1390 memory_order_relaxed);
1391 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
1392 memory_order_relaxed);
1393 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
1394 memory_order_relaxed);
1395 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
1396 memory_order_relaxed);
1397 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
1398 memory_order_relaxed);
1399
1400 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
1401 incoming);
1402 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
1403 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
1404 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
1405 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
1406 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
1407 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
1408
1409 return CMD_SUCCESS;
1410 }
1411
1412 /*
1413 * Handler for 'show dplane providers'
1414 */
1415 int dplane_show_provs_helper(struct vty *vty, bool detailed)
1416 {
1417 struct zebra_dplane_provider *prov;
1418 uint64_t in, in_max, out, out_max;
1419
1420 vty_out(vty, "Zebra dataplane providers:\n");
1421
1422 DPLANE_LOCK();
1423 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
1424 DPLANE_UNLOCK();
1425
1426 /* Show counters, useful info from each registered provider */
1427 while (prov) {
1428
1429 in = atomic_load_explicit(&prov->dp_in_counter,
1430 memory_order_relaxed);
1431 in_max = atomic_load_explicit(&prov->dp_in_max,
1432 memory_order_relaxed);
1433 out = atomic_load_explicit(&prov->dp_out_counter,
1434 memory_order_relaxed);
1435 out_max = atomic_load_explicit(&prov->dp_out_max,
1436 memory_order_relaxed);
1437
1438 vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
1439 "out: %"PRIu64", q_max: %"PRIu64"\n",
1440 prov->dp_name, prov->dp_id, in, in_max, out, out_max);
1441
1442 DPLANE_LOCK();
1443 prov = TAILQ_NEXT(prov, dp_prov_link);
1444 DPLANE_UNLOCK();
1445 }
1446
1447 return CMD_SUCCESS;
1448 }
1449
1450 /*
1451 * Provider registration
1452 */
1453 int dplane_provider_register(const char *name,
1454 enum dplane_provider_prio prio,
1455 int flags,
1456 int (*fp)(struct zebra_dplane_provider *),
1457 int (*fini_fp)(struct zebra_dplane_provider *,
1458 bool early),
1459 void *data,
1460 struct zebra_dplane_provider **prov_p)
1461 {
1462 int ret = 0;
1463 struct zebra_dplane_provider *p = NULL, *last;
1464
1465 /* Validate */
1466 if (fp == NULL) {
1467 ret = EINVAL;
1468 goto done;
1469 }
1470
1471 if (prio <= DPLANE_PRIO_NONE ||
1472 prio > DPLANE_PRIO_LAST) {
1473 ret = EINVAL;
1474 goto done;
1475 }
1476
1477 /* Allocate and init new provider struct */
1478 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
1479 if (p == NULL) {
1480 ret = ENOMEM;
1481 goto done;
1482 }
1483
1484 pthread_mutex_init(&(p->dp_mutex), NULL);
1485 TAILQ_INIT(&(p->dp_ctx_in_q));
1486 TAILQ_INIT(&(p->dp_ctx_out_q));
1487
1488 p->dp_priority = prio;
1489 p->dp_fp = fp;
1490 p->dp_fini = fini_fp;
1491 p->dp_data = data;
1492
1493 /* Lock - the dplane pthread may be running */
1494 DPLANE_LOCK();
1495
1496 p->dp_id = ++zdplane_info.dg_provider_id;
1497
1498 if (name)
1499 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
1500 else
1501 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
1502 "provider-%u", p->dp_id);
1503
1504 /* Insert into list ordered by priority */
1505 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
1506 if (last->dp_priority > p->dp_priority)
1507 break;
1508 }
1509
1510 if (last)
1511 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
1512 else
1513 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
1514 dp_prov_link);
1515
1516 /* And unlock */
1517 DPLANE_UNLOCK();
1518
1519 if (IS_ZEBRA_DEBUG_DPLANE)
1520 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
1521 p->dp_name, p->dp_id, p->dp_priority);
1522
1523 done:
1524 if (prov_p)
1525 *prov_p = p;
1526
1527 return ret;
1528 }
1529
1530 /* Accessors for provider attributes */
1531 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
1532 {
1533 return prov->dp_name;
1534 }
1535
1536 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
1537 {
1538 return prov->dp_id;
1539 }
1540
1541 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
1542 {
1543 return prov->dp_data;
1544 }
1545
1546 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
1547 {
1548 return zdplane_info.dg_updates_per_cycle;
1549 }
1550
1551 /* Lock/unlock a provider's mutex - iff the provider was registered with
1552 * the THREADED flag.
1553 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	/* Locking is a no-op unless the provider registered as THREADED. */
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
1559
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	/* Unlock is a no-op unless the provider registered as THREADED. */
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
1565
1566 /*
1567 * Dequeue and maintain associated counter
1568 */
1569 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
1570 struct zebra_dplane_provider *prov)
1571 {
1572 struct zebra_dplane_ctx *ctx = NULL;
1573
1574 dplane_provider_lock(prov);
1575
1576 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1577 if (ctx) {
1578 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
1579
1580 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
1581 memory_order_relaxed);
1582 }
1583
1584 dplane_provider_unlock(prov);
1585
1586 return ctx;
1587 }
1588
1589 /*
1590 * Dequeue work to a list, return count
1591 */
1592 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
1593 struct dplane_ctx_q *listp)
1594 {
1595 int limit, ret;
1596 struct zebra_dplane_ctx *ctx;
1597
1598 limit = zdplane_info.dg_updates_per_cycle;
1599
1600 dplane_provider_lock(prov);
1601
1602 for (ret = 0; ret < limit; ret++) {
1603 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1604 if (ctx) {
1605 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
1606
1607 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
1608 } else {
1609 break;
1610 }
1611 }
1612
1613 if (ret > 0)
1614 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
1615 memory_order_relaxed);
1616
1617 dplane_provider_unlock(prov);
1618
1619 return ret;
1620 }
1621
1622 /*
1623 * Enqueue and maintain associated counter
1624 */
1625 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
1626 struct zebra_dplane_ctx *ctx)
1627 {
1628 dplane_provider_lock(prov);
1629
1630 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
1631 zd_q_entries);
1632
1633 dplane_provider_unlock(prov);
1634
1635 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
1636 memory_order_relaxed);
1637 }
1638
1639 /*
1640 * Accessor for provider object
1641 */
1642 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
1643 {
1644 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
1645 }
1646
1647 /*
1648 * Internal helper that copies information from a zebra ns object; this is
1649 * called in the zebra main pthread context as part of dplane ctx init.
1650 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	/* Capture the namespace id for use by the dplane pthread */
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* Netlink builds: mark this as command info and copy the zebra
	 * ns's dplane netlink socket info.
	 */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
1661
1662 /*
1663 * Provider api to signal that work/events are available
1664 * for the dataplane pthread.
1665 */
1666 int dplane_provider_work_ready(void)
1667 {
1668 /* Note that during zebra startup, we may be offered work before
1669 * the dataplane pthread (and thread-master) are ready. We want to
1670 * enqueue the work, but the event-scheduling machinery may not be
1671 * available.
1672 */
1673 if (zdplane_info.dg_run) {
1674 thread_add_event(zdplane_info.dg_master,
1675 dplane_thread_loop, NULL, 0,
1676 &zdplane_info.dg_t_update);
1677 }
1678
1679 return AOK;
1680 }
1681
1682 /*
1683 * Kernel dataplane provider
1684 */
1685
1686 /*
1687 * Handler for kernel LSP updates
1688 */
1689 static enum zebra_dplane_result
1690 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
1691 {
1692 enum zebra_dplane_result res;
1693
1694 /* Call into the synchronous kernel-facing code here */
1695 res = kernel_lsp_update(ctx);
1696
1697 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1698 atomic_fetch_add_explicit(
1699 &zdplane_info.dg_lsp_errors, 1,
1700 memory_order_relaxed);
1701
1702 return res;
1703 }
1704
1705 /*
1706 * Handler for kernel pseudowire updates
1707 */
1708 static enum zebra_dplane_result
1709 kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
1710 {
1711 enum zebra_dplane_result res;
1712
1713 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1714 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
1715 dplane_ctx_get_pw_ifname(ctx),
1716 dplane_op2str(ctx->zd_op),
1717 dplane_ctx_get_pw_af(ctx),
1718 dplane_ctx_get_pw_local_label(ctx),
1719 dplane_ctx_get_pw_remote_label(ctx));
1720
1721 res = kernel_pw_update(ctx);
1722
1723 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1724 atomic_fetch_add_explicit(
1725 &zdplane_info.dg_pw_errors, 1,
1726 memory_order_relaxed);
1727
1728 return res;
1729 }
1730
1731 /*
1732 * Handler for kernel route updates
1733 */
1734 static enum zebra_dplane_result
1735 kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
1736 {
1737 enum zebra_dplane_result res;
1738
1739 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
1740 char dest_str[PREFIX_STRLEN];
1741
1742 prefix2str(dplane_ctx_get_dest(ctx),
1743 dest_str, sizeof(dest_str));
1744
1745 zlog_debug("%u:%s Dplane route update ctx %p op %s",
1746 dplane_ctx_get_vrf(ctx), dest_str,
1747 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
1748 }
1749
1750 /* Call into the synchronous kernel-facing code here */
1751 res = kernel_route_update(ctx);
1752
1753 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1754 atomic_fetch_add_explicit(
1755 &zdplane_info.dg_route_errors, 1,
1756 memory_order_relaxed);
1757
1758 return res;
1759 }
1760
1761 /*
1762 * Kernel provider callback
1763 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Per-cycle work limit, shared by all providers, for fairness */
	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		/* Take the next pending context; stop when the in queue
		 * is empty.
		 */
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* Dispatch to appropriate kernel-facing apis */
		switch (dplane_ctx_get_op(ctx)) {

		case DPLANE_OP_ROUTE_INSTALL:
		case DPLANE_OP_ROUTE_UPDATE:
		case DPLANE_OP_ROUTE_DELETE:
			res = kernel_dplane_route_update(ctx);
			break;

		case DPLANE_OP_LSP_INSTALL:
		case DPLANE_OP_LSP_UPDATE:
		case DPLANE_OP_LSP_DELETE:
			res = kernel_dplane_lsp_update(ctx);
			break;

		case DPLANE_OP_PW_INSTALL:
		case DPLANE_OP_PW_UNINSTALL:
			res = kernel_dplane_pw_update(ctx);
			break;

		default:
			/* Unrecognized op: count as a misc error and fail
			 * the request.
			 */
			atomic_fetch_add_explicit(
				&zdplane_info.dg_other_errors, 1,
				memory_order_relaxed);

			res = ZEBRA_DPLANE_REQUEST_FAILURE;
			break;
		}

		/* Record the result and pass the context along to the
		 * provider's out queue.
		 */
		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
1832
1833 #if DPLANE_TEST_PROVIDER
1834
1835 /*
1836 * Test dataplane provider plugin
1837 */
1838
1839 /*
1840 * Test provider process callback
1841 */
1842 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
1843 {
1844 struct zebra_dplane_ctx *ctx;
1845 int counter, limit;
1846
1847 /* Just moving from 'in' queue to 'out' queue */
1848
1849 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1850 zlog_debug("dplane provider '%s': processing",
1851 dplane_provider_get_name(prov));
1852
1853 limit = dplane_provider_get_work_limit(prov);
1854
1855 for (counter = 0; counter < limit; counter++) {
1856
1857 ctx = dplane_provider_dequeue_in_ctx(prov);
1858 if (ctx == NULL)
1859 break;
1860
1861 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1862
1863 dplane_provider_enqueue_out_ctx(prov, ctx);
1864 }
1865
1866 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1867 zlog_debug("dplane provider '%s': processed %d",
1868 dplane_provider_get_name(prov), counter);
1869
1870 /* Ensure that we'll run the work loop again if there's still
1871 * more work to do.
1872 */
1873 if (counter >= limit)
1874 dplane_provider_work_ready();
1875
1876 return 0;
1877 }
1878
1879 /*
1880 * Test provider shutdown/fini callback
1881 */
1882 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
1883 bool early)
1884 {
1885 if (IS_ZEBRA_DEBUG_DPLANE)
1886 zlog_debug("dplane provider '%s': %sshutdown",
1887 dplane_provider_get_name(prov),
1888 early ? "early " : "");
1889
1890 return 0;
1891 }
1892 #endif /* DPLANE_TEST_PROVIDER */
1893
1894 /*
1895 * Register default kernel provider
1896 */
1897 static void dplane_provider_init(void)
1898 {
1899 int ret;
1900
1901 ret = dplane_provider_register("Kernel",
1902 DPLANE_PRIO_KERNEL,
1903 DPLANE_PROV_FLAGS_DEFAULT,
1904 kernel_dplane_process_func,
1905 NULL,
1906 NULL, NULL);
1907
1908 if (ret != AOK)
1909 zlog_err("Unable to register kernel dplane provider: %d",
1910 ret);
1911
1912 #if DPLANE_TEST_PROVIDER
1913 /* Optional test provider ... */
1914 ret = dplane_provider_register("Test",
1915 DPLANE_PRIO_PRE_KERNEL,
1916 DPLANE_PROV_FLAGS_DEFAULT,
1917 test_dplane_process_func,
1918 test_dplane_shutdown_func,
1919 NULL /* data */, NULL);
1920
1921 if (ret != AOK)
1922 zlog_err("Unable to register test dplane provider: %d",
1923 ret);
1924 #endif /* DPLANE_TEST_PROVIDER */
1925 }
1926
1927 /* Indicates zebra shutdown/exit is in progress. Some operations may be
1928 * simplified or skipped during shutdown processing.
1929 */
1930 bool dplane_is_in_shutdown(void)
1931 {
1932 return zdplane_info.dg_is_shutdown;
1933 }
1934
1935 /*
1936 * Early or pre-shutdown, de-init notification api. This runs pretty
1937 * early during zebra shutdown, as a signal to stop new work and prepare
1938 * for updates generated by shutdown/cleanup activity, as zebra tries to
1939 * remove everything it's responsible for.
1940 * NB: This runs in the main zebra pthread context.
1941 */
1942 void zebra_dplane_pre_finish(void)
1943 {
1944 if (IS_ZEBRA_DEBUG_DPLANE)
1945 zlog_debug("Zebra dataplane pre-fini called");
1946
1947 zdplane_info.dg_is_shutdown = true;
1948
1949 /* TODO -- Notify provider(s) of pending shutdown */
1950 }
1951
1952 /*
1953 * Utility to determine whether work remains enqueued within the dplane;
1954 * used during system shutdown processing.
1955 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	/* Global lock guards the incoming queue and the provider list */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	if (ctx != NULL) {
		/* Unprocessed incoming work still exists */
		ret = true;
		goto done;
	}

	/* Walk the providers; each provider's own lock guards its queues,
	 * the global lock guards only list traversal.
	 */
	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* A non-NULL ctx here means some provider still has queued work */
	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
2001
2002 /*
2003 * Shutdown-time intermediate callback, used to determine when all pending
2004 * in-flight updates are done. If there's still work to do, reschedules itself.
2005 * If all work is done, schedules an event to the main zebra thread for
2006 * final zebra shutdown.
2007 * This runs in the dplane pthread context.
2008 */
2009 static int dplane_check_shutdown_status(struct thread *event)
2010 {
2011 if (IS_ZEBRA_DEBUG_DPLANE)
2012 zlog_debug("Zebra dataplane shutdown status check called");
2013
2014 if (dplane_work_pending()) {
2015 /* Reschedule dplane check on a short timer */
2016 thread_add_timer_msec(zdplane_info.dg_master,
2017 dplane_check_shutdown_status,
2018 NULL, 100,
2019 &zdplane_info.dg_t_shutdown_check);
2020
2021 /* TODO - give up and stop waiting after a short time? */
2022
2023 } else {
2024 /* We appear to be done - schedule a final callback event
2025 * for the zebra main pthread.
2026 */
2027 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
2028 }
2029
2030 return 0;
2031 }
2032
2033 /*
2034 * Shutdown, de-init api. This runs pretty late during shutdown,
2035 * after zebra has tried to free/remove/uninstall all routes during shutdown.
2036 * At this point, dplane work may still remain to be done, so we can't just
2037 * blindly terminate. If there's still work to do, we'll periodically check
2038 * and when done, we'll enqueue a task to the zebra main thread for final
2039 * termination processing.
2040 *
2041 * NB: This runs in the main zebra thread context.
2042 */
2043 void zebra_dplane_finish(void)
2044 {
2045 if (IS_ZEBRA_DEBUG_DPLANE)
2046 zlog_debug("Zebra dataplane fini called");
2047
2048 thread_add_event(zdplane_info.dg_master,
2049 dplane_check_shutdown_status, NULL, 0,
2050 &zdplane_info.dg_t_shutdown_check);
2051 }
2052
2053 /*
2054 * Main dataplane pthread event loop. The thread takes new incoming work
2055 * and offers it to the first provider. It then iterates through the
2056 * providers, taking complete work from each one and offering it
2057 * to the next in order. At each step, a limited number of updates are
2058 * processed during a cycle in order to provide some fairness.
2059 *
2060 * This loop through the providers is only run once, so that the dataplane
2061 * pthread can look for other pending work - such as i/o work on behalf of
2062 * providers.
2063 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			/* Tag each context with the first provider's id */
			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	/* 'counter' contexts were taken off the incoming queue */
	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* Maintain the provider's input counters and high-water
		 * mark; these are reported by the 'show' handlers.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);


	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
2244
2245 /*
2246 * Final phase of shutdown, after all work enqueued to dplane has been
2247 * processed. This is called from the zebra main pthread context.
2248 */
2249 void zebra_dplane_shutdown(void)
2250 {
2251 if (IS_ZEBRA_DEBUG_DPLANE)
2252 zlog_debug("Zebra dataplane shutdown called");
2253
2254 /* Stop dplane thread, if it's running */
2255
2256 zdplane_info.dg_run = false;
2257
2258 THREAD_OFF(zdplane_info.dg_t_update);
2259
2260 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
2261
2262 /* Destroy pthread */
2263 frr_pthread_destroy(zdplane_info.dg_pthread);
2264 zdplane_info.dg_pthread = NULL;
2265 zdplane_info.dg_master = NULL;
2266
2267 /* TODO -- Notify provider(s) of final shutdown */
2268
2269 /* TODO -- Clean-up provider objects */
2270
2271 /* TODO -- Clean queue(s), free memory */
2272 }
2273
2274 /*
2275 * Initialize the dataplane module during startup, internal/private version
2276 */
2277 static void zebra_dplane_init_internal(void)
2278 {
2279 memset(&zdplane_info, 0, sizeof(zdplane_info));
2280
2281 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
2282
2283 TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
2284 TAILQ_INIT(&zdplane_info.dg_providers_q);
2285
2286 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
2287
2288 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
2289
2290 /* Register default kernel 'provider' during init */
2291 dplane_provider_init();
2292 }
2293
2294 /*
2295 * Start the dataplane pthread. This step needs to be run later than the
2296 * 'init' step, in case zebra has fork-ed.
2297 */
2298 void zebra_dplane_start(void)
2299 {
2300 /* Start dataplane pthread */
2301
2302 struct frr_pthread_attr pattr = {
2303 .start = frr_pthread_attr_default.start,
2304 .stop = frr_pthread_attr_default.stop
2305 };
2306
2307 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
2308 "Zebra dplane");
2309
2310 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
2311
2312 zdplane_info.dg_run = true;
2313
2314 /* Enqueue an initial event for the dataplane pthread */
2315 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
2316 &zdplane_info.dg_t_update);
2317
2318 frr_pthread_run(zdplane_info.dg_pthread, NULL);
2319 }
2320
2321 /*
2322 * Initialize the dataplane module at startup; called by zebra rib_init()
2323 */
2324 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
2325 {
2326 zebra_dplane_init_internal();
2327 zdplane_info.dg_results_cb = results_fp;
2328 }