]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
zebra: dplane pseudowires including nexthop info
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "lib/libfrr.h"
21 #include "lib/debug.h"
22 #include "lib/frratomic.h"
23 #include "lib/frr_pthread.h"
24 #include "lib/memory.h"
25 #include "lib/queue.h"
26 #include "lib/zebra.h"
27 #include "zebra/zebra_router.h"
28 #include "zebra/zebra_memory.h"
29 #include "zebra/zebra_router.h"
30 #include "zebra/zebra_dplane.h"
31 #include "zebra/rt.h"
32 #include "zebra/debug.h"
33
/* Memory types for dataplane objects: update-context blocks and
 * provider registration blocks.
 */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

/* AOK ("all ok") is used as the success return code throughout this
 * module; define it locally if the platform headers don't provide it.
 */
#ifndef AOK
#  define AOK 0
#endif

/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;

/* Validation check macro for context blocks; compiled to a no-op
 * unless DPLANE_DEBUG is defined.
 */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

#  define DPLANE_CTX_VALID(p)	\
	assert((p) != NULL)

#else

#  define DPLANE_CTX_VALID(p)

#endif	/* DPLANE_DEBUG */
64
/*
 * Route information captured for route updates.
 *
 * This is a snapshot of the attributes the dataplane needs: the dplane
 * pthread must not dereference zebra 'core' structs, so scalar route
 * attributes and nexthops are copied into the context block.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	afi_t zd_afi;
	safi_t zd_safi;

	/* Route type (protocol); the zd_old_* twins below capture the
	 * previous route's values for the two-route 'update' case.
	 */
	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
103
/*
 * Pseudowire info for the dataplane.
 *
 * As with routes, this is a copy of the zebra_pw attributes so the
 * dataplane never touches the 'core' pseudowire struct.
 */
struct dplane_pw_info {
	char ifname[IF_NAMESIZE];
	ifindex_t ifindex;
	int type;
	int af;
	int status;
	uint32_t flags;
	/* PW remote endpoint address; af above selects v4/v6 */
	union g_addr dest;
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Nexthops: resolved from the RIB for the pw destination */
	struct nexthop_group nhg;

	/* Protocol-specific fields copied from the zebra_pw */
	union pw_protocol_fields fields;
};
123
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True for 'update' ops that carry both old and new route info */
	bool zd_is_update;

	/* Sequence numbers, used to detect stale results */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Support info for either route or LSP update; only the member
	 * matching zd_op is meaningful.
	 */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};

/* Flag that can be set by a pre-kernel provider as a signal that an update
 * should bypass the kernel.
 */
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01
173
174
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex: protects the provider's in/out context queues */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Work callback: invoked to let the provider process queued ctxs */
	int (*dp_fp)(struct zebra_dplane_provider *prov);

	/* Shutdown/cleanup callback; early_p distinguishes the early
	 * phase from final teardown.
	 */
	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters; atomics since they are read from other pthreads */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
220
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Counters; atomics since they are updated/read across pthreads */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)


/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p)   pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
294
295 /* Prototypes */
296 static int dplane_thread_loop(struct thread *event);
297 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
298 struct zebra_ns *zns);
299 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
300 enum dplane_op_e op);
301 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
302 enum dplane_op_e op);
303
304 /*
305 * Public APIs
306 */
307
/* Obtain the event 'master' used by the dataplane pthread; callers can
 * schedule events onto the dplane thread with this.
 */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}
313
314 /*
315 * Allocate a dataplane update context
316 */
317 static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
318 {
319 struct zebra_dplane_ctx *p;
320
321 /* TODO -- just alloc'ing memory, but would like to maintain
322 * a pool
323 */
324 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
325
326 return p;
327 }
328
/*
 * Free a dataplane results context. Internal allocations hanging off
 * the context depend on the operation type, so they are released in
 * an op-specific way before the block itself is freed. The caller's
 * pointer is NULLed to guard against reuse.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx == NULL)
		return;

	DPLANE_CTX_VALID(*pctx);

	/* TODO -- just freeing memory, but would like to maintain
	 * a pool
	 */

	/* Some internal allocations may need to be freed, depending on
	 * the type of info captured in the ctx.
	 */
	switch ((*pctx)->zd_op) {
	case DPLANE_OP_ROUTE_INSTALL:
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:

		/* Free allocated nexthops */
		if ((*pctx)->u.rinfo.zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);

			(*pctx)->u.rinfo.zd_ng.nexthop = NULL;
		}

		/* 'Old' nexthops are only present for non-netlink updates */
		if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);

			(*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
		}

		break;

	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
	{
		zebra_nhlfe_t *nhlfe, *next;

		/* Free allocated NHLFEs */
		for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
			next = nhlfe->next;

			zebra_mpls_nhlfe_del(nhlfe);
		}

		/* Clear pointers in lsp struct, in case we're cacheing
		 * free context structs.
		 */
		(*pctx)->u.lsp.nhlfe_list = NULL;
		(*pctx)->u.lsp.best_nhlfe = NULL;

		break;
	}

	case DPLANE_OP_PW_INSTALL:
	case DPLANE_OP_PW_UNINSTALL:
		/* Free allocated nexthops */
		if ((*pctx)->u.pw.nhg.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.pw.nhg.nexthop);

			(*pctx)->u.pw.nhg.nexthop = NULL;
		}
		break;

	case DPLANE_OP_NONE:
		break;
	}

	XFREE(MTYPE_DP_CTX, *pctx);
	*pctx = NULL;
}
408
/*
 * Return a context block to the dplane module after processing.
 * Public wrapper around the internal free routine; NULLs the caller's
 * pointer.
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
417
418 /* Enqueue a context block */
419 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
420 const struct zebra_dplane_ctx *ctx)
421 {
422 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
423 }
424
425 /* Append a list of context blocks to another list */
426 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
427 struct dplane_ctx_q *from_list)
428 {
429 if (TAILQ_FIRST(from_list)) {
430 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
431
432 /* And clear 'from' list */
433 TAILQ_INIT(from_list);
434 }
435 }
436
437 /* Dequeue a context block from the head of a list */
438 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
439 {
440 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
441
442 if (ctx)
443 TAILQ_REMOVE(q, ctx, zd_q_entries);
444
445 return ctx;
446 }
447
448 /*
449 * Accessors for information from the context object
450 */
451 enum zebra_dplane_result dplane_ctx_get_status(
452 const struct zebra_dplane_ctx *ctx)
453 {
454 DPLANE_CTX_VALID(ctx);
455
456 return ctx->zd_status;
457 }
458
459 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
460 enum zebra_dplane_result status)
461 {
462 DPLANE_CTX_VALID(ctx);
463
464 ctx->zd_status = status;
465 }
466
467 /* Retrieve last/current provider id */
468 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
469 {
470 DPLANE_CTX_VALID(ctx);
471 return ctx->zd_provider;
472 }
473
474 /* Providers run before the kernel can control whether a kernel
475 * update should be done.
476 */
477 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
478 {
479 DPLANE_CTX_VALID(ctx);
480
481 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
482 }
483
484 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
485 {
486 DPLANE_CTX_VALID(ctx);
487
488 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
489 }
490
491 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
492 {
493 DPLANE_CTX_VALID(ctx);
494
495 return ctx->zd_op;
496 }
497
498 const char *dplane_op2str(enum dplane_op_e op)
499 {
500 const char *ret = "UNKNOWN";
501
502 switch (op) {
503 case DPLANE_OP_NONE:
504 ret = "NONE";
505 break;
506
507 /* Route update */
508 case DPLANE_OP_ROUTE_INSTALL:
509 ret = "ROUTE_INSTALL";
510 break;
511 case DPLANE_OP_ROUTE_UPDATE:
512 ret = "ROUTE_UPDATE";
513 break;
514 case DPLANE_OP_ROUTE_DELETE:
515 ret = "ROUTE_DELETE";
516 break;
517
518 case DPLANE_OP_LSP_INSTALL:
519 ret = "LSP_INSTALL";
520 break;
521 case DPLANE_OP_LSP_UPDATE:
522 ret = "LSP_UPDATE";
523 break;
524 case DPLANE_OP_LSP_DELETE:
525 ret = "LSP_DELETE";
526 break;
527
528 case DPLANE_OP_PW_INSTALL:
529 ret = "PW_INSTALL";
530 break;
531 case DPLANE_OP_PW_UNINSTALL:
532 ret = "PW_UNINSTALL";
533 break;
534
535 }
536
537 return ret;
538 }
539
540 const char *dplane_res2str(enum zebra_dplane_result res)
541 {
542 const char *ret = "<Unknown>";
543
544 switch (res) {
545 case ZEBRA_DPLANE_REQUEST_FAILURE:
546 ret = "FAILURE";
547 break;
548 case ZEBRA_DPLANE_REQUEST_QUEUED:
549 ret = "QUEUED";
550 break;
551 case ZEBRA_DPLANE_REQUEST_SUCCESS:
552 ret = "SUCCESS";
553 break;
554 }
555
556 return ret;
557 }
558
559 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
560 {
561 DPLANE_CTX_VALID(ctx);
562
563 return &(ctx->u.rinfo.zd_dest);
564 }
565
566 /* Source prefix is a little special - return NULL for "no src prefix" */
567 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
568 {
569 DPLANE_CTX_VALID(ctx);
570
571 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
572 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
573 return NULL;
574 } else {
575 return &(ctx->u.rinfo.zd_src);
576 }
577 }
578
579 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
580 {
581 DPLANE_CTX_VALID(ctx);
582
583 return ctx->zd_is_update;
584 }
585
586 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
587 {
588 DPLANE_CTX_VALID(ctx);
589
590 return ctx->zd_seq;
591 }
592
593 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
594 {
595 DPLANE_CTX_VALID(ctx);
596
597 return ctx->zd_old_seq;
598 }
599
600 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
601 {
602 DPLANE_CTX_VALID(ctx);
603
604 return ctx->zd_vrf_id;
605 }
606
607 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
608 {
609 DPLANE_CTX_VALID(ctx);
610
611 return ctx->u.rinfo.zd_type;
612 }
613
614 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
615 {
616 DPLANE_CTX_VALID(ctx);
617
618 return ctx->u.rinfo.zd_old_type;
619 }
620
621 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
622 {
623 DPLANE_CTX_VALID(ctx);
624
625 return ctx->u.rinfo.zd_afi;
626 }
627
628 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
629 {
630 DPLANE_CTX_VALID(ctx);
631
632 return ctx->u.rinfo.zd_safi;
633 }
634
635 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
636 {
637 DPLANE_CTX_VALID(ctx);
638
639 return ctx->zd_table_id;
640 }
641
642 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
643 {
644 DPLANE_CTX_VALID(ctx);
645
646 return ctx->u.rinfo.zd_tag;
647 }
648
649 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
650 {
651 DPLANE_CTX_VALID(ctx);
652
653 return ctx->u.rinfo.zd_old_tag;
654 }
655
656 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
657 {
658 DPLANE_CTX_VALID(ctx);
659
660 return ctx->u.rinfo.zd_instance;
661 }
662
663 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
664 {
665 DPLANE_CTX_VALID(ctx);
666
667 return ctx->u.rinfo.zd_old_instance;
668 }
669
670 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
671 {
672 DPLANE_CTX_VALID(ctx);
673
674 return ctx->u.rinfo.zd_metric;
675 }
676
677 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
678 {
679 DPLANE_CTX_VALID(ctx);
680
681 return ctx->u.rinfo.zd_old_metric;
682 }
683
684 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
685 {
686 DPLANE_CTX_VALID(ctx);
687
688 return ctx->u.rinfo.zd_mtu;
689 }
690
691 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
692 {
693 DPLANE_CTX_VALID(ctx);
694
695 return ctx->u.rinfo.zd_nexthop_mtu;
696 }
697
698 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
699 {
700 DPLANE_CTX_VALID(ctx);
701
702 return ctx->u.rinfo.zd_distance;
703 }
704
705 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
706 {
707 DPLANE_CTX_VALID(ctx);
708
709 return ctx->u.rinfo.zd_old_distance;
710 }
711
712 const struct nexthop_group *dplane_ctx_get_ng(
713 const struct zebra_dplane_ctx *ctx)
714 {
715 DPLANE_CTX_VALID(ctx);
716
717 return &(ctx->u.rinfo.zd_ng);
718 }
719
720 const struct nexthop_group *dplane_ctx_get_old_ng(
721 const struct zebra_dplane_ctx *ctx)
722 {
723 DPLANE_CTX_VALID(ctx);
724
725 return &(ctx->u.rinfo.zd_old_ng);
726 }
727
728 const struct zebra_dplane_info *dplane_ctx_get_ns(
729 const struct zebra_dplane_ctx *ctx)
730 {
731 DPLANE_CTX_VALID(ctx);
732
733 return &(ctx->zd_ns_info);
734 }
735
736 /* Accessors for LSP information */
737
738 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
739 {
740 DPLANE_CTX_VALID(ctx);
741
742 return ctx->u.lsp.ile.in_label;
743 }
744
745 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
746 {
747 DPLANE_CTX_VALID(ctx);
748
749 return ctx->u.lsp.addr_family;
750 }
751
752 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
753 {
754 DPLANE_CTX_VALID(ctx);
755
756 return ctx->u.lsp.flags;
757 }
758
759 zebra_nhlfe_t *dplane_ctx_get_nhlfe(struct zebra_dplane_ctx *ctx)
760 {
761 DPLANE_CTX_VALID(ctx);
762
763 return ctx->u.lsp.nhlfe_list;
764 }
765
766 zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(struct zebra_dplane_ctx *ctx)
767 {
768 DPLANE_CTX_VALID(ctx);
769
770 return ctx->u.lsp.best_nhlfe;
771 }
772
773 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
774 {
775 DPLANE_CTX_VALID(ctx);
776
777 return ctx->u.lsp.num_ecmp;
778 }
779
780 const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
781 {
782 DPLANE_CTX_VALID(ctx);
783
784 return ctx->u.pw.ifname;
785 }
786
787 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
788 {
789 DPLANE_CTX_VALID(ctx);
790
791 return ctx->u.pw.local_label;
792 }
793
794 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
795 {
796 DPLANE_CTX_VALID(ctx);
797
798 return ctx->u.pw.remote_label;
799 }
800
801 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
802 {
803 DPLANE_CTX_VALID(ctx);
804
805 return ctx->u.pw.type;
806 }
807
808 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
809 {
810 DPLANE_CTX_VALID(ctx);
811
812 return ctx->u.pw.af;
813 }
814
815 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
816 {
817 DPLANE_CTX_VALID(ctx);
818
819 return ctx->u.pw.flags;
820 }
821
822 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
823 {
824 DPLANE_CTX_VALID(ctx);
825
826 return ctx->u.pw.status;
827 }
828
829 const union g_addr *dplane_ctx_get_pw_dest(
830 const struct zebra_dplane_ctx *ctx)
831 {
832 DPLANE_CTX_VALID(ctx);
833
834 return &(ctx->u.pw.dest);
835 }
836
837 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
838 const struct zebra_dplane_ctx *ctx)
839 {
840 DPLANE_CTX_VALID(ctx);
841
842 return &(ctx->u.pw.fields);
843 }
844
845 const struct nexthop_group *
846 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
847 {
848 DPLANE_CTX_VALID(ctx);
849
850 return &(ctx->u.pw.nhg);
851 }
852
853 /*
854 * End of dplane context accessors
855 */
856
857
858 /*
859 * Retrieve the limit on the number of pending, unprocessed updates.
860 */
861 uint32_t dplane_get_in_queue_limit(void)
862 {
863 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
864 memory_order_relaxed);
865 }
866
867 /*
868 * Configure limit on the number of pending, queued updates.
869 */
870 void dplane_set_in_queue_limit(uint32_t limit, bool set)
871 {
872 /* Reset to default on 'unset' */
873 if (!set)
874 limit = DPLANE_DEFAULT_MAX_QUEUED;
875
876 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
877 memory_order_relaxed);
878 }
879
880 /*
881 * Retrieve the current queue depth of incoming, unprocessed updates
882 */
883 uint32_t dplane_get_in_queue_len(void)
884 {
885 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
886 memory_order_seq_cst);
887 }
888
889 /*
890 * Common dataplane context init with zebra namespace info.
891 */
892 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
893 struct zebra_ns *zns,
894 bool is_update)
895 {
896 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
897
898 #if defined(HAVE_NETLINK)
899 /* Increment message counter after copying to context struct - may need
900 * two messages in some 'update' cases.
901 */
902 if (is_update)
903 zns->netlink_dplane.seq += 2;
904 else
905 zns->netlink_dplane.seq++;
906 #endif /* HAVE_NETLINK */
907
908 return AOK;
909 }
910
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Copies everything the dataplane will need out of the route_node and
 * route_entry - prefixes, scalar attributes, nexthops, and namespace
 * info - so the dplane pthread never touches zebra 'core' structs.
 * Also assigns the route's dplane sequence number.
 *
 * Returns AOK on success, EINVAL on bad arguments.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* old_type mirrors type here; the caller overwrites it for the
	 * two-route 'update' case.
	 */
	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	/* A zeroed src prefix is the "no source" encoding - see
	 * dplane_ctx_get_src().
	 */
	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
990
/*
 * Capture information for an LSP update in a dplane context.
 *
 * Copies the LSP's key, flags, and NHLFE list into the context so the
 * dataplane can work without touching the 'core' zebra_lsp_t.
 *
 * Returns AOK on success, ENOMEM if an NHLFE copy fails; on error the
 * partially-filled ctx is cleaned up by the normal ctx free path.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): assumes nhlfe->nexthop->nh_label is
		 * non-NULL for every NHLFE with a nexthop - confirm
		 * against the MPLS code that builds these lists.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->label[0]);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1054
/*
 * Capture information for a pseudowire update in a dplane context.
 *
 * Copies the zebra_pw attributes into the context, then resolves the
 * pw destination through the RIB (selected route's nexthops) so the
 * dataplane has complete nexthop info. Always returns AOK; a missing
 * route simply leaves the ctx nexthop group empty.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	struct prefix p;
	afi_t afi;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *re;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->u.pw.ifname, pw->ifname, sizeof(ctx->u.pw.ifname));

	ctx->zd_vrf_id = pw->vrf_id;
	ctx->u.pw.ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.dest = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	/* Capture nexthop info for the pw destination. We need to look
	 * up and use zebra datastructs, but we're running in the zebra
	 * pthread here so that should be ok.
	 */
	/* Build a host prefix for the pw nexthop address */
	memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
	p.family = pw->af;
	p.prefixlen = ((pw->af == AF_INET) ?
		       IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);

	afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
	table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
	if (table) {
		rn = route_node_match(table, &p);
		if (rn) {
			/* Find the selected route, if any; 're' is NULL
			 * if the loop completes without a match.
			 */
			RNODE_FOREACH_RE(rn, re) {
				if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
					break;
			}

			if (re)
				copy_nexthops(&(ctx->u.pw.nhg.nexthop),
					      re->ng.nexthop, NULL);

			route_unlock_node(rn);
		}
	}

	return AOK;
}
1127
/*
 * Enqueue a new update context onto the dataplane's inbound queue,
 * maintain the queued/high-water counters, and ensure an event is
 * active for the dataplane pthread.
 *
 * Returns the result of scheduling the dplane work event.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also: CAS loop so a racing
	 * writer with a larger value wins.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1173
/*
 * Utility that prepares a route update and enqueues it for processing.
 *
 * Allocates and initializes a context from rn/re; for the two-route
 * 'update' op, also captures the old route's attributes (and, without
 * netlink, its nexthops). On any failure the counters are updated and
 * the context is freed.
 *
 * Returns QUEUED on success, FAILURE otherwise.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1243
1244 /*
1245 * Enqueue a route 'add' for the dataplane.
1246 */
1247 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1248 struct route_entry *re)
1249 {
1250 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1251
1252 if (rn == NULL || re == NULL)
1253 goto done;
1254
1255 ret = dplane_route_update_internal(rn, re, NULL,
1256 DPLANE_OP_ROUTE_INSTALL);
1257
1258 done:
1259 return ret;
1260 }
1261
1262 /*
1263 * Enqueue a route update for the dataplane.
1264 */
1265 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1266 struct route_entry *re,
1267 struct route_entry *old_re)
1268 {
1269 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1270
1271 if (rn == NULL || re == NULL)
1272 goto done;
1273
1274 ret = dplane_route_update_internal(rn, re, old_re,
1275 DPLANE_OP_ROUTE_UPDATE);
1276 done:
1277 return ret;
1278 }
1279
1280 /*
1281 * Enqueue a route removal for the dataplane.
1282 */
1283 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1284 struct route_entry *re)
1285 {
1286 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1287
1288 if (rn == NULL || re == NULL)
1289 goto done;
1290
1291 ret = dplane_route_update_internal(rn, re, NULL,
1292 DPLANE_OP_ROUTE_DELETE);
1293
1294 done:
1295 return ret;
1296 }
1297
1298 /*
1299 * Enqueue LSP add for the dataplane.
1300 */
1301 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
1302 {
1303 enum zebra_dplane_result ret =
1304 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
1305
1306 return ret;
1307 }
1308
1309 /*
1310 * Enqueue LSP update for the dataplane.
1311 */
1312 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
1313 {
1314 enum zebra_dplane_result ret =
1315 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
1316
1317 return ret;
1318 }
1319
1320 /*
1321 * Enqueue LSP delete for the dataplane.
1322 */
1323 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
1324 {
1325 enum zebra_dplane_result ret =
1326 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
1327
1328 return ret;
1329 }
1330
1331 /*
1332 * Enqueue pseudowire install for the dataplane.
1333 */
1334 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
1335 {
1336 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
1337 }
1338
1339 /*
1340 * Enqueue pseudowire un-install for the dataplane.
1341 */
1342 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
1343 {
1344 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
1345 }
1346
1347 /*
1348 * Common internal LSP update utility
1349 */
1350 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
1351 enum dplane_op_e op)
1352 {
1353 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1354 int ret = EINVAL;
1355 struct zebra_dplane_ctx *ctx = NULL;
1356
1357 /* Obtain context block */
1358 ctx = dplane_ctx_alloc();
1359 if (ctx == NULL) {
1360 ret = ENOMEM;
1361 goto done;
1362 }
1363
1364 ret = dplane_ctx_lsp_init(ctx, op, lsp);
1365 if (ret != AOK)
1366 goto done;
1367
1368 ret = dplane_route_enqueue(ctx);
1369
1370 done:
1371 /* Update counter */
1372 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
1373 memory_order_relaxed);
1374
1375 if (ret == AOK)
1376 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1377 else {
1378 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
1379 memory_order_relaxed);
1380 if (ctx)
1381 dplane_ctx_free(&ctx);
1382 }
1383
1384 return result;
1385 }
1386
1387 /*
1388 * Internal, common handler for pseudowire updates.
1389 */
1390 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
1391 enum dplane_op_e op)
1392 {
1393 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1394 int ret;
1395 struct zebra_dplane_ctx *ctx = NULL;
1396
1397 ctx = dplane_ctx_alloc();
1398 if (ctx == NULL) {
1399 ret = ENOMEM;
1400 goto done;
1401 }
1402
1403 ret = dplane_ctx_pw_init(ctx, op, pw);
1404 if (ret != AOK)
1405 goto done;
1406
1407 ret = dplane_route_enqueue(ctx);
1408
1409 done:
1410 /* Update counter */
1411 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
1412 memory_order_relaxed);
1413
1414 if (ret == AOK)
1415 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1416 else {
1417 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
1418 memory_order_relaxed);
1419 if (ctx)
1420 dplane_ctx_free(&ctx);
1421 }
1422
1423 return result;
1424 }
1425
/*
 * Handler for 'show dplane'
 *
 * Emits the module-global dataplane counters to the vty session.
 * NOTE(review): the 'detailed' argument is accepted but not used in
 * this function yet - confirm intended behavior.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields,
		other_errs;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);
	other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
					  memory_order_relaxed);

	/* Note: the loads above are individually relaxed, so the values
	 * shown are a best-effort snapshot, not a consistent one.
	 */
	vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
	vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
	vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);

	return CMD_SUCCESS;
}
1463
/*
 * Handler for 'show dplane providers'
 *
 * Walks the priority-ordered provider list and prints each provider's
 * in/out counters and queue high-water marks.
 * NOTE(review): the 'detailed' argument is accepted but not used here.
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {
		/* NOTE(review): the global lock is dropped while a
		 * provider is examined; this presumes providers are
		 * never unregistered at runtime - confirm.
		 */
		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}
1501
1502 /*
1503 * Provider registration
1504 */
1505 int dplane_provider_register(const char *name,
1506 enum dplane_provider_prio prio,
1507 int flags,
1508 int (*fp)(struct zebra_dplane_provider *),
1509 int (*fini_fp)(struct zebra_dplane_provider *,
1510 bool early),
1511 void *data,
1512 struct zebra_dplane_provider **prov_p)
1513 {
1514 int ret = 0;
1515 struct zebra_dplane_provider *p = NULL, *last;
1516
1517 /* Validate */
1518 if (fp == NULL) {
1519 ret = EINVAL;
1520 goto done;
1521 }
1522
1523 if (prio <= DPLANE_PRIO_NONE ||
1524 prio > DPLANE_PRIO_LAST) {
1525 ret = EINVAL;
1526 goto done;
1527 }
1528
1529 /* Allocate and init new provider struct */
1530 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
1531
1532 pthread_mutex_init(&(p->dp_mutex), NULL);
1533 TAILQ_INIT(&(p->dp_ctx_in_q));
1534 TAILQ_INIT(&(p->dp_ctx_out_q));
1535
1536 p->dp_priority = prio;
1537 p->dp_fp = fp;
1538 p->dp_fini = fini_fp;
1539 p->dp_data = data;
1540
1541 /* Lock - the dplane pthread may be running */
1542 DPLANE_LOCK();
1543
1544 p->dp_id = ++zdplane_info.dg_provider_id;
1545
1546 if (name)
1547 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
1548 else
1549 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
1550 "provider-%u", p->dp_id);
1551
1552 /* Insert into list ordered by priority */
1553 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
1554 if (last->dp_priority > p->dp_priority)
1555 break;
1556 }
1557
1558 if (last)
1559 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
1560 else
1561 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
1562 dp_prov_link);
1563
1564 /* And unlock */
1565 DPLANE_UNLOCK();
1566
1567 if (IS_ZEBRA_DEBUG_DPLANE)
1568 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
1569 p->dp_name, p->dp_id, p->dp_priority);
1570
1571 done:
1572 if (prov_p)
1573 *prov_p = p;
1574
1575 return ret;
1576 }
1577
/* Accessors for provider attributes */

/* Return the provider's display name (fixed-size buffer in the struct) */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}

/* Return the provider's unique id, assigned at registration */
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}

/* Return the opaque data pointer the provider supplied at registration */
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}

/* Per-cycle work quota. Note this returns the module-global limit;
 * the 'prov' argument is currently unused.
 */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
1598
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag. For non-threaded providers these are no-ops,
 * since only the dataplane pthread touches their queues.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}

void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
1613
1614 /*
1615 * Dequeue and maintain associated counter
1616 */
1617 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
1618 struct zebra_dplane_provider *prov)
1619 {
1620 struct zebra_dplane_ctx *ctx = NULL;
1621
1622 dplane_provider_lock(prov);
1623
1624 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1625 if (ctx) {
1626 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
1627
1628 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
1629 memory_order_relaxed);
1630 }
1631
1632 dplane_provider_unlock(prov);
1633
1634 return ctx;
1635 }
1636
1637 /*
1638 * Dequeue work to a list, return count
1639 */
1640 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
1641 struct dplane_ctx_q *listp)
1642 {
1643 int limit, ret;
1644 struct zebra_dplane_ctx *ctx;
1645
1646 limit = zdplane_info.dg_updates_per_cycle;
1647
1648 dplane_provider_lock(prov);
1649
1650 for (ret = 0; ret < limit; ret++) {
1651 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1652 if (ctx) {
1653 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
1654
1655 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
1656 } else {
1657 break;
1658 }
1659 }
1660
1661 if (ret > 0)
1662 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
1663 memory_order_relaxed);
1664
1665 dplane_provider_unlock(prov);
1666
1667 return ret;
1668 }
1669
/*
 * Enqueue and maintain associated counter
 *
 * Append a completed context to the provider's outbound queue and bump
 * the provider's output counter. The counter is atomic, so it is
 * updated after the queue lock is released.
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}
1686
/*
 * Accessor for provider object
 *
 * True iff the provider's flags include DPLANE_PROV_FLAG_THREADED,
 * i.e. its queues may be touched from other pthreads and need locking.
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
1694
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* On netlink platforms, capture the dplane's dedicated netlink
	 * socket info as well.
	 */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
1709
1710 /*
1711 * Provider api to signal that work/events are available
1712 * for the dataplane pthread.
1713 */
1714 int dplane_provider_work_ready(void)
1715 {
1716 /* Note that during zebra startup, we may be offered work before
1717 * the dataplane pthread (and thread-master) are ready. We want to
1718 * enqueue the work, but the event-scheduling machinery may not be
1719 * available.
1720 */
1721 if (zdplane_info.dg_run) {
1722 thread_add_event(zdplane_info.dg_master,
1723 dplane_thread_loop, NULL, 0,
1724 &zdplane_info.dg_t_update);
1725 }
1726
1727 return AOK;
1728 }
1729
1730 /*
1731 * Kernel dataplane provider
1732 */
1733
1734 /*
1735 * Handler for kernel LSP updates
1736 */
1737 static enum zebra_dplane_result
1738 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
1739 {
1740 enum zebra_dplane_result res;
1741
1742 /* Call into the synchronous kernel-facing code here */
1743 res = kernel_lsp_update(ctx);
1744
1745 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1746 atomic_fetch_add_explicit(
1747 &zdplane_info.dg_lsp_errors, 1,
1748 memory_order_relaxed);
1749
1750 return res;
1751 }
1752
1753 /*
1754 * Handler for kernel pseudowire updates
1755 */
1756 static enum zebra_dplane_result
1757 kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
1758 {
1759 enum zebra_dplane_result res;
1760
1761 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1762 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
1763 dplane_ctx_get_pw_ifname(ctx),
1764 dplane_op2str(ctx->zd_op),
1765 dplane_ctx_get_pw_af(ctx),
1766 dplane_ctx_get_pw_local_label(ctx),
1767 dplane_ctx_get_pw_remote_label(ctx));
1768
1769 res = kernel_pw_update(ctx);
1770
1771 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1772 atomic_fetch_add_explicit(
1773 &zdplane_info.dg_pw_errors, 1,
1774 memory_order_relaxed);
1775
1776 return res;
1777 }
1778
1779 /*
1780 * Handler for kernel route updates
1781 */
1782 static enum zebra_dplane_result
1783 kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
1784 {
1785 enum zebra_dplane_result res;
1786
1787 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
1788 char dest_str[PREFIX_STRLEN];
1789
1790 prefix2str(dplane_ctx_get_dest(ctx),
1791 dest_str, sizeof(dest_str));
1792
1793 zlog_debug("%u:%s Dplane route update ctx %p op %s",
1794 dplane_ctx_get_vrf(ctx), dest_str,
1795 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
1796 }
1797
1798 /* Call into the synchronous kernel-facing code here */
1799 res = kernel_route_update(ctx);
1800
1801 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1802 atomic_fetch_add_explicit(
1803 &zdplane_info.dg_route_errors, 1,
1804 memory_order_relaxed);
1805
1806 return res;
1807 }
1808
1809 /*
1810 * Kernel provider callback
1811 */
1812 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
1813 {
1814 enum zebra_dplane_result res;
1815 struct zebra_dplane_ctx *ctx;
1816 int counter, limit;
1817
1818 limit = dplane_provider_get_work_limit(prov);
1819
1820 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1821 zlog_debug("dplane provider '%s': processing",
1822 dplane_provider_get_name(prov));
1823
1824 for (counter = 0; counter < limit; counter++) {
1825
1826 ctx = dplane_provider_dequeue_in_ctx(prov);
1827 if (ctx == NULL)
1828 break;
1829
1830 /* Dispatch to appropriate kernel-facing apis */
1831 switch (dplane_ctx_get_op(ctx)) {
1832
1833 case DPLANE_OP_ROUTE_INSTALL:
1834 case DPLANE_OP_ROUTE_UPDATE:
1835 case DPLANE_OP_ROUTE_DELETE:
1836 res = kernel_dplane_route_update(ctx);
1837 break;
1838
1839 case DPLANE_OP_LSP_INSTALL:
1840 case DPLANE_OP_LSP_UPDATE:
1841 case DPLANE_OP_LSP_DELETE:
1842 res = kernel_dplane_lsp_update(ctx);
1843 break;
1844
1845 case DPLANE_OP_PW_INSTALL:
1846 case DPLANE_OP_PW_UNINSTALL:
1847 res = kernel_dplane_pw_update(ctx);
1848 break;
1849
1850 default:
1851 atomic_fetch_add_explicit(
1852 &zdplane_info.dg_other_errors, 1,
1853 memory_order_relaxed);
1854
1855 res = ZEBRA_DPLANE_REQUEST_FAILURE;
1856 break;
1857 }
1858
1859 dplane_ctx_set_status(ctx, res);
1860
1861 dplane_provider_enqueue_out_ctx(prov, ctx);
1862 }
1863
1864 /* Ensure that we'll run the work loop again if there's still
1865 * more work to do.
1866 */
1867 if (counter >= limit) {
1868 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1869 zlog_debug("dplane provider '%s' reached max updates %d",
1870 dplane_provider_get_name(prov), counter);
1871
1872 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
1873 1, memory_order_relaxed);
1874
1875 dplane_provider_work_ready();
1876 }
1877
1878 return 0;
1879 }
1880
1881 #if DPLANE_TEST_PROVIDER
1882
1883 /*
1884 * Test dataplane provider plugin
1885 */
1886
1887 /*
1888 * Test provider process callback
1889 */
1890 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
1891 {
1892 struct zebra_dplane_ctx *ctx;
1893 int counter, limit;
1894
1895 /* Just moving from 'in' queue to 'out' queue */
1896
1897 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1898 zlog_debug("dplane provider '%s': processing",
1899 dplane_provider_get_name(prov));
1900
1901 limit = dplane_provider_get_work_limit(prov);
1902
1903 for (counter = 0; counter < limit; counter++) {
1904
1905 ctx = dplane_provider_dequeue_in_ctx(prov);
1906 if (ctx == NULL)
1907 break;
1908
1909 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1910
1911 dplane_provider_enqueue_out_ctx(prov, ctx);
1912 }
1913
1914 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1915 zlog_debug("dplane provider '%s': processed %d",
1916 dplane_provider_get_name(prov), counter);
1917
1918 /* Ensure that we'll run the work loop again if there's still
1919 * more work to do.
1920 */
1921 if (counter >= limit)
1922 dplane_provider_work_ready();
1923
1924 return 0;
1925 }
1926
/*
 * Test provider shutdown/fini callback
 *
 * Only logs; 'early' distinguishes the pre-shutdown notification from
 * the final one.
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
1940 #endif /* DPLANE_TEST_PROVIDER */
1941
/*
 * Register default kernel provider
 *
 * Called from zebra_dplane_init_internal(); registers the built-in
 * 'Kernel' provider (and the optional compiled-out 'Test' provider).
 * Registration failure is logged but not fatal here.
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       kernel_dplane_process_func,
				       NULL,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif /* DPLANE_TEST_PROVIDER */
}
1974
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing.
 * The flag is set by zebra_dplane_pre_finish().
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
1982
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra pthread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Mark shutdown-in-progress; read via dplane_is_in_shutdown() */
	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
1999
2000 /*
2001 * Utility to determine whether work remains enqueued within the dplane;
2002 * used during system shutdown processing.
2003 */
2004 static bool dplane_work_pending(void)
2005 {
2006 bool ret = false;
2007 struct zebra_dplane_ctx *ctx;
2008 struct zebra_dplane_provider *prov;
2009
2010 /* TODO -- just checking incoming/pending work for now, must check
2011 * providers
2012 */
2013 DPLANE_LOCK();
2014 {
2015 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
2016 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
2017 }
2018 DPLANE_UNLOCK();
2019
2020 if (ctx != NULL) {
2021 ret = true;
2022 goto done;
2023 }
2024
2025 while (prov) {
2026
2027 dplane_provider_lock(prov);
2028
2029 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2030 if (ctx == NULL)
2031 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
2032
2033 dplane_provider_unlock(prov);
2034
2035 if (ctx != NULL)
2036 break;
2037
2038 DPLANE_LOCK();
2039 prov = TAILQ_NEXT(prov, dp_prov_link);
2040 DPLANE_UNLOCK();
2041 }
2042
2043 if (ctx != NULL)
2044 ret = true;
2045
2046 done:
2047 return ret;
2048 }
2049
2050 /*
2051 * Shutdown-time intermediate callback, used to determine when all pending
2052 * in-flight updates are done. If there's still work to do, reschedules itself.
2053 * If all work is done, schedules an event to the main zebra thread for
2054 * final zebra shutdown.
2055 * This runs in the dplane pthread context.
2056 */
2057 static int dplane_check_shutdown_status(struct thread *event)
2058 {
2059 if (IS_ZEBRA_DEBUG_DPLANE)
2060 zlog_debug("Zebra dataplane shutdown status check called");
2061
2062 if (dplane_work_pending()) {
2063 /* Reschedule dplane check on a short timer */
2064 thread_add_timer_msec(zdplane_info.dg_master,
2065 dplane_check_shutdown_status,
2066 NULL, 100,
2067 &zdplane_info.dg_t_shutdown_check);
2068
2069 /* TODO - give up and stop waiting after a short time? */
2070
2071 } else {
2072 /* We appear to be done - schedule a final callback event
2073 * for the zebra main pthread.
2074 */
2075 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
2076 }
2077
2078 return 0;
2079 }
2080
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the polling check in the dplane pthread */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
2100
/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list.
	 * NOTE(review): 'prov' is dereferenced below without a NULL
	 * check; this presumes the kernel provider registered at init
	 * always exists - confirm.
	 */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* Maintain the provider's input counter and its queue
		 * high-water mark.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider; 'counter' is
		 * reused here to count the completed contexts collected.
		 */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);


	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
2292
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	/* Clear the run flag first so the loop/event handlers no-op */
	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
2321
2322 /*
2323 * Initialize the dataplane module during startup, internal/private version
2324 */
2325 static void zebra_dplane_init_internal(void)
2326 {
2327 memset(&zdplane_info, 0, sizeof(zdplane_info));
2328
2329 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
2330
2331 TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
2332 TAILQ_INIT(&zdplane_info.dg_providers_q);
2333
2334 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
2335
2336 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
2337
2338 /* Register default kernel 'provider' during init */
2339 dplane_provider_init();
2340 }
2341
/*
 * Start the dataplane pthread. This step needs to be run later than the
 * 'init' step, in case zebra has fork-ed.
 */
void zebra_dplane_start(void)
{
	/* Start dataplane pthread */

	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "Zebra dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	/* Set the run flag before scheduling, so the first event runs */
	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
2368
/*
 * Initialize the dataplane module at startup; called by zebra rib_init()
 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	/* Note: the internal init memsets zdplane_info, so the results
	 * callback must be stored after it runs.
	 */
	zebra_dplane_init_internal();
	zdplane_info.dg_results_cb = results_fp;
}