]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
Merge pull request #3775 from pguibert6WIND/ospf_missing_interface_handling_2
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_memory.h"
33 #include "zebra/zebra_router.h"
34 #include "zebra/zebra_dplane.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37
38 /* Memory type for context blocks */
39 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
40 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider")
41
42 #ifndef AOK
43 # define AOK 0
44 #endif
45
46 /* Enable test dataplane provider */
47 /*#define DPLANE_TEST_PROVIDER 1 */
48
49 /* Default value for max queued incoming updates */
50 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
51
52 /* Default value for new work per cycle */
53 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
54
55 /* Validation check macro for context blocks */
56 /* #define DPLANE_DEBUG 1 */
57
58 #ifdef DPLANE_DEBUG
59
60 # define DPLANE_CTX_VALID(p) \
61 assert((p) != NULL)
62
63 #else
64
65 # define DPLANE_CTX_VALID(p)
66
67 #endif /* DPLANE_DEBUG */
68
69 /*
70 * Route information captured for route updates.
71 */
/*
 * Route information captured for route updates: a private snapshot of
 * the route data, copied into the context so it can cross the boundary
 * between the zebra main pthread and the dplane pthread without
 * referencing 'core' structs.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* Address-family and sub-address-family of the route */
	afi_t zd_afi;
	safi_t zd_safi;

	/* Route type; the 'old' values capture the previous state for
	 * update operations.
	 */
	int zd_type;
	int zd_old_type;

	/* Tag and metric, current and previous */
	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	/* Protocol instance ids, current and previous */
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	/* Admin distances, current and previous */
	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
107
108 /*
109 * Pseudowire info for the dataplane
110 */
/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	char ifname[IF_NAMESIZE];	/* PW interface name */
	ifindex_t ifindex;
	int type;			/* PW encapsulation type */
	int af;				/* Address family of 'dest' */
	int status;			/* PW status */
	uint32_t flags;
	union g_addr dest;		/* PW remote endpoint address */
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Nexthops */
	struct nexthop_group nhg;

	/* Protocol-specific fields */
	union pw_protocol_fields fields;
};
127
128 /*
129 * Interface/prefix info for the dataplane
130 */
/*
 * Interface/prefix info for the dataplane: captures an interface
 * address update (install/uninstall).
 */
struct dplane_intf_info {

	char ifname[INTERFACE_NAMSIZ];
	ifindex_t ifindex;

	uint32_t metric;
	uint32_t flags;

/* Flag values for 'flags' above */
#define DPLANE_INTF_CONNECTED   (1 << 0) /* Connected peer, p2p */
#define DPLANE_INTF_SECONDARY   (1 << 1)
#define DPLANE_INTF_BROADCAST   (1 << 2)
#define DPLANE_INTF_HAS_DEST    (1 << 3)
#define DPLANE_INTF_HAS_LABEL   (1 << 4)

	/* Interface address/prefix */
	struct prefix prefix;

	/* Dest address, for p2p, or broadcast prefix */
	struct prefix dest_prefix;

	/* Optional label string; points at 'label_buf' for short labels,
	 * or at a separate heap allocation for longer ones (the free
	 * logic in dplane_ctx_free() relies on this distinction).
	 */
	char *label;
	char label_buf[32];
};
154
155 /*
156 * The context block used to exchange info about route updates across
157 * the boundary between the zebra main context (and pthread) and the
158 * dataplane layer (and pthread).
159 */
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True for 'update' ops that replace existing state */
	bool zd_is_update;

	/* Sequence numbers, used to detect stale results */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* Some updates may be generated by notifications: allow the
	 * plugin to notice and ignore results from its own notifications.
	 */
	uint32_t zd_notif_provider;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Support info for different kinds of updates; only the member
	 * matching zd_op is meaningful.
	 */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
		struct dplane_intf_info intf;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
205
206 /* Flag that can be set by a pre-kernel provider as a signal that an update
207 * should bypass the kernel.
208 */
209 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
210
211
212 /*
213 * Registration block for one dataplane provider.
214 */
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex, protecting the provider's queues */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Registration callbacks supplied by the plugin.
	 * NOTE(review): presumably dp_start runs at startup, dp_fp is the
	 * per-cycle work function, and dp_fini runs at shutdown ('early_p'
	 * distinguishing an early/late phase) - confirm against the
	 * provider registration API in zebra_dplane.h.
	 */
	int (*dp_start)(struct zebra_dplane_provider *prov);

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters: contexts in/out, queue depths and high-water marks,
	 * and errors reported by this provider.
	 */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
259
260 /*
261 * Globals
262 */
/*
 * Globals: all dataplane module state lives in this single static
 * struct, shared between the zebra main pthread and the dplane pthread.
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Control whether system route notifications should be produced. */
	bool dg_sys_route_notifs;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Counters: routes, lsps, pseudowires, interface addresses,
	 * and per-category error counts.
	 */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_intf_addrs_in;
	_Atomic uint32_t dg_intf_addr_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
326
327 /*
328 * Lock and unlock for interactions with the zebra 'core' pthread
329 */
330 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
331 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
332
333
334 /*
335 * Lock and unlock for individual providers
336 */
337 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
338 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
339
340 /* Prototypes */
341 static int dplane_thread_loop(struct thread *event);
342 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
343 struct zebra_ns *zns);
344 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
345 enum dplane_op_e op);
346 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
347 enum dplane_op_e op);
348 static enum zebra_dplane_result intf_addr_update_internal(
349 const struct interface *ifp, const struct connected *ifc,
350 enum dplane_op_e op);
351
352 /*
353 * Public APIs
354 */
355
356 /* Obtain thread_master for dataplane thread */
357 struct thread_master *dplane_get_thread_master(void)
358 {
359 return zdplane_info.dg_master;
360 }
361
362 /*
363 * Allocate a dataplane update context
364 */
365 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
366 {
367 struct zebra_dplane_ctx *p;
368
369 /* TODO -- just alloc'ing memory, but would like to maintain
370 * a pool
371 */
372 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
373
374 return p;
375 }
376
377 /* Enable system route notifications */
/* Enable production of system (kernel-originated) route notifications;
 * one-way switch, there is no corresponding 'disable'.
 */
void dplane_enable_sys_route_notifs(void)
{
	zdplane_info.dg_sys_route_notifs = true;
}
382
383 /*
384 * Free a dataplane results context.
385 */
386 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
387 {
388 if (pctx == NULL)
389 return;
390
391 DPLANE_CTX_VALID(*pctx);
392
393 /* TODO -- just freeing memory, but would like to maintain
394 * a pool
395 */
396
397 /* Some internal allocations may need to be freed, depending on
398 * the type of info captured in the ctx.
399 */
400 switch ((*pctx)->zd_op) {
401 case DPLANE_OP_ROUTE_INSTALL:
402 case DPLANE_OP_ROUTE_UPDATE:
403 case DPLANE_OP_ROUTE_DELETE:
404 case DPLANE_OP_SYS_ROUTE_ADD:
405 case DPLANE_OP_SYS_ROUTE_DELETE:
406 case DPLANE_OP_ROUTE_NOTIFY:
407
408 /* Free allocated nexthops */
409 if ((*pctx)->u.rinfo.zd_ng.nexthop) {
410 /* This deals with recursive nexthops too */
411 nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);
412
413 (*pctx)->u.rinfo.zd_ng.nexthop = NULL;
414 }
415
416 if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
417 /* This deals with recursive nexthops too */
418 nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
419
420 (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
421 }
422
423 break;
424
425 case DPLANE_OP_LSP_INSTALL:
426 case DPLANE_OP_LSP_UPDATE:
427 case DPLANE_OP_LSP_DELETE:
428 case DPLANE_OP_LSP_NOTIFY:
429 {
430 zebra_nhlfe_t *nhlfe, *next;
431
432 /* Free allocated NHLFEs */
433 for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
434 next = nhlfe->next;
435
436 zebra_mpls_nhlfe_del(nhlfe);
437 }
438
439 /* Clear pointers in lsp struct, in case we're cacheing
440 * free context structs.
441 */
442 (*pctx)->u.lsp.nhlfe_list = NULL;
443 (*pctx)->u.lsp.best_nhlfe = NULL;
444
445 break;
446 }
447
448 case DPLANE_OP_PW_INSTALL:
449 case DPLANE_OP_PW_UNINSTALL:
450 /* Free allocated nexthops */
451 if ((*pctx)->u.pw.nhg.nexthop) {
452 /* This deals with recursive nexthops too */
453 nexthops_free((*pctx)->u.pw.nhg.nexthop);
454
455 (*pctx)->u.pw.nhg.nexthop = NULL;
456 }
457 break;
458
459 case DPLANE_OP_ADDR_INSTALL:
460 case DPLANE_OP_ADDR_UNINSTALL:
461 /* Maybe free label string, if allocated */
462 if ((*pctx)->u.intf.label != NULL &&
463 (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) {
464 free((*pctx)->u.intf.label);
465 (*pctx)->u.intf.label = NULL;
466 }
467 break;
468
469 case DPLANE_OP_NONE:
470 break;
471 }
472
473 XFREE(MTYPE_DP_CTX, *pctx);
474 *pctx = NULL;
475 }
476
477 /*
478 * Return a context block to the dplane module after processing
479 */
/*
 * Return a context block to the dplane module after processing; the
 * caller's pointer is NULLed out.
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
485
486 /* Enqueue a context block */
/* Enqueue a context block at the tail of a list.
 * 'ctx' is taken as const for caller convenience; the cast is needed
 * because TAILQ linkage writes through the embedded entry.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
492
493 /* Append a list of context blocks to another list */
494 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
495 struct dplane_ctx_q *from_list)
496 {
497 if (TAILQ_FIRST(from_list)) {
498 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
499
500 /* And clear 'from' list */
501 TAILQ_INIT(from_list);
502 }
503 }
504
505 /* Dequeue a context block from the head of a list */
506 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
507 {
508 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
509
510 if (ctx)
511 TAILQ_REMOVE(q, ctx, zd_q_entries);
512
513 return ctx;
514 }
515
516 /*
517 * Accessors for information from the context object
518 */
/* Result status of the operation carried by the context */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* True if a pre-kernel provider asked to bypass the kernel update */
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Operation code (install/update/delete/notify, per dplane_op_e) */
void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
	DPLANE_CTX_VALID(ctx);
	ctx->zd_op = op;
}

enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
571
572 const char *dplane_op2str(enum dplane_op_e op)
573 {
574 const char *ret = "UNKNOWN";
575
576 switch (op) {
577 case DPLANE_OP_NONE:
578 ret = "NONE";
579 break;
580
581 /* Route update */
582 case DPLANE_OP_ROUTE_INSTALL:
583 ret = "ROUTE_INSTALL";
584 break;
585 case DPLANE_OP_ROUTE_UPDATE:
586 ret = "ROUTE_UPDATE";
587 break;
588 case DPLANE_OP_ROUTE_DELETE:
589 ret = "ROUTE_DELETE";
590 break;
591 case DPLANE_OP_ROUTE_NOTIFY:
592 ret = "ROUTE_NOTIFY";
593 break;
594
595 case DPLANE_OP_LSP_INSTALL:
596 ret = "LSP_INSTALL";
597 break;
598 case DPLANE_OP_LSP_UPDATE:
599 ret = "LSP_UPDATE";
600 break;
601 case DPLANE_OP_LSP_DELETE:
602 ret = "LSP_DELETE";
603 break;
604 case DPLANE_OP_LSP_NOTIFY:
605 ret = "LSP_NOTIFY";
606 break;
607
608 case DPLANE_OP_PW_INSTALL:
609 ret = "PW_INSTALL";
610 break;
611 case DPLANE_OP_PW_UNINSTALL:
612 ret = "PW_UNINSTALL";
613 break;
614
615 case DPLANE_OP_SYS_ROUTE_ADD:
616 ret = "SYS_ROUTE_ADD";
617 break;
618 case DPLANE_OP_SYS_ROUTE_DELETE:
619 ret = "SYS_ROUTE_DEL";
620 break;
621
622 case DPLANE_OP_ADDR_INSTALL:
623 ret = "ADDR_INSTALL";
624 break;
625 case DPLANE_OP_ADDR_UNINSTALL:
626 ret = "ADDR_UNINSTALL";
627 break;
628
629 }
630
631 return ret;
632 }
633
634 const char *dplane_res2str(enum zebra_dplane_result res)
635 {
636 const char *ret = "<Unknown>";
637
638 switch (res) {
639 case ZEBRA_DPLANE_REQUEST_FAILURE:
640 ret = "FAILURE";
641 break;
642 case ZEBRA_DPLANE_REQUEST_QUEUED:
643 ret = "QUEUED";
644 break;
645 case ZEBRA_DPLANE_REQUEST_SUCCESS:
646 ret = "SUCCESS";
647 break;
648 }
649
650 return ret;
651 }
652
/* Destination prefix of the route carried by the context */
void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
			 const struct prefix *dest)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
}

const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}
667
668 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
669 {
670 DPLANE_CTX_VALID(ctx);
671
672 if (src)
673 prefix_copy(&(ctx->u.rinfo.zd_src), src);
674 else
675 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
676 }
677
678 /* Source prefix is a little special - return NULL for "no src prefix" */
679 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
680 {
681 DPLANE_CTX_VALID(ctx);
682
683 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
684 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
685 return NULL;
686 } else {
687 return &(ctx->u.rinfo.zd_src);
688 }
689 }
690
/* True for 'update' operations (replace existing state) */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Sequence number for this update; used to detect stale results */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* VRF id of the update */
void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_vrf_id = vrf;
}

vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* True if the update was generated by a provider notification;
 * provider id 0 means "not from a notification".
 */
bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->zd_notif_provider != 0);
}

uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_notif_provider;
}

void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
				   uint32_t id)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_notif_provider = id;
}
747
/* Accessors for the scalar route attributes in ctx->u.rinfo; the
 * '_old_' variants return the previous values captured for update ops.
 */

/* Route type */
void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_type = type;
}

int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

/* Address-family */
void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_afi = afi;
}

afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

/* Sub-address-family */
void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_safi = safi;
}

safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

/* Kernel routing table id (stored on the ctx, not in rinfo) */
void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_table_id = table;
}

uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

/* Route tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_tag = tag;
}

route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

/* Protocol instance id */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_instance = instance;
}

uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}

/* Metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

/* MTUs: route mtu and nexthop mtu */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

/* Admin distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_distance = distance;
}

uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}
901
902 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
903 {
904 DPLANE_CTX_VALID(ctx);
905
906 if (ctx->u.rinfo.zd_ng.nexthop) {
907 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
908 ctx->u.rinfo.zd_ng.nexthop = NULL;
909 }
910 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), nh, NULL);
911 }
912
/* Nexthop group captured for the route */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

/* "Previous" nexthop group, for update operations */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

/* Namespace info captured for kernel communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
936
937 /* Accessors for LSP information */
938
/* Incoming MPLS label of the LSP */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.ile.in_label = label;
}

/* Address family associated with the LSP */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
				uint8_t family)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.addr_family = family;
}

/* LSP flags */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
			      uint32_t flags)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.flags = flags;
}

/* Head of the LSP's NHLFE list */
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}

/* Add an NHLFE to the context's LSP; the new entry is owned by the
 * context (freed in dplane_ctx_free()).
 */
zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
				    enum lsp_types_t lsp_type,
				    enum nexthop_types_t nh_type,
				    union g_addr *gate,
				    ifindex_t ifindex,
				    mpls_label_t out_label)
{
	zebra_nhlfe_t *nhlfe;

	DPLANE_CTX_VALID(ctx);

	nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
					 lsp_type, nh_type, gate,
					 ifindex, out_label);

	return nhlfe;
}

/* 'Best' NHLFE of the LSP */
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

const zebra_nhlfe_t *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
			  zebra_nhlfe_t *nhlfe)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.best_nhlfe = nhlfe;
	return ctx->u.lsp.best_nhlfe;
}

/* ECMP count of the LSP */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
1032
/* Accessors for pseudowire information in ctx->u.pw */

const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.ifname;
}

mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Address family of the PW destination */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

/* Remote endpoint address of the PW */
const union g_addr *dplane_ctx_get_pw_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.dest);
}

/* Protocol-specific PW fields */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/* Nexthop group captured for the PW */
const struct nexthop_group *
dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nhg);
}
1105
1106 /* Accessors for interface information */
const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.ifname;
}

ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.ifindex;
}

uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.metric;
}

/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
}

bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
}

bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
}

/* The interface address/prefix being installed/removed */
const struct prefix *dplane_ctx_get_intf_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.prefix);
}

bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
}

/* Peer/broadcast prefix; NULL unless DPLANE_INTF_HAS_DEST is set */
const struct prefix *dplane_ctx_get_intf_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST)
		return &(ctx->u.intf.dest_prefix);
	else
		return NULL;
}

bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
}

const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.label;
}
1189
1190 /*
1191 * End of dplane context accessors
1192 */
1193
1194
1195 /*
1196 * Retrieve the limit on the number of pending, unprocessed updates.
1197 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}

/*
 * Configure limit on the number of pending, queued updates. With
 * 'set' false the limit is reset to DPLANE_DEFAULT_MAX_QUEUED and
 * the 'limit' argument is ignored.
 */
void dplane_set_in_queue_limit(uint32_t limit, bool set)
{
	/* Reset to default on 'unset' */
	if (!set)
		limit = DPLANE_DEFAULT_MAX_QUEUED;

	atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
			      memory_order_relaxed);
}

/*
 * Retrieve the current queue depth of incoming, unprocessed updates.
 * NOTE(review): this uses memory_order_seq_cst while the other counter
 * accesses here are relaxed - unclear whether that is deliberate.
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
1225
1226 /*
1227 * Common dataplane context init with zebra namespace info.
1228 */
/*
 * Common dataplane context init with zebra namespace info: copies ns
 * info into the context, and (on netlink platforms) reserves the
 * netlink sequence number(s) this update will consume. Returns AOK.
 */
static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
			      struct zebra_ns *zns,
			      bool is_update)
{
	dplane_info_from_zns(&(ctx->zd_ns_info), zns);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (is_update)
		zns->netlink_dplane.seq += 2;
	else
		zns->netlink_dplane.seq++;
#endif /* HAVE_NETLINK */

	return AOK;
}
1247
/*
 * Initialize a context block for a route update from zebra data structs.
 * Copies everything the dataplane needs out of the route-node/route-entry
 * so the dplane pthread never dereferences zebra's 'core' structs.
 * Returns AOK on success, EINVAL for missing args.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* 'old' values start equal to 'new'; the update path overwrites
	 * them later when there is a distinct old route.
	 */
	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	/* Capture metrics and misc attributes by value */
	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Don't need some info when capturing a system notification */
	if (op == DPLANE_OP_SYS_ROUTE_ADD ||
	    op == DPLANE_OP_SYS_ROUTE_DELETE) {
		ret = AOK;
		goto done;
	}

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	/* A ROUTE_UPDATE op consumes two netlink sequence numbers */
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
1331
/*
 * Capture information for an LSP update in a dplane context.
 * Copies the LSP's attributes and its nhlfe list into the context;
 * returns AOK on success, ENOMEM if an nhlfe copy fails.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info; an LSP_UPDATE op consumes two netlink
	 * sequence numbers.
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): nh_label is dereferenced without a NULL
		 * check - assumes every nhlfe nexthop carries at least one
		 * label; confirm against nhlfe construction paths.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->label[0]);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1395
/*
 * Capture information for a pseudowire update in a dplane context.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	struct prefix p;
	afi_t afi;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *re;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->u.pw.ifname, pw->ifname, sizeof(ctx->u.pw.ifname));

	/* Copy pw attributes by value into the context */
	ctx->zd_vrf_id = pw->vrf_id;
	ctx->u.pw.ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.dest = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	/* Capture nexthop info for the pw destination. We need to look
	 * up and use zebra datastructs, but we're running in the zebra
	 * pthread here so that should be ok.
	 */
	/* Build a host-length prefix from the pw nexthop address */
	memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
	p.family = pw->af;
	p.prefixlen = ((pw->af == AF_INET) ?
		       IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);

	afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
	table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
	if (table) {
		rn = route_node_match(table, &p);
		if (rn) {
			/* Use the SELECTED route-entry's nexthops, if any */
			RNODE_FOREACH_RE(rn, re) {
				if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
					break;
			}

			if (re)
				copy_nexthops(&(ctx->u.pw.nhg.nexthop),
					      re->ng.nexthop, NULL);

			route_unlock_node(rn);
		}
	}

	return AOK;
}
1468
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane pthread.
 * Ownership of 'ctx' passes to the inbound queue. Also maintains the
 * queued counter and its high-water mark.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	/* Bump queued-updates counter; atomics because multiple pthreads
	 * touch these counters.
	 */
	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	/* CAS loop: another pthread may race to raise the max; the weak
	 * exchange refreshes 'high' on failure.
	 */
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1514
/*
 * Utility that prepares a route update and enqueues it for processing.
 * 'old_re' is only consulted for DPLANE_OP_ROUTE_UPDATE when it differs
 * from 're'. Returns QUEUED on success; on failure the context is freed
 * here and FAILURE is returned.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			/* Stamp the old route too, so stale results can be
			 * detected for it as well.
			 */
			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Account the error and release the unqueued context */
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1579
1580 /*
1581 * Enqueue a route 'add' for the dataplane.
1582 */
1583 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1584 struct route_entry *re)
1585 {
1586 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1587
1588 if (rn == NULL || re == NULL)
1589 goto done;
1590
1591 ret = dplane_route_update_internal(rn, re, NULL,
1592 DPLANE_OP_ROUTE_INSTALL);
1593
1594 done:
1595 return ret;
1596 }
1597
1598 /*
1599 * Enqueue a route update for the dataplane.
1600 */
1601 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1602 struct route_entry *re,
1603 struct route_entry *old_re)
1604 {
1605 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1606
1607 if (rn == NULL || re == NULL)
1608 goto done;
1609
1610 ret = dplane_route_update_internal(rn, re, old_re,
1611 DPLANE_OP_ROUTE_UPDATE);
1612 done:
1613 return ret;
1614 }
1615
1616 /*
1617 * Enqueue a route removal for the dataplane.
1618 */
1619 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1620 struct route_entry *re)
1621 {
1622 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1623
1624 if (rn == NULL || re == NULL)
1625 goto done;
1626
1627 ret = dplane_route_update_internal(rn, re, NULL,
1628 DPLANE_OP_ROUTE_DELETE);
1629
1630 done:
1631 return ret;
1632 }
1633
1634 /*
1635 * Notify the dplane when system/connected routes change.
1636 */
1637 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
1638 struct route_entry *re)
1639 {
1640 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1641
1642 /* Ignore this event unless a provider plugin has requested it. */
1643 if (!zdplane_info.dg_sys_route_notifs) {
1644 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1645 goto done;
1646 }
1647
1648 if (rn == NULL || re == NULL)
1649 goto done;
1650
1651 ret = dplane_route_update_internal(rn, re, NULL,
1652 DPLANE_OP_SYS_ROUTE_ADD);
1653
1654 done:
1655 return ret;
1656 }
1657
1658 /*
1659 * Notify the dplane when system/connected routes are deleted.
1660 */
1661 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
1662 struct route_entry *re)
1663 {
1664 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1665
1666 /* Ignore this event unless a provider plugin has requested it. */
1667 if (!zdplane_info.dg_sys_route_notifs) {
1668 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1669 goto done;
1670 }
1671
1672 if (rn == NULL || re == NULL)
1673 goto done;
1674
1675 ret = dplane_route_update_internal(rn, re, NULL,
1676 DPLANE_OP_SYS_ROUTE_DELETE);
1677
1678 done:
1679 return ret;
1680 }
1681
1682 /*
1683 * Update from an async notification, to bring other fibs up-to-date.
1684 */
1685 enum zebra_dplane_result
1686 dplane_route_notif_update(struct route_node *rn,
1687 struct route_entry *re,
1688 enum dplane_op_e op,
1689 struct zebra_dplane_ctx *ctx)
1690 {
1691 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1692 struct zebra_dplane_ctx *new_ctx = NULL;
1693 struct nexthop *nexthop;
1694
1695 if (rn == NULL || re == NULL)
1696 goto done;
1697
1698 new_ctx = dplane_ctx_alloc();
1699 if (new_ctx == NULL)
1700 goto done;
1701
1702 /* Init context with info from zebra data structs */
1703 dplane_ctx_route_init(new_ctx, op, rn, re);
1704
1705 /* For add/update, need to adjust the nexthops so that we match
1706 * the notification state, which may not be the route-entry/RIB
1707 * state.
1708 */
1709 if (op == DPLANE_OP_ROUTE_UPDATE ||
1710 op == DPLANE_OP_ROUTE_INSTALL) {
1711
1712 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
1713 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
1714
1715 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
1716 (rib_active_nhg(re))->nexthop, NULL);
1717
1718 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
1719 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
1720
1721 }
1722
1723 /* Capture info about the source of the notification, in 'ctx' */
1724 dplane_ctx_set_notif_provider(new_ctx,
1725 dplane_ctx_get_notif_provider(ctx));
1726
1727 dplane_route_enqueue(new_ctx);
1728
1729 ret = ZEBRA_DPLANE_REQUEST_QUEUED;
1730
1731 done:
1732 return ret;
1733 }
1734
1735 /*
1736 * Enqueue LSP add for the dataplane.
1737 */
1738 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
1739 {
1740 enum zebra_dplane_result ret =
1741 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
1742
1743 return ret;
1744 }
1745
1746 /*
1747 * Enqueue LSP update for the dataplane.
1748 */
1749 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
1750 {
1751 enum zebra_dplane_result ret =
1752 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
1753
1754 return ret;
1755 }
1756
1757 /*
1758 * Enqueue LSP delete for the dataplane.
1759 */
1760 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
1761 {
1762 enum zebra_dplane_result ret =
1763 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
1764
1765 return ret;
1766 }
1767
/* Update or un-install resulting from an async notification.
 * Builds an LSP context, tags it with the notifying provider's id, and
 * enqueues it on the common dplane inbound queue. Returns QUEUED on
 * success; on failure frees the context and returns FAILURE.
 */
enum zebra_dplane_result
dplane_lsp_notif_update(zebra_lsp_t *lsp,
			enum dplane_op_e op,
			struct zebra_dplane_ctx *notif_ctx)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Copy the LSP data into the context */
	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	/* Capture info about the source of the notification */
	dplane_ctx_set_notif_provider(
		ctx,
		dplane_ctx_get_notif_provider(notif_ctx));

	/* LSP contexts share the common inbound queue */
	ret = dplane_route_enqueue(ctx);

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}
	return result;
}
1811
1812 /*
1813 * Enqueue pseudowire install for the dataplane.
1814 */
1815 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
1816 {
1817 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
1818 }
1819
1820 /*
1821 * Enqueue pseudowire un-install for the dataplane.
1822 */
1823 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
1824 {
1825 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
1826 }
1827
/*
 * Common internal LSP update utility: allocates a context, copies the
 * LSP into it, and enqueues it. Maintains the in/error counters.
 * Returns QUEUED on success, FAILURE otherwise (context freed here).
 */
static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
						    enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	/* LSP contexts share the common inbound queue */
	ret = dplane_route_enqueue(ctx);

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
1862
/*
 * Internal, common handler for pseudowire updates: allocates a context,
 * captures the pw state, and enqueues it. Maintains the in/error
 * counters. Returns QUEUED on success, FAILURE otherwise.
 */
static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
						   enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret;
	struct zebra_dplane_ctx *ctx = NULL;

	ctx = dplane_ctx_alloc();

	ret = dplane_ctx_pw_init(ctx, op, pw);
	if (ret != AOK)
		goto done;

	/* Pseudowire contexts share the common inbound queue */
	ret = dplane_route_enqueue(ctx);

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
1896
/*
 * Enqueue interface address add for the dataplane.
 * On non-netlink platforms with ifaliasreq support, performs extra
 * validation and may enqueue a conflicting-route removal first.
 */
enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
					      const struct connected *ifc)
{
#if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
	/* Extra checks for this OS path. */

	/* Don't configure PtP addresses on broadcast ifs or reverse */
	if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
		if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
			zlog_debug("Failed to set intf addr: mismatch p2p and connected");

		return ZEBRA_DPLANE_REQUEST_FAILURE;
	}

	/* Ensure that no existing installed v4 route conflicts with
	 * the new interface prefix. This check must be done in the
	 * zebra pthread context, and any route delete (if needed)
	 * is enqueued before the interface address programming attempt.
	 */
	if (ifc->address->family == AF_INET) {
		struct prefix_ipv4 *p;

		p = (struct prefix_ipv4 *)ifc->address;
		rib_lookup_and_pushup(p, ifp->vrf->vrf_id);
	}
#endif

	/* Common helper performs the context setup and enqueue */
	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
}
1929
1930 /*
1931 * Enqueue interface address remove/uninstall for the dataplane.
1932 */
1933 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
1934 const struct connected *ifc)
1935 {
1936 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
1937 }
1938
/*
 * Common helper for interface-address install/uninstall: captures the
 * interface and connected-address info into a context and enqueues it.
 * Maintains the intf-addr in/error counters.
 */
static enum zebra_dplane_result intf_addr_update_internal(
	const struct interface *ifp, const struct connected *ifc,
	enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char addr_str[PREFIX_STRLEN];

		prefix2str(ifc->address, addr_str, sizeof(addr_str));

		zlog_debug("init intf ctx %s: idx %d, addr %u:%s",
			   dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
			   addr_str);
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf->vrf_id;

	zns = zebra_ns_lookup(ifp->vrf->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	/* Init the interface-addr-specific area */
	memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));

	strlcpy(ctx->u.intf.ifname, ifp->name, sizeof(ctx->u.intf.ifname));
	ctx->u.intf.ifindex = ifp->ifindex;
	ctx->u.intf.prefix = *(ifc->address);

	if (if_is_broadcast(ifp))
		ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;

	/* Peer address takes precedence; otherwise an optional
	 * destination/broadcast address may be present.
	 */
	if (CONNECTED_PEER(ifc)) {
		ctx->u.intf.dest_prefix = *(ifc->destination);
		ctx->u.intf.flags |=
			(DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
	} else if (ifc->destination) {
		ctx->u.intf.dest_prefix = *(ifc->destination);
		ctx->u.intf.flags |= DPLANE_INTF_HAS_DEST;
	}

	if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
		ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;

	if (ifc->label) {
		size_t len;

		ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;

		/* Use embedded buffer if it's adequate; else allocate. */
		len = strlen(ifc->label);

		if (len < sizeof(ctx->u.intf.label_buf)) {
			strlcpy(ctx->u.intf.label_buf, ifc->label,
				sizeof(ctx->u.intf.label_buf));
			ctx->u.intf.label = ctx->u.intf.label_buf;
		} else {
			/* NOTE(review): plain strdup, not an MTYPE alloc -
			 * presumably released with free() when the ctx is
			 * destroyed; confirm in dplane_ctx_free.
			 */
			ctx->u.intf.label = strdup(ifc->label);
		}
	}

	ret = dplane_route_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
					  1, memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2023
/*
 * Handler for 'show dplane': prints the global dataplane counters.
 * The 'detailed' flag is currently unused in this body.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields,
		other_errs;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);
	other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
					  memory_order_relaxed);

	vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
	vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
	vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);

	return CMD_SUCCESS;
}
2061
/*
 * Handler for 'show dplane providers': prints per-provider queue
 * counters. The list is walked taking the dplane lock only around each
 * pointer read; 'detailed' is currently unused in this body.
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {

		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}
2099
2100 /*
2101 * Provider registration
2102 */
2103 int dplane_provider_register(const char *name,
2104 enum dplane_provider_prio prio,
2105 int flags,
2106 int (*start_fp)(struct zebra_dplane_provider *),
2107 int (*fp)(struct zebra_dplane_provider *),
2108 int (*fini_fp)(struct zebra_dplane_provider *,
2109 bool early),
2110 void *data,
2111 struct zebra_dplane_provider **prov_p)
2112 {
2113 int ret = 0;
2114 struct zebra_dplane_provider *p = NULL, *last;
2115
2116 /* Validate */
2117 if (fp == NULL) {
2118 ret = EINVAL;
2119 goto done;
2120 }
2121
2122 if (prio <= DPLANE_PRIO_NONE ||
2123 prio > DPLANE_PRIO_LAST) {
2124 ret = EINVAL;
2125 goto done;
2126 }
2127
2128 /* Allocate and init new provider struct */
2129 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
2130
2131 pthread_mutex_init(&(p->dp_mutex), NULL);
2132 TAILQ_INIT(&(p->dp_ctx_in_q));
2133 TAILQ_INIT(&(p->dp_ctx_out_q));
2134
2135 p->dp_priority = prio;
2136 p->dp_fp = fp;
2137 p->dp_start = start_fp;
2138 p->dp_fini = fini_fp;
2139 p->dp_data = data;
2140
2141 /* Lock - the dplane pthread may be running */
2142 DPLANE_LOCK();
2143
2144 p->dp_id = ++zdplane_info.dg_provider_id;
2145
2146 if (name)
2147 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
2148 else
2149 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
2150 "provider-%u", p->dp_id);
2151
2152 /* Insert into list ordered by priority */
2153 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
2154 if (last->dp_priority > p->dp_priority)
2155 break;
2156 }
2157
2158 if (last)
2159 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
2160 else
2161 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
2162 dp_prov_link);
2163
2164 /* And unlock */
2165 DPLANE_UNLOCK();
2166
2167 if (IS_ZEBRA_DEBUG_DPLANE)
2168 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
2169 p->dp_name, p->dp_id, p->dp_priority);
2170
2171 done:
2172 if (prov_p)
2173 *prov_p = p;
2174
2175 return ret;
2176 }
2177
2178 /* Accessors for provider attributes */
/* Return the provider's display name. */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}
2183
/* Return the provider's unique id, assigned at registration. */
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}
2188
/* Return the opaque data pointer the provider registered with. */
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}
2193
/* Return the per-cycle work limit; currently a single global value,
 * so the 'prov' arg is unused.
 */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
2198
2199 /* Lock/unlock a provider's mutex - iff the provider was registered with
2200 * the THREADED flag.
2201 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	/* No-op for unthreaded providers */
	if (!dplane_provider_is_threaded(prov))
		return;

	DPLANE_PROV_LOCK(prov);
}
2207
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	/* No-op for unthreaded providers */
	if (!dplane_provider_is_threaded(prov))
		return;

	DPLANE_PROV_UNLOCK(prov);
}
2213
/*
 * Dequeue and maintain associated counter: pops one context from the
 * provider's inbound queue, or returns NULL if the queue is empty.
 */
struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
	struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx = NULL;

	dplane_provider_lock(prov);

	ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
	if (ctx) {
		TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

		/* Keep the queued-item counter in sync */
		atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
					  memory_order_relaxed);
	}

	dplane_provider_unlock(prov);

	return ctx;
}
2236
2237 /*
2238 * Dequeue work to a list, return count
2239 */
2240 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
2241 struct dplane_ctx_q *listp)
2242 {
2243 int limit, ret;
2244 struct zebra_dplane_ctx *ctx;
2245
2246 limit = zdplane_info.dg_updates_per_cycle;
2247
2248 dplane_provider_lock(prov);
2249
2250 for (ret = 0; ret < limit; ret++) {
2251 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2252 if (ctx) {
2253 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2254
2255 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
2256 } else {
2257 break;
2258 }
2259 }
2260
2261 if (ret > 0)
2262 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
2263 memory_order_relaxed);
2264
2265 dplane_provider_unlock(prov);
2266
2267 return ret;
2268 }
2269
/*
 * Enqueue and maintain associated counter: appends a finished context
 * to the provider's outbound queue and bumps the out counter.
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	/* Counter update happens outside the provider lock */
	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}
2286
2287 /*
2288 * Accessor for provider object
2289 */
2290 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
2291 {
2292 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
2293 }
2294
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* Snapshot the netlink socket info by value */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
2309
/*
 * Provider api to signal that work/events are available
 * for the dataplane pthread. Always returns AOK.
 */
int dplane_provider_work_ready(void)
{
	/* Note that during zebra startup, we may be offered work before
	 * the dataplane pthread (and thread-master) are ready. We want to
	 * enqueue the work, but the event-scheduling machinery may not be
	 * available.
	 */
	if (zdplane_info.dg_run) {
		thread_add_event(zdplane_info.dg_master,
				 dplane_thread_loop, NULL, 0,
				 &zdplane_info.dg_t_update);
	}

	return AOK;
}
2329
/*
 * Enqueue a context directly to zebra main, bypassing the remaining
 * providers.
 */
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
{
	struct dplane_ctx_q temp_list;

	/* Zebra's api takes a list, so we need to use a temporary list */
	TAILQ_INIT(&temp_list);

	TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
	/* Hand the single-item list to zebra's registered results callback */
	(zdplane_info.dg_results_cb)(&temp_list);
}
2343
2344 /*
2345 * Kernel dataplane provider
2346 */
2347
2348 /*
2349 * Handler for kernel LSP updates
2350 */
2351 static enum zebra_dplane_result
2352 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
2353 {
2354 enum zebra_dplane_result res;
2355
2356 /* Call into the synchronous kernel-facing code here */
2357 res = kernel_lsp_update(ctx);
2358
2359 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2360 atomic_fetch_add_explicit(
2361 &zdplane_info.dg_lsp_errors, 1,
2362 memory_order_relaxed);
2363
2364 return res;
2365 }
2366
/*
 * Handler for kernel pseudowire updates: dispatches to the synchronous
 * kernel-facing pw code and accounts failures.
 */
static enum zebra_dplane_result
kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result res;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
			   dplane_ctx_get_pw_ifname(ctx),
			   dplane_op2str(ctx->zd_op),
			   dplane_ctx_get_pw_af(ctx),
			   dplane_ctx_get_pw_local_label(ctx),
			   dplane_ctx_get_pw_remote_label(ctx));

	res = kernel_pw_update(ctx);

	/* Account pw programming failures */
	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
		atomic_fetch_add_explicit(
			&zdplane_info.dg_pw_errors, 1,
			memory_order_relaxed);

	return res;
}
2392
/*
 * Handler for kernel route updates: dispatches to the synchronous
 * kernel-facing route code and accounts failures.
 */
static enum zebra_dplane_result
kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result res;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char dest_str[PREFIX_STRLEN];

		prefix2str(dplane_ctx_get_dest(ctx),
			   dest_str, sizeof(dest_str));

		zlog_debug("%u:%s Dplane route update ctx %p op %s",
			   dplane_ctx_get_vrf(ctx), dest_str,
			   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
	}

	/* Call into the synchronous kernel-facing code here */
	res = kernel_route_update(ctx);

	/* Account route programming failures */
	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
		atomic_fetch_add_explicit(
			&zdplane_info.dg_route_errors, 1,
			memory_order_relaxed);

	return res;
}
2422
/*
 * Handler for kernel-facing interface address updates: dispatches to
 * the synchronous kernel-facing address code and accounts failures.
 */
static enum zebra_dplane_result
kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result res;


	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char dest_str[PREFIX_STRLEN];

		prefix2str(dplane_ctx_get_intf_addr(ctx), dest_str,
			   sizeof(dest_str));

		zlog_debug("Dplane intf %s, idx %u, addr %s",
			   dplane_op2str(dplane_ctx_get_op(ctx)),
			   dplane_ctx_get_ifindex(ctx), dest_str);
	}

	res = kernel_address_update_ctx(ctx);

	/* Account address programming failures */
	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
					  1, memory_order_relaxed);

	return res;
}
2451
2452 /*
2453 * Kernel provider callback
2454 */
2455 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
2456 {
2457 enum zebra_dplane_result res;
2458 struct zebra_dplane_ctx *ctx;
2459 int counter, limit;
2460
2461 limit = dplane_provider_get_work_limit(prov);
2462
2463 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2464 zlog_debug("dplane provider '%s': processing",
2465 dplane_provider_get_name(prov));
2466
2467 for (counter = 0; counter < limit; counter++) {
2468
2469 ctx = dplane_provider_dequeue_in_ctx(prov);
2470 if (ctx == NULL)
2471 break;
2472
2473 /* A previous provider plugin may have asked to skip the
2474 * kernel update.
2475 */
2476 if (dplane_ctx_is_skip_kernel(ctx)) {
2477 res = ZEBRA_DPLANE_REQUEST_SUCCESS;
2478 goto skip_one;
2479 }
2480
2481 /* Dispatch to appropriate kernel-facing apis */
2482 switch (dplane_ctx_get_op(ctx)) {
2483
2484 case DPLANE_OP_ROUTE_INSTALL:
2485 case DPLANE_OP_ROUTE_UPDATE:
2486 case DPLANE_OP_ROUTE_DELETE:
2487 res = kernel_dplane_route_update(ctx);
2488 break;
2489
2490 case DPLANE_OP_LSP_INSTALL:
2491 case DPLANE_OP_LSP_UPDATE:
2492 case DPLANE_OP_LSP_DELETE:
2493 res = kernel_dplane_lsp_update(ctx);
2494 break;
2495
2496 case DPLANE_OP_PW_INSTALL:
2497 case DPLANE_OP_PW_UNINSTALL:
2498 res = kernel_dplane_pw_update(ctx);
2499 break;
2500
2501 case DPLANE_OP_ADDR_INSTALL:
2502 case DPLANE_OP_ADDR_UNINSTALL:
2503 res = kernel_dplane_address_update(ctx);
2504 break;
2505
2506 /* Ignore 'notifications' - no-op */
2507 case DPLANE_OP_SYS_ROUTE_ADD:
2508 case DPLANE_OP_SYS_ROUTE_DELETE:
2509 case DPLANE_OP_ROUTE_NOTIFY:
2510 case DPLANE_OP_LSP_NOTIFY:
2511 res = ZEBRA_DPLANE_REQUEST_SUCCESS;
2512 break;
2513
2514 default:
2515 atomic_fetch_add_explicit(
2516 &zdplane_info.dg_other_errors, 1,
2517 memory_order_relaxed);
2518
2519 res = ZEBRA_DPLANE_REQUEST_FAILURE;
2520 break;
2521 }
2522
2523 skip_one:
2524 dplane_ctx_set_status(ctx, res);
2525
2526 dplane_provider_enqueue_out_ctx(prov, ctx);
2527 }
2528
2529 /* Ensure that we'll run the work loop again if there's still
2530 * more work to do.
2531 */
2532 if (counter >= limit) {
2533 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2534 zlog_debug("dplane provider '%s' reached max updates %d",
2535 dplane_provider_get_name(prov), counter);
2536
2537 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
2538 1, memory_order_relaxed);
2539
2540 dplane_provider_work_ready();
2541 }
2542
2543 return 0;
2544 }
2545
2546 #if DPLANE_TEST_PROVIDER
2547
2548 /*
2549 * Test dataplane provider plugin
2550 */
2551
2552 /*
2553 * Test provider process callback
2554 */
2555 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
2556 {
2557 struct zebra_dplane_ctx *ctx;
2558 int counter, limit;
2559
2560 /* Just moving from 'in' queue to 'out' queue */
2561
2562 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2563 zlog_debug("dplane provider '%s': processing",
2564 dplane_provider_get_name(prov));
2565
2566 limit = dplane_provider_get_work_limit(prov);
2567
2568 for (counter = 0; counter < limit; counter++) {
2569
2570 ctx = dplane_provider_dequeue_in_ctx(prov);
2571 if (ctx == NULL)
2572 break;
2573
2574 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2575 zlog_debug("dplane provider '%s': op %s",
2576 dplane_provider_get_name(prov),
2577 dplane_op2str(dplane_ctx_get_op(ctx)));
2578
2579 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
2580
2581 dplane_provider_enqueue_out_ctx(prov, ctx);
2582 }
2583
2584 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2585 zlog_debug("dplane provider '%s': processed %d",
2586 dplane_provider_get_name(prov), counter);
2587
2588 /* Ensure that we'll run the work loop again if there's still
2589 * more work to do.
2590 */
2591 if (counter >= limit)
2592 dplane_provider_work_ready();
2593
2594 return 0;
2595 }
2596
2597 /*
2598 * Test provider shutdown/fini callback
2599 */
2600 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
2601 bool early)
2602 {
2603 if (IS_ZEBRA_DEBUG_DPLANE)
2604 zlog_debug("dplane provider '%s': %sshutdown",
2605 dplane_provider_get_name(prov),
2606 early ? "early " : "");
2607
2608 return 0;
2609 }
2610 #endif /* DPLANE_TEST_PROVIDER */
2611
2612 /*
2613 * Register default kernel provider
2614 */
2615 static void dplane_provider_init(void)
2616 {
2617 int ret;
2618
2619 ret = dplane_provider_register("Kernel",
2620 DPLANE_PRIO_KERNEL,
2621 DPLANE_PROV_FLAGS_DEFAULT, NULL,
2622 kernel_dplane_process_func,
2623 NULL,
2624 NULL, NULL);
2625
2626 if (ret != AOK)
2627 zlog_err("Unable to register kernel dplane provider: %d",
2628 ret);
2629
2630 #if DPLANE_TEST_PROVIDER
2631 /* Optional test provider ... */
2632 ret = dplane_provider_register("Test",
2633 DPLANE_PRIO_PRE_KERNEL,
2634 DPLANE_PROV_FLAGS_DEFAULT, NULL,
2635 test_dplane_process_func,
2636 test_dplane_shutdown_func,
2637 NULL /* data */, NULL);
2638
2639 if (ret != AOK)
2640 zlog_err("Unable to register test dplane provider: %d",
2641 ret);
2642 #endif /* DPLANE_TEST_PROVIDER */
2643 }
2644
2645 /* Indicates zebra shutdown/exit is in progress. Some operations may be
2646 * simplified or skipped during shutdown processing.
2647 */
2648 bool dplane_is_in_shutdown(void)
2649 {
2650 return zdplane_info.dg_is_shutdown;
2651 }
2652
2653 /*
2654 * Early or pre-shutdown, de-init notification api. This runs pretty
2655 * early during zebra shutdown, as a signal to stop new work and prepare
2656 * for updates generated by shutdown/cleanup activity, as zebra tries to
2657 * remove everything it's responsible for.
2658 * NB: This runs in the main zebra pthread context.
2659 */
2660 void zebra_dplane_pre_finish(void)
2661 {
2662 if (IS_ZEBRA_DEBUG_DPLANE)
2663 zlog_debug("Zebra dataplane pre-fini called");
2664
2665 zdplane_info.dg_is_shutdown = true;
2666
2667 /* TODO -- Notify provider(s) of pending shutdown */
2668 }
2669
/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing.
 *
 * Returns true if any context is found on the global incoming queue or
 * on any provider's in/out queues. Note the lock discipline: the global
 * DPLANE lock protects the incoming queue and the provider-list links,
 * while each provider's own lock protects its in/out queues; the two are
 * never held at the same time here.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		/* Snapshot the head of the incoming queue and the first
		 * provider under the global lock.
		 */
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	/* Anything still on the incoming queue means work is pending */
	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	/* Walk the provider list, checking each provider's queues */
	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		/* Found queued work at this provider; stop looking */
		if (ctx != NULL)
			break;

		/* Advance along the provider list under the global lock,
		 * which protects the list links.
		 */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
2719
2720 /*
2721 * Shutdown-time intermediate callback, used to determine when all pending
2722 * in-flight updates are done. If there's still work to do, reschedules itself.
2723 * If all work is done, schedules an event to the main zebra thread for
2724 * final zebra shutdown.
2725 * This runs in the dplane pthread context.
2726 */
2727 static int dplane_check_shutdown_status(struct thread *event)
2728 {
2729 if (IS_ZEBRA_DEBUG_DPLANE)
2730 zlog_debug("Zebra dataplane shutdown status check called");
2731
2732 if (dplane_work_pending()) {
2733 /* Reschedule dplane check on a short timer */
2734 thread_add_timer_msec(zdplane_info.dg_master,
2735 dplane_check_shutdown_status,
2736 NULL, 100,
2737 &zdplane_info.dg_t_shutdown_check);
2738
2739 /* TODO - give up and stop waiting after a short time? */
2740
2741 } else {
2742 /* We appear to be done - schedule a final callback event
2743 * for the zebra main pthread.
2744 */
2745 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
2746 }
2747
2748 return 0;
2749 }
2750
2751 /*
2752 * Shutdown, de-init api. This runs pretty late during shutdown,
2753 * after zebra has tried to free/remove/uninstall all routes during shutdown.
2754 * At this point, dplane work may still remain to be done, so we can't just
2755 * blindly terminate. If there's still work to do, we'll periodically check
2756 * and when done, we'll enqueue a task to the zebra main thread for final
2757 * termination processing.
2758 *
2759 * NB: This runs in the main zebra thread context.
2760 */
2761 void zebra_dplane_finish(void)
2762 {
2763 if (IS_ZEBRA_DEBUG_DPLANE)
2764 zlog_debug("Zebra dataplane fini called");
2765
2766 thread_add_event(zdplane_info.dg_master,
2767 dplane_check_shutdown_status, NULL, 0,
2768 &zdplane_info.dg_t_shutdown_check);
2769 }
2770
/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			/* Tag the context with the first provider's id */
			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	/* Account for the contexts just removed from the incoming queue */
	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* Maintain the provider's input counters and input-queue
		 * high-water mark.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider; the global lock protects the
		 * provider-list links.
		 */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
2961
2962 /*
2963 * Final phase of shutdown, after all work enqueued to dplane has been
2964 * processed. This is called from the zebra main pthread context.
2965 */
2966 void zebra_dplane_shutdown(void)
2967 {
2968 if (IS_ZEBRA_DEBUG_DPLANE)
2969 zlog_debug("Zebra dataplane shutdown called");
2970
2971 /* Stop dplane thread, if it's running */
2972
2973 zdplane_info.dg_run = false;
2974
2975 THREAD_OFF(zdplane_info.dg_t_update);
2976
2977 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
2978
2979 /* Destroy pthread */
2980 frr_pthread_destroy(zdplane_info.dg_pthread);
2981 zdplane_info.dg_pthread = NULL;
2982 zdplane_info.dg_master = NULL;
2983
2984 /* TODO -- Notify provider(s) of final shutdown */
2985
2986 /* TODO -- Clean-up provider objects */
2987
2988 /* TODO -- Clean queue(s), free memory */
2989 }
2990
2991 /*
2992 * Initialize the dataplane module during startup, internal/private version
2993 */
2994 static void zebra_dplane_init_internal(void)
2995 {
2996 memset(&zdplane_info, 0, sizeof(zdplane_info));
2997
2998 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
2999
3000 TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
3001 TAILQ_INIT(&zdplane_info.dg_providers_q);
3002
3003 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
3004
3005 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
3006
3007 /* Register default kernel 'provider' during init */
3008 dplane_provider_init();
3009 }
3010
3011 /*
3012 * Start the dataplane pthread. This step needs to be run later than the
3013 * 'init' step, in case zebra has fork-ed.
3014 */
3015 void zebra_dplane_start(void)
3016 {
3017 struct zebra_dplane_provider *prov;
3018 struct frr_pthread_attr pattr = {
3019 .start = frr_pthread_attr_default.start,
3020 .stop = frr_pthread_attr_default.stop
3021 };
3022
3023 /* Start dataplane pthread */
3024
3025 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
3026 "Zebra dplane");
3027
3028 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
3029
3030 zdplane_info.dg_run = true;
3031
3032 /* Enqueue an initial event for the dataplane pthread */
3033 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
3034 &zdplane_info.dg_t_update);
3035
3036 /* Call start callbacks for registered providers */
3037
3038 DPLANE_LOCK();
3039 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
3040 DPLANE_UNLOCK();
3041
3042 while (prov) {
3043
3044 if (prov->dp_start)
3045 (prov->dp_start)(prov);
3046
3047 /* Locate next provider */
3048 DPLANE_LOCK();
3049 prov = TAILQ_NEXT(prov, dp_prov_link);
3050 DPLANE_UNLOCK();
3051 }
3052
3053 frr_pthread_run(zdplane_info.dg_pthread, NULL);
3054 }
3055
3056 /*
3057 * Initialize the dataplane module at startup; called by zebra rib_init()
3058 */
3059 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
3060 {
3061 zebra_dplane_init_internal();
3062 zdplane_info.dg_results_cb = results_fp;
3063 }