]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
Merge pull request #5778 from ton31337/fix/add_doc_for_ebgp_connected_route_check
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_memory.h"
33 #include "zebra/zebra_router.h"
34 #include "zebra/zebra_dplane.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37
38 /* Memory type for context blocks */
39 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
40 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider")
41
42 #ifndef AOK
43 # define AOK 0
44 #endif
45
46 /* Enable test dataplane provider */
47 /*#define DPLANE_TEST_PROVIDER 1 */
48
49 /* Default value for max queued incoming updates */
50 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
51
52 /* Default value for new work per cycle */
53 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
54
55 /* Validation check macro for context blocks */
56 /* #define DPLANE_DEBUG 1 */
57
58 #ifdef DPLANE_DEBUG
59
60 # define DPLANE_CTX_VALID(p) \
61 assert((p) != NULL)
62
63 #else
64
65 # define DPLANE_CTX_VALID(p)
66
67 #endif /* DPLANE_DEBUG */
68
/*
 * Nexthop information captured for nexthop/nexthop group updates
 */
struct dplane_nexthop_info {
	/* Nexthop-group id */
	uint32_t id;
	/* Address family */
	afi_t afi;
	/* VRF the nexthop(s) belong to */
	vrf_id_t vrf_id;
	/* Owner type (see dplane_ctx_get_nhe_type()) */
	int type;

	/* Full nexthop list */
	struct nexthop_group ng;

	/* Pre-resolved group members; nh_grp_count gives the number of
	 * valid entries in nh_grp[].
	 */
	struct nh_grp nh_grp[MULTIPATH_NUM];
	uint8_t nh_grp_count;
};
82
/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	afi_t zd_afi;
	safi_t zd_safi;

	/* Route type/protocol; the zd_old_* values carry the
	 * previous attributes for update operations.
	 */
	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthop hash entry info */
	struct dplane_nexthop_info nhe;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
124
/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	int type;
	int af;
	int status;
	uint32_t flags;
	/* Remote endpoint of the pseudowire */
	union g_addr dest;
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Nexthops */
	struct nexthop_group nhg;

	/* Protocol-specific attributes (see union pw_protocol_fields) */
	union pw_protocol_fields fields;
};
142
/*
 * Interface/prefix info for the dataplane
 */
struct dplane_intf_info {

	uint32_t metric;
	uint32_t flags;

/* Flag values for 'flags' above */
#define DPLANE_INTF_CONNECTED   (1 << 0) /* Connected peer, p2p */
#define DPLANE_INTF_SECONDARY   (1 << 1)
#define DPLANE_INTF_BROADCAST   (1 << 2)
#define DPLANE_INTF_HAS_DEST    DPLANE_INTF_CONNECTED
#define DPLANE_INTF_HAS_LABEL   (1 << 4)

	/* Interface address/prefix */
	struct prefix prefix;

	/* Dest address, for p2p, or broadcast prefix */
	struct prefix dest_prefix;

	/* Label points either at label_buf (short labels, no allocation)
	 * or at heap memory that must be freed with the ctx.
	 */
	char *label;
	char label_buf[32];
};
166
/*
 * EVPN MAC address info for the dataplane.
 */
struct dplane_mac_info {
	vlanid_t vid;
	/* Bridge interface the MAC entry lives on */
	ifindex_t br_ifindex;
	struct ethaddr mac;
	/* Remote VTEP, for EVPN-learned MACs */
	struct in_addr vtep_ip;
	bool is_sticky;

};
178
/*
 * EVPN neighbor info for the dataplane
 */
struct dplane_neigh_info {
	struct ipaddr ip_addr;
	struct ethaddr mac;
	uint32_t flags;
	/* Neighbor-table entry state */
	uint16_t state;
};
188
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True for update ops that replace an existing object */
	bool zd_is_update;

	/* Sequence numbers: current, and previous for updates */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* Some updates may be generated by notifications: allow the
	 * plugin to notice and ignore results from its own notifications.
	 */
	uint32_t zd_notif_provider;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	char zd_ifname[INTERFACE_NAMSIZ];
	ifindex_t zd_ifindex;

	/* Support info for different kinds of updates; only the member
	 * matching zd_op is meaningful.
	 */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
		struct dplane_intf_info intf;
		struct dplane_mac_info macinfo;
		struct dplane_neigh_info neigh;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
244
245 /* Flag that can be set by a pre-kernel provider as a signal that an update
246 * should bypass the kernel.
247 */
248 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
249
250
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Provider callbacks: start, per-cycle work ('fp'), and
	 * finish/cleanup ('fini', with an 'early' phase flag).
	 */
	int (*dp_start)(struct zebra_dplane_provider *prov);

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters, atomics since they may be read cross-thread */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
298
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Control whether system route notifications should be produced. */
	bool dg_sys_route_notifs;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Per-object-type in/error counters; atomics since they are
	 * read from the show-command path in another pthread.
	 */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_nexthops_in;
	_Atomic uint32_t dg_nexthop_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_intf_addrs_in;
	_Atomic uint32_t dg_intf_addr_errors;

	_Atomic uint32_t dg_macs_in;
	_Atomic uint32_t dg_mac_errors;

	_Atomic uint32_t dg_neighs_in;
	_Atomic uint32_t dg_neigh_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
374
375 /*
376 * Lock and unlock for interactions with the zebra 'core' pthread
377 */
378 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
379 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
380
381
382 /*
383 * Lock and unlock for individual providers
384 */
385 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
386 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
387
388 /* Prototypes */
389 static int dplane_thread_loop(struct thread *event);
390 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
391 struct zebra_ns *zns);
392 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
393 enum dplane_op_e op);
394 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
395 enum dplane_op_e op);
396 static enum zebra_dplane_result intf_addr_update_internal(
397 const struct interface *ifp, const struct connected *ifc,
398 enum dplane_op_e op);
399 static enum zebra_dplane_result mac_update_internal(
400 enum dplane_op_e op, const struct interface *ifp,
401 const struct interface *br_ifp,
402 vlanid_t vid, const struct ethaddr *mac,
403 struct in_addr vtep_ip, bool sticky);
404 static enum zebra_dplane_result neigh_update_internal(
405 enum dplane_op_e op,
406 const struct interface *ifp,
407 const struct ethaddr *mac,
408 const struct ipaddr *ip,
409 uint32_t flags, uint16_t state);
410
411 /*
412 * Public APIs
413 */
414
/* Obtain thread_master for dataplane thread */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}
420
421 /*
422 * Allocate a dataplane update context
423 */
424 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
425 {
426 struct zebra_dplane_ctx *p;
427
428 /* TODO -- just alloc'ing memory, but would like to maintain
429 * a pool
430 */
431 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
432
433 return p;
434 }
435
/* Enable system route notifications */
void dplane_enable_sys_route_notifs(void)
{
	zdplane_info.dg_sys_route_notifs = true;
}
441
/*
 * Free a dataplane results context. Dynamic data owned by the ctx
 * (nexthop lists, NHLFEs, a heap-allocated label string) is released
 * based on the ctx's operation type before the block itself is freed.
 */
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
	if (pctx == NULL)
		return;

	DPLANE_CTX_VALID(*pctx);

	/* TODO -- just freeing memory, but would like to maintain
	 * a pool
	 */

	/* Some internal allocations may need to be freed, depending on
	 * the type of info captured in the ctx.
	 */
	switch ((*pctx)->zd_op) {
	case DPLANE_OP_ROUTE_INSTALL:
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:
	case DPLANE_OP_SYS_ROUTE_ADD:
	case DPLANE_OP_SYS_ROUTE_DELETE:
	case DPLANE_OP_ROUTE_NOTIFY:

		/* Free allocated nexthops */
		if ((*pctx)->u.rinfo.zd_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);

			(*pctx)->u.rinfo.zd_ng.nexthop = NULL;
		}

		/* Also the "previous" nexthops captured for updates */
		if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);

			(*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
		}

		break;

	case DPLANE_OP_NH_INSTALL:
	case DPLANE_OP_NH_UPDATE:
	case DPLANE_OP_NH_DELETE: {
		if ((*pctx)->u.rinfo.nhe.ng.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.rinfo.nhe.ng.nexthop);

			(*pctx)->u.rinfo.nhe.ng.nexthop = NULL;
		}
		break;
	}

	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
	case DPLANE_OP_LSP_NOTIFY:
	{
		zebra_nhlfe_t *nhlfe, *next;

		/* Free allocated NHLFEs */
		for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
			next = nhlfe->next;

			zebra_mpls_nhlfe_del(nhlfe);
		}

		/* Clear pointers in lsp struct, in case we're cacheing
		 * free context structs.
		 */
		(*pctx)->u.lsp.nhlfe_list = NULL;
		(*pctx)->u.lsp.best_nhlfe = NULL;

		break;
	}

	case DPLANE_OP_PW_INSTALL:
	case DPLANE_OP_PW_UNINSTALL:
		/* Free allocated nexthops */
		if ((*pctx)->u.pw.nhg.nexthop) {
			/* This deals with recursive nexthops too */
			nexthops_free((*pctx)->u.pw.nhg.nexthop);

			(*pctx)->u.pw.nhg.nexthop = NULL;
		}
		break;

	case DPLANE_OP_ADDR_INSTALL:
	case DPLANE_OP_ADDR_UNINSTALL:
		/* Maybe free label string, if allocated (label may instead
		 * point into the embedded label_buf, which needs no free).
		 */
		if ((*pctx)->u.intf.label != NULL &&
		    (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) {
			free((*pctx)->u.intf.label);
			(*pctx)->u.intf.label = NULL;
		}
		break;

	case DPLANE_OP_MAC_INSTALL:
	case DPLANE_OP_MAC_DELETE:
	case DPLANE_OP_NEIGH_INSTALL:
	case DPLANE_OP_NEIGH_UPDATE:
	case DPLANE_OP_NEIGH_DELETE:
	case DPLANE_OP_VTEP_ADD:
	case DPLANE_OP_VTEP_DELETE:
	case DPLANE_OP_NONE:
		/* These op types carry no dynamically-allocated data */
		break;
	}

	XFREE(MTYPE_DP_CTX, *pctx);
}
553
/*
 * Return a context block to the dplane module after processing;
 * NULLs the caller's pointer (via XFREE in dplane_ctx_free).
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
562
/* Enqueue a context block at the tail of a list. The const is cast
 * away because the TAILQ macros need a mutable object; the ctx is not
 * otherwise modified here.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
569
570 /* Append a list of context blocks to another list */
571 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
572 struct dplane_ctx_q *from_list)
573 {
574 if (TAILQ_FIRST(from_list)) {
575 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
576
577 /* And clear 'from' list */
578 TAILQ_INIT(from_list);
579 }
580 }
581
582 /* Dequeue a context block from the head of a list */
583 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
584 {
585 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
586
587 if (ctx)
588 TAILQ_REMOVE(q, ctx, zd_q_entries);
589
590 return ctx;
591 }
592
/*
 * Accessors for information from the context object
 */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Set the result status carried back to the update's originator */
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Test whether a pre-kernel provider flagged this update to skip
 * the kernel.
 */
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Set the operation code for a context block */
void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
	DPLANE_CTX_VALID(ctx);
	ctx->zd_op = op;
}

/* Retrieve the operation code from a context block */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
648
649 const char *dplane_op2str(enum dplane_op_e op)
650 {
651 const char *ret = "UNKNOWN";
652
653 switch (op) {
654 case DPLANE_OP_NONE:
655 ret = "NONE";
656 break;
657
658 /* Route update */
659 case DPLANE_OP_ROUTE_INSTALL:
660 ret = "ROUTE_INSTALL";
661 break;
662 case DPLANE_OP_ROUTE_UPDATE:
663 ret = "ROUTE_UPDATE";
664 break;
665 case DPLANE_OP_ROUTE_DELETE:
666 ret = "ROUTE_DELETE";
667 break;
668 case DPLANE_OP_ROUTE_NOTIFY:
669 ret = "ROUTE_NOTIFY";
670 break;
671
672 /* Nexthop update */
673 case DPLANE_OP_NH_INSTALL:
674 ret = "NH_INSTALL";
675 break;
676 case DPLANE_OP_NH_UPDATE:
677 ret = "NH_UPDATE";
678 break;
679 case DPLANE_OP_NH_DELETE:
680 ret = "NH_DELETE";
681 break;
682
683 case DPLANE_OP_LSP_INSTALL:
684 ret = "LSP_INSTALL";
685 break;
686 case DPLANE_OP_LSP_UPDATE:
687 ret = "LSP_UPDATE";
688 break;
689 case DPLANE_OP_LSP_DELETE:
690 ret = "LSP_DELETE";
691 break;
692 case DPLANE_OP_LSP_NOTIFY:
693 ret = "LSP_NOTIFY";
694 break;
695
696 case DPLANE_OP_PW_INSTALL:
697 ret = "PW_INSTALL";
698 break;
699 case DPLANE_OP_PW_UNINSTALL:
700 ret = "PW_UNINSTALL";
701 break;
702
703 case DPLANE_OP_SYS_ROUTE_ADD:
704 ret = "SYS_ROUTE_ADD";
705 break;
706 case DPLANE_OP_SYS_ROUTE_DELETE:
707 ret = "SYS_ROUTE_DEL";
708 break;
709
710 case DPLANE_OP_ADDR_INSTALL:
711 ret = "ADDR_INSTALL";
712 break;
713 case DPLANE_OP_ADDR_UNINSTALL:
714 ret = "ADDR_UNINSTALL";
715 break;
716
717 case DPLANE_OP_MAC_INSTALL:
718 ret = "MAC_INSTALL";
719 break;
720 case DPLANE_OP_MAC_DELETE:
721 ret = "MAC_DELETE";
722 break;
723
724 case DPLANE_OP_NEIGH_INSTALL:
725 ret = "NEIGH_INSTALL";
726 break;
727 case DPLANE_OP_NEIGH_UPDATE:
728 ret = "NEIGH_UPDATE";
729 break;
730 case DPLANE_OP_NEIGH_DELETE:
731 ret = "NEIGH_DELETE";
732 break;
733 case DPLANE_OP_VTEP_ADD:
734 ret = "VTEP_ADD";
735 break;
736 case DPLANE_OP_VTEP_DELETE:
737 ret = "VTEP_DELETE";
738 break;
739 }
740
741 return ret;
742 }
743
744 const char *dplane_res2str(enum zebra_dplane_result res)
745 {
746 const char *ret = "<Unknown>";
747
748 switch (res) {
749 case ZEBRA_DPLANE_REQUEST_FAILURE:
750 ret = "FAILURE";
751 break;
752 case ZEBRA_DPLANE_REQUEST_QUEUED:
753 ret = "QUEUED";
754 break;
755 case ZEBRA_DPLANE_REQUEST_SUCCESS:
756 ret = "SUCCESS";
757 break;
758 }
759
760 return ret;
761 }
762
/* Set the route destination prefix */
void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
			 const struct prefix *dest)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
}

/* Retrieve the route destination prefix */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}

/* Set the (optional) source prefix; NULL clears it */
void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
{
	DPLANE_CTX_VALID(ctx);

	if (src)
		prefix_copy(&(ctx->u.rinfo.zd_src), src);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
}

/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	/* Zero prefixlen plus an all-zeroes address means "unset";
	 * checking the widest union member (prefix6) covers the
	 * shorter v4 address bytes too.
	 */
	if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->u.rinfo.zd_src);
	}
}
800
/* True if this ctx represents an update (replace) operation */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Retrieve the update's sequence number */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Retrieve the previous sequence number, for update operations */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* Set the VRF id for the update */
void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_vrf_id = vrf;
}

/* Retrieve the VRF id for the update */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* True if the ctx was generated by a provider notification */
bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->zd_notif_provider != 0);
}

/* Id of the provider whose notification generated this ctx (0 if none) */
uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_notif_provider;
}

/* Record the provider that generated this notification ctx */
void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
				   uint32_t id)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_notif_provider = id;
}
/* Retrieve the interface name associated with the update */
const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifname;
}

/* Retrieve the interface index associated with the update */
ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifindex;
}
870
/* Set the route type for a route update */
void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_type = type;
}

/* Retrieve the route type */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

/* Retrieve the previous route type, for update operations */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

/* Set the address family for a route update */
void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_afi = afi;
}

/* Retrieve the address family */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

/* Set the SAFI for a route update */
void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_safi = safi;
}

/* Retrieve the SAFI */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

/* Set the kernel table id for the update */
void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_table_id = table;
}

/* Retrieve the kernel table id */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}
933
/* Retrieve the route tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

/* Set the route tag */
void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_tag = tag;
}

/* Retrieve the previous route tag, for update operations */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

/* Retrieve the protocol instance */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

/* Set the protocol instance */
void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_instance = instance;
}

/* Retrieve the previous protocol instance, for update operations */
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}
975
/* Retrieve the route metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

/* Retrieve the previous route metric, for update operations */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

/* Retrieve the route MTU */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

/* Retrieve the nexthop MTU */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

/* Retrieve the admin distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

/* Set the admin distance */
void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_distance = distance;
}

/* Retrieve the previous admin distance, for update operations */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}
1024
/*
 * Set the nexthops associated with a context: note that processing code
 * may well expect that nexthops are in canonical (sorted) order, so we
 * will enforce that here.
 */
void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
{
	DPLANE_CTX_VALID(ctx);

	/* Release any nexthops the ctx already owns before copying */
	if (ctx->u.rinfo.zd_ng.nexthop) {
		nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
		ctx->u.rinfo.zd_ng.nexthop = NULL;
	}
	nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
}
1040
/* Retrieve the nexthop group for the update */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

/* Retrieve the "previous" nexthop group, for update operations */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

/* Retrieve the namespace info captured for kernel communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
1064
/* Accessors for nexthop information */
uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.id;
}

/* Address family of the nexthop-group entry */
afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.afi;
}

/* VRF of the nexthop-group entry */
vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.vrf_id;
}

/* Type of the nexthop-group entry */
int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.type;
}

/* Full nexthop list of the nexthop-group entry */
const struct nexthop_group *
dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.rinfo.nhe.ng);
}

/* Pre-resolved group-member array (see nh_grp_count for length) */
const struct nh_grp *
dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.nh_grp;
}

/* Number of valid entries in the nh_grp array */
uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.nh_grp_count;
}
1109
/* Accessors for LSP information */

/* Incoming MPLS label of the LSP */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

/* Set the incoming MPLS label of the LSP */
void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.ile.in_label = label;
}

/* Address family associated with the LSP */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

/* Set the address family associated with the LSP */
void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
				uint8_t family)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.addr_family = family;
}

/* LSP flags */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

/* Set the LSP flags */
void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
			      uint32_t flags)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.flags = flags;
}
1155
/* Head of the LSP's NHLFE list */
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}

/* Add an NHLFE to the ctx's embedded LSP; the LSP owns the new
 * object (it is freed with the ctx).
 */
zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
				    enum lsp_types_t lsp_type,
				    enum nexthop_types_t nh_type,
				    union g_addr *gate,
				    ifindex_t ifindex,
				    uint8_t num_labels,
				    mpls_label_t out_labels[])
{
	zebra_nhlfe_t *nhlfe;

	DPLANE_CTX_VALID(ctx);

	nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
					 lsp_type, nh_type, gate,
					 ifindex, num_labels, out_labels);

	return nhlfe;
}
1181
/* Best/selected NHLFE of the LSP */
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

/* Set the LSP's best NHLFE; returns the stored pointer */
const zebra_nhlfe_t *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
			  zebra_nhlfe_t *nhlfe)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.best_nhlfe = nhlfe;
	return ctx->u.lsp.best_nhlfe;
}

/* Number of ECMP paths in the LSP */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
1206
/* Accessors for pseudowire information */

/* Local MPLS label of the pseudowire */
mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

/* Remote MPLS label of the pseudowire */
mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

/* Pseudowire type */
int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Pseudowire address family */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

/* Pseudowire flags */
uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

/* Pseudowire status */
int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

/* Remote endpoint address of the pseudowire */
const union g_addr *dplane_ctx_get_pw_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.dest);
}

/* Protocol-specific fields of the pseudowire */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/* Nexthop group of the pseudowire */
const struct nexthop_group *
dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nhg);
}
1272
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.metric;
}

/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
}

/* Is the address a secondary address? */
bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
}

/* Does the interface support broadcast? */
bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
}

/* Interface address/prefix for the update */
const struct prefix *dplane_ctx_get_intf_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.prefix);
}

/* Is a destination (p2p peer) address present? */
bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
}

/* Destination address, or NULL when none was captured */
const struct prefix *dplane_ctx_get_intf_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST)
		return &(ctx->u.intf.dest_prefix);
	else
		return NULL;
}

/* Is an address label present? */
bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
}

/* Address label string (owned by the ctx) */
const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.label;
}
1342
/* Accessors for MAC information */

/* VLAN id associated with the MAC entry. */
vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.vid;
}

/* True when the MAC was flagged 'sticky' by the caller. */
bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.is_sticky;
}

/* The MAC address itself. */
const struct ethaddr *dplane_ctx_mac_get_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.mac);
}

/* Remote VTEP IP for the vxlan/evpn MAC entry. */
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.vtep_ip);
}

/* Ifindex of the bridge associated with the MAC entry. */
ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.br_ifindex;
}
1375
/* Accessors for neighbor information */

/* IP address of the neighbor entry. */
const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.ip_addr);
}

/* Link-layer address of the neighbor entry. */
const struct ethaddr *dplane_ctx_neigh_get_mac(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.mac);
}

/* Neighbor flags captured in the context. */
uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.flags;
}

/* Neighbor state (e.g. DPLANE_NUD_* value) captured in the context. */
uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.state;
}
1402
1403 /*
1404 * End of dplane context accessors
1405 */
1406
1407
1408 /*
1409 * Retrieve the limit on the number of pending, unprocessed updates.
1410 */
1411 uint32_t dplane_get_in_queue_limit(void)
1412 {
1413 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
1414 memory_order_relaxed);
1415 }
1416
1417 /*
1418 * Configure limit on the number of pending, queued updates.
1419 */
1420 void dplane_set_in_queue_limit(uint32_t limit, bool set)
1421 {
1422 /* Reset to default on 'unset' */
1423 if (!set)
1424 limit = DPLANE_DEFAULT_MAX_QUEUED;
1425
1426 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
1427 memory_order_relaxed);
1428 }
1429
1430 /*
1431 * Retrieve the current queue depth of incoming, unprocessed updates
1432 */
1433 uint32_t dplane_get_in_queue_len(void)
1434 {
1435 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
1436 memory_order_seq_cst);
1437 }
1438
1439 /*
1440 * Common dataplane context init with zebra namespace info.
1441 */
1442 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
1443 struct zebra_ns *zns,
1444 bool is_update)
1445 {
1446 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
1447
1448 #if defined(HAVE_NETLINK)
1449 /* Increment message counter after copying to context struct - may need
1450 * two messages in some 'update' cases.
1451 */
1452 if (is_update)
1453 zns->netlink_dplane.seq += 2;
1454 else
1455 zns->netlink_dplane.seq++;
1456 #endif /* HAVE_NETLINK */
1457
1458 return AOK;
1459 }
1460
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Copies the prefix, route metadata and nexthops out of the
 * route_node/route_entry so that later processing never dereferences
 * 'core' zebra structures from another pthread.
 *
 * Returns AOK on success, EINVAL on bad arguments, or (netlink builds)
 * ENOENT when the route's nexthop-group entry is neither installed nor
 * queued for an install/update operation.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* The 'old' values start out mirroring the current route; the
	 * caller overwrites them for update ops with a distinct old_re.
	 */
	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
		      re->nhe->nhg->nexthop, NULL);

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Don't need some info when capturing a system notification */
	if (op == DPLANE_OP_SYS_ROUTE_ADD ||
	    op == DPLANE_OP_SYS_ROUTE_DELETE) {
		ret = AOK;
		goto done;
	}

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

#ifdef HAVE_NETLINK
	if (re->nhe_id) {
		struct nhg_hash_entry *nhe =
			zebra_nhg_resolve(zebra_nhg_lookup_id(re->nhe_id));

		ctx->u.rinfo.nhe.id = nhe->id;
		/*
		 * Check if the nhe is installed/queued before doing anything
		 * with this route.
		 *
		 * If its a delete we only use the prefix anyway, so this only
		 * matters for INSTALL/UPDATE.
		 */
		if (((op == DPLANE_OP_ROUTE_INSTALL)
		     || (op == DPLANE_OP_ROUTE_UPDATE))
		    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
		    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
			ret = ENOENT;
			goto done;
		}
	}
#endif /* HAVE_NETLINK */

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
1568
/**
 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
 *
 * @ctx: Dataplane context to init
 * @op: Operation being performed
 * @nhe: Nexthop group hash entry
 *
 * Copies the nhe's id/afi/vrf/type and its nexthop group into the
 * context so the dplane pthread never touches the hash entry itself.
 *
 * Return: AOK on success, EINVAL on bad arguments
 */
static int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx,
				   enum dplane_op_e op,
				   struct nhg_hash_entry *nhe)
{
	struct zebra_vrf *zvrf = NULL;
	struct zebra_ns *zns = NULL;

	int ret = EINVAL;

	if (!ctx || !nhe)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Copy over nhe info */
	ctx->u.rinfo.nhe.id = nhe->id;
	ctx->u.rinfo.nhe.afi = nhe->afi;
	ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
	ctx->u.rinfo.nhe.type = nhe->type;

	nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), nhe->nhg);

	/* If its a group, convert it to a grp array of ids */
	if (!zebra_nhg_depends_is_empty(nhe)
	    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
		ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
			ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);

	zvrf = vrf_info_lookup(nhe->vrf_id);

	/*
	 * Fallback to default namespace if the vrf got ripped out from under
	 * us.
	 */
	zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);

	/*
	 * TODO: Might not need to mark this as an update, since
	 * it probably won't require two messages
	 */
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));

	ret = AOK;

done:
	return ret;
}
1626
/*
 * Capture information for an LSP update in a dplane context.
 *
 * Copies the LSP's in-label, flags and each NHLFE into the context.
 * Returns AOK on success, or ENOMEM if an NHLFE copy fails; on error
 * the caller cleans up the whole context, so nothing is freed here.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): this assumes each nhlfe nexthop carries a
		 * label stack (nh_label != NULL) - confirm, otherwise a
		 * NULL dereference is possible below.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->num_labels,
				nhlfe->nexthop->nh_label->label);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1691
/*
 * Capture information for a pseudowire update in a dplane context.
 * (Header comment previously said "LSP" - this handles PW ops.)
 *
 * Copies the PW's labels, flags and addresses, then resolves and copies
 * the nexthops for the PW destination from the RIB. Always returns AOK.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	struct prefix p;
	afi_t afi;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *re;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));

	ctx->zd_vrf_id = pw->vrf_id;
	ctx->zd_ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.dest = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	/* Capture nexthop info for the pw destination. We need to look
	 * up and use zebra datastructs, but we're running in the zebra
	 * pthread here so that should be ok.
	 */
	memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
	p.family = pw->af;
	p.prefixlen = ((pw->af == AF_INET) ?
		       IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);

	afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
	table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
	if (table) {
		rn = route_node_match(table, &p);
		if (rn) {
			/* 're' is NULL here if no SELECTED entry was
			 * found (the loop ran off the end of the list).
			 */
			RNODE_FOREACH_RE(rn, re) {
				if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
					break;
			}

			if (re)
				copy_nexthops(&(ctx->u.pw.nhg.nexthop),
					      re->nhe->nhg->nexthop, NULL);

			route_unlock_node(rn);
		}
	}

	return AOK;
}
1764
/*
 * Enqueue a new update,
 * and ensure an event is active for the dataplane pthread.
 *
 * Returns the result of dplane_provider_work_ready() (AOK on success).
 */
static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also: standard CAS retry loop.
	 * A weak compare-exchange can fail spuriously; on failure 'high'
	 * is reloaded with the current max, so we retry only while our
	 * value would still raise it.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1810
/*
 * Utility that prepares a route update and enqueues it for processing.
 *
 * Returns QUEUED when the context was handed to the dplane pthread;
 * SUCCESS when init reported ENOENT (nexthop group not installed yet,
 * so there is nothing to program); FAILURE otherwise.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->nhe->nhg->nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_update_enqueue(ctx);
	}

	/* Update counter: counts attempts, incremented on every call */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		if (ret == ENOENT)
			result = ZEBRA_DPLANE_REQUEST_SUCCESS;
		else
			atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
						  1, memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1878
/**
 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
 *
 * @nhe:	Nexthop group hash entry where the change occured
 * @op:		The operation to be enqued
 *
 * Return: QUEUED on success, FAILURE otherwise (the context is freed
 *	   here when it was never handed to the dplane pthread).
 */
static enum zebra_dplane_result
dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (!ctx) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_nexthop_init(ctx, op, nhe);
	if (ret == AOK)
		ret = dplane_update_enqueue(ctx);

done:
	/* Update counter: counts attempts, incremented even on failure */
	atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1921
1922 /*
1923 * Enqueue a route 'add' for the dataplane.
1924 */
1925 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1926 struct route_entry *re)
1927 {
1928 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1929
1930 if (rn == NULL || re == NULL)
1931 goto done;
1932
1933 ret = dplane_route_update_internal(rn, re, NULL,
1934 DPLANE_OP_ROUTE_INSTALL);
1935
1936 done:
1937 return ret;
1938 }
1939
1940 /*
1941 * Enqueue a route update for the dataplane.
1942 */
1943 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1944 struct route_entry *re,
1945 struct route_entry *old_re)
1946 {
1947 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1948
1949 if (rn == NULL || re == NULL)
1950 goto done;
1951
1952 ret = dplane_route_update_internal(rn, re, old_re,
1953 DPLANE_OP_ROUTE_UPDATE);
1954 done:
1955 return ret;
1956 }
1957
1958 /*
1959 * Enqueue a route removal for the dataplane.
1960 */
1961 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1962 struct route_entry *re)
1963 {
1964 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1965
1966 if (rn == NULL || re == NULL)
1967 goto done;
1968
1969 ret = dplane_route_update_internal(rn, re, NULL,
1970 DPLANE_OP_ROUTE_DELETE);
1971
1972 done:
1973 return ret;
1974 }
1975
1976 /*
1977 * Notify the dplane when system/connected routes change.
1978 */
1979 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
1980 struct route_entry *re)
1981 {
1982 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1983
1984 /* Ignore this event unless a provider plugin has requested it. */
1985 if (!zdplane_info.dg_sys_route_notifs) {
1986 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1987 goto done;
1988 }
1989
1990 if (rn == NULL || re == NULL)
1991 goto done;
1992
1993 ret = dplane_route_update_internal(rn, re, NULL,
1994 DPLANE_OP_SYS_ROUTE_ADD);
1995
1996 done:
1997 return ret;
1998 }
1999
2000 /*
2001 * Notify the dplane when system/connected routes are deleted.
2002 */
2003 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
2004 struct route_entry *re)
2005 {
2006 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2007
2008 /* Ignore this event unless a provider plugin has requested it. */
2009 if (!zdplane_info.dg_sys_route_notifs) {
2010 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
2011 goto done;
2012 }
2013
2014 if (rn == NULL || re == NULL)
2015 goto done;
2016
2017 ret = dplane_route_update_internal(rn, re, NULL,
2018 DPLANE_OP_SYS_ROUTE_DELETE);
2019
2020 done:
2021 return ret;
2022 }
2023
2024 /*
2025 * Update from an async notification, to bring other fibs up-to-date.
2026 */
2027 enum zebra_dplane_result
2028 dplane_route_notif_update(struct route_node *rn,
2029 struct route_entry *re,
2030 enum dplane_op_e op,
2031 struct zebra_dplane_ctx *ctx)
2032 {
2033 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2034 struct zebra_dplane_ctx *new_ctx = NULL;
2035 struct nexthop *nexthop;
2036
2037 if (rn == NULL || re == NULL)
2038 goto done;
2039
2040 new_ctx = dplane_ctx_alloc();
2041 if (new_ctx == NULL)
2042 goto done;
2043
2044 /* Init context with info from zebra data structs */
2045 dplane_ctx_route_init(new_ctx, op, rn, re);
2046
2047 /* For add/update, need to adjust the nexthops so that we match
2048 * the notification state, which may not be the route-entry/RIB
2049 * state.
2050 */
2051 if (op == DPLANE_OP_ROUTE_UPDATE ||
2052 op == DPLANE_OP_ROUTE_INSTALL) {
2053
2054 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
2055 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
2056
2057 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
2058 (rib_active_nhg(re))->nexthop, NULL);
2059
2060 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
2061 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2062
2063 }
2064
2065 /* Capture info about the source of the notification, in 'ctx' */
2066 dplane_ctx_set_notif_provider(new_ctx,
2067 dplane_ctx_get_notif_provider(ctx));
2068
2069 dplane_update_enqueue(new_ctx);
2070
2071 ret = ZEBRA_DPLANE_REQUEST_QUEUED;
2072
2073 done:
2074 return ret;
2075 }
2076
2077 /*
2078 * Enqueue a nexthop add for the dataplane.
2079 */
2080 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
2081 {
2082 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2083
2084 if (nhe)
2085 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
2086 return ret;
2087 }
2088
2089 /*
2090 * Enqueue a nexthop update for the dataplane.
2091 *
2092 * Might not need this func since zebra's nexthop objects should be immutable?
2093 */
2094 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
2095 {
2096 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2097
2098 if (nhe)
2099 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
2100 return ret;
2101 }
2102
2103 /*
2104 * Enqueue a nexthop removal for the dataplane.
2105 */
2106 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
2107 {
2108 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2109
2110 if (nhe)
2111 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
2112
2113 return ret;
2114 }
2115
2116 /*
2117 * Enqueue LSP add for the dataplane.
2118 */
2119 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
2120 {
2121 enum zebra_dplane_result ret =
2122 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
2123
2124 return ret;
2125 }
2126
2127 /*
2128 * Enqueue LSP update for the dataplane.
2129 */
2130 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
2131 {
2132 enum zebra_dplane_result ret =
2133 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
2134
2135 return ret;
2136 }
2137
2138 /*
2139 * Enqueue LSP delete for the dataplane.
2140 */
2141 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
2142 {
2143 enum zebra_dplane_result ret =
2144 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
2145
2146 return ret;
2147 }
2148
/* Update or un-install resulting from an async notification.
 *
 * Like lsp_update_internal(), but also copies the notifying provider
 * id from 'notif_ctx' into the new context before enqueueing it.
 */
enum zebra_dplane_result
dplane_lsp_notif_update(zebra_lsp_t *lsp,
			enum dplane_op_e op,
			struct zebra_dplane_ctx *notif_ctx)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	/* Capture info about the source of the notification */
	dplane_ctx_set_notif_provider(
		ctx,
		dplane_ctx_get_notif_provider(notif_ctx));

	ret = dplane_update_enqueue(ctx);

done:
	/* Update counter: counts attempts, incremented even on failure */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}
	return result;
}
2192
2193 /*
2194 * Enqueue pseudowire install for the dataplane.
2195 */
2196 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
2197 {
2198 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
2199 }
2200
2201 /*
2202 * Enqueue pseudowire un-install for the dataplane.
2203 */
2204 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
2205 {
2206 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
2207 }
2208
2209 /*
2210 * Common internal LSP update utility
2211 */
2212 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
2213 enum dplane_op_e op)
2214 {
2215 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2216 int ret = EINVAL;
2217 struct zebra_dplane_ctx *ctx = NULL;
2218
2219 /* Obtain context block */
2220 ctx = dplane_ctx_alloc();
2221
2222 ret = dplane_ctx_lsp_init(ctx, op, lsp);
2223 if (ret != AOK)
2224 goto done;
2225
2226 ret = dplane_update_enqueue(ctx);
2227
2228 done:
2229 /* Update counter */
2230 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
2231 memory_order_relaxed);
2232
2233 if (ret == AOK)
2234 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2235 else {
2236 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
2237 memory_order_relaxed);
2238 dplane_ctx_free(&ctx);
2239 }
2240
2241 return result;
2242 }
2243
2244 /*
2245 * Internal, common handler for pseudowire updates.
2246 */
2247 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
2248 enum dplane_op_e op)
2249 {
2250 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2251 int ret;
2252 struct zebra_dplane_ctx *ctx = NULL;
2253
2254 ctx = dplane_ctx_alloc();
2255
2256 ret = dplane_ctx_pw_init(ctx, op, pw);
2257 if (ret != AOK)
2258 goto done;
2259
2260 ret = dplane_update_enqueue(ctx);
2261
2262 done:
2263 /* Update counter */
2264 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
2265 memory_order_relaxed);
2266
2267 if (ret == AOK)
2268 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2269 else {
2270 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
2271 memory_order_relaxed);
2272 dplane_ctx_free(&ctx);
2273 }
2274
2275 return result;
2276 }
2277
/*
 * Enqueue interface address add for the dataplane.
 *
 * On non-netlink platforms with SIOCAIFADDR-style ioctls, performs
 * extra p2p/broadcast consistency checks and a v4 route-conflict
 * lookup before enqueueing the install.
 */
enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
					      const struct connected *ifc)
{
#if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
	/* Extra checks for this OS path. */

	/* Don't configure PtP addresses on broadcast ifs or reverse */
	if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
		if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
			zlog_debug("Failed to set intf addr: mismatch p2p and connected");

		return ZEBRA_DPLANE_REQUEST_FAILURE;
	}

	/* Ensure that no existing installed v4 route conflicts with
	 * the new interface prefix. This check must be done in the
	 * zebra pthread context, and any route delete (if needed)
	 * is enqueued before the interface address programming attempt.
	 */
	if (ifc->address->family == AF_INET) {
		struct prefix_ipv4 *p;

		p = (struct prefix_ipv4 *)ifc->address;
		rib_lookup_and_pushup(p, ifp->vrf_id);
	}
#endif

	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
}
2310
2311 /*
2312 * Enqueue interface address remove/uninstall for the dataplane.
2313 */
2314 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
2315 const struct connected *ifc)
2316 {
2317 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
2318 }
2319
/*
 * Common helper for interface-address install/uninstall: builds a
 * context from the interface/connected structs and enqueues it for the
 * dplane pthread. Returns QUEUED on success, FAILURE otherwise.
 */
static enum zebra_dplane_result intf_addr_update_internal(
	const struct interface *ifp, const struct connected *ifc,
	enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char addr_str[PREFIX_STRLEN];

		prefix2str(ifc->address, addr_str, sizeof(addr_str));

		zlog_debug("init intf ctx %s: idx %d, addr %u:%s",
			   dplane_op2str(op), ifp->ifindex, ifp->vrf_id,
			   addr_str);
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	/* NOTE(review): passes vrf_id where zebra_ns_lookup() takes a
	 * namespace id - appears to rely on vrf/ns ids coinciding;
	 * confirm.
	 */
	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	/* Init the interface-addr-specific area */
	memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;
	ctx->u.intf.prefix = *(ifc->address);

	if (if_is_broadcast(ifp))
		ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;

	if (CONNECTED_PEER(ifc)) {
		ctx->u.intf.dest_prefix = *(ifc->destination);
		ctx->u.intf.flags |=
			(DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
	}

	if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
		ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;

	if (ifc->label) {
		size_t len;

		ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;

		/* Use embedded buffer if it's adequate; else allocate. */
		len = strlen(ifc->label);

		if (len < sizeof(ctx->u.intf.label_buf)) {
			strlcpy(ctx->u.intf.label_buf, ifc->label,
				sizeof(ctx->u.intf.label_buf));
			ctx->u.intf.label = ctx->u.intf.label_buf;
		} else {
			/* NOTE(review): heap-allocated label - presumably
			 * released when the ctx is freed; confirm that
			 * dplane_ctx_free() handles this case.
			 */
			ctx->u.intf.label = strdup(ifc->label);
		}
	}

	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
					  1, memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2401
2402 /*
2403 * Enqueue vxlan/evpn mac add (or update).
2404 */
2405 enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
2406 const struct interface *bridge_ifp,
2407 vlanid_t vid,
2408 const struct ethaddr *mac,
2409 struct in_addr vtep_ip,
2410 bool sticky)
2411 {
2412 enum zebra_dplane_result result;
2413
2414 /* Use common helper api */
2415 result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
2416 vid, mac, vtep_ip, sticky);
2417 return result;
2418 }
2419
2420 /*
2421 * Enqueue vxlan/evpn mac delete.
2422 */
2423 enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
2424 const struct interface *bridge_ifp,
2425 vlanid_t vid,
2426 const struct ethaddr *mac,
2427 struct in_addr vtep_ip)
2428 {
2429 enum zebra_dplane_result result;
2430
2431 /* Use common helper api */
2432 result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
2433 vid, mac, vtep_ip, false);
2434 return result;
2435 }
2436
/*
 * Common helper api for MAC address/vxlan updates: builds a context
 * from the arguments and enqueues it for the dplane pthread.
 * Returns QUEUED on success, FAILURE otherwise.
 *
 * NOTE(review): br_ifp is dereferenced without a NULL check - assumes
 * callers always pass a valid bridge interface; confirm.
 */
static enum zebra_dplane_result
mac_update_internal(enum dplane_op_e op,
		    const struct interface *ifp,
		    const struct interface *br_ifp,
		    vlanid_t vid,
		    const struct ethaddr *mac,
		    struct in_addr vtep_ip,
		    bool sticky)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];

		zlog_debug("init mac ctx %s: mac %s, ifp %s, vtep %s",
			   dplane_op2str(op),
			   prefix_mac2str(mac, buf1, sizeof(buf1)),
			   ifp->name,
			   inet_ntop(AF_INET, &vtep_ip, buf2, sizeof(buf2)));
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	/* NOTE(review): passes vrf_id where zebra_ns_lookup() takes a
	 * namespace id - appears to rely on vrf/ns ids coinciding;
	 * confirm.
	 */
	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;

	/* Init the mac-specific data area */
	memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));

	ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
	ctx->u.macinfo.vtep_ip = vtep_ip;
	ctx->u.macinfo.mac = *mac;
	ctx->u.macinfo.vid = vid;
	ctx->u.macinfo.is_sticky = sticky;

	/* Enqueue for processing on the dplane pthread */
	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2503
2504 /*
2505 * Enqueue evpn neighbor add for the dataplane.
2506 */
2507 enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
2508 const struct ipaddr *ip,
2509 const struct ethaddr *mac,
2510 uint32_t flags)
2511 {
2512 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2513
2514 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL,
2515 ifp, mac, ip, flags, DPLANE_NUD_NOARP);
2516
2517 return result;
2518 }
2519
2520 /*
2521 * Enqueue evpn neighbor update for the dataplane.
2522 */
2523 enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
2524 const struct ipaddr *ip,
2525 const struct ethaddr *mac)
2526 {
2527 enum zebra_dplane_result result;
2528
2529 result = neigh_update_internal(DPLANE_OP_NEIGH_UPDATE,
2530 ifp, mac, ip, 0, DPLANE_NUD_PROBE);
2531
2532 return result;
2533 }
2534
2535 /*
2536 * Enqueue evpn neighbor delete for the dataplane.
2537 */
2538 enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
2539 const struct ipaddr *ip)
2540 {
2541 enum zebra_dplane_result result;
2542
2543 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE,
2544 ifp, NULL, ip, 0, 0);
2545
2546 return result;
2547 }
2548
2549 /*
2550 * Enqueue evpn VTEP add for the dataplane.
2551 */
2552 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
2553 const struct in_addr *ip,
2554 vni_t vni)
2555 {
2556 enum zebra_dplane_result result;
2557 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2558 struct ipaddr addr;
2559
2560 if (IS_ZEBRA_DEBUG_VXLAN)
2561 zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
2562 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2563
2564 SET_IPADDR_V4(&addr);
2565 addr.ipaddr_v4 = *ip;
2566
2567 result = neigh_update_internal(DPLANE_OP_VTEP_ADD,
2568 ifp, &mac, &addr, 0, 0);
2569
2570 return result;
2571 }
2572
/*
 * Enqueue evpn VTEP delete for the dataplane.
 */
2576 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
2577 const struct in_addr *ip,
2578 vni_t vni)
2579 {
2580 enum zebra_dplane_result result;
2581 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2582 struct ipaddr addr;
2583
2584 if (IS_ZEBRA_DEBUG_VXLAN)
2585 zlog_debug(
2586 "Uninstall %s from flood list for VNI %u intf %s(%u)",
2587 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2588
2589 SET_IPADDR_V4(&addr);
2590 addr.ipaddr_v4 = *ip;
2591
2592 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE,
2593 ifp, &mac, &addr, 0, 0);
2594
2595 return result;
2596 }
2597
2598 /*
2599 * Common helper api for evpn neighbor updates
2600 */
2601 static enum zebra_dplane_result
2602 neigh_update_internal(enum dplane_op_e op,
2603 const struct interface *ifp,
2604 const struct ethaddr *mac,
2605 const struct ipaddr *ip,
2606 uint32_t flags, uint16_t state)
2607 {
2608 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2609 int ret;
2610 struct zebra_dplane_ctx *ctx = NULL;
2611 struct zebra_ns *zns;
2612
2613 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2614 char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];
2615
2616 zlog_debug("init neigh ctx %s: ifp %s, mac %s, ip %s",
2617 dplane_op2str(op),
2618 prefix_mac2str(mac, buf1, sizeof(buf1)),
2619 ifp->name,
2620 ipaddr2str(ip, buf2, sizeof(buf2)));
2621 }
2622
2623 ctx = dplane_ctx_alloc();
2624
2625 ctx->zd_op = op;
2626 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2627 ctx->zd_vrf_id = ifp->vrf_id;
2628
2629 zns = zebra_ns_lookup(ifp->vrf_id);
2630 dplane_ctx_ns_init(ctx, zns, false);
2631
2632 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2633 ctx->zd_ifindex = ifp->ifindex;
2634
2635 /* Init the neighbor-specific data area */
2636 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
2637
2638 ctx->u.neigh.ip_addr = *ip;
2639 if (mac)
2640 ctx->u.neigh.mac = *mac;
2641 ctx->u.neigh.flags = flags;
2642 ctx->u.neigh.state = state;
2643
2644 /* Enqueue for processing on the dplane pthread */
2645 ret = dplane_update_enqueue(ctx);
2646
2647 /* Increment counter */
2648 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
2649 memory_order_relaxed);
2650
2651 if (ret == AOK)
2652 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2653 else {
2654 /* Error counter */
2655 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
2656 memory_order_relaxed);
2657 dplane_ctx_free(&ctx);
2658 }
2659
2660 return result;
2661 }
2662
2663 /*
2664 * Handler for 'show dplane'
2665 */
2666 int dplane_show_helper(struct vty *vty, bool detailed)
2667 {
2668 uint64_t queued, queue_max, limit, errs, incoming, yields,
2669 other_errs;
2670
2671 /* Using atomics because counters are being changed in different
2672 * pthread contexts.
2673 */
2674 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
2675 memory_order_relaxed);
2676 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2677 memory_order_relaxed);
2678 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
2679 memory_order_relaxed);
2680 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
2681 memory_order_relaxed);
2682 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
2683 memory_order_relaxed);
2684 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
2685 memory_order_relaxed);
2686 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
2687 memory_order_relaxed);
2688
2689 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
2690 incoming);
2691 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
2692 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
2693 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
2694 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
2695 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
2696 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
2697
2698 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
2699 memory_order_relaxed);
2700 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
2701 memory_order_relaxed);
2702 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
2703 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
2704
2705 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
2706 memory_order_relaxed);
2707 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
2708 memory_order_relaxed);
2709 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
2710 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
2711
2712 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
2713 memory_order_relaxed);
2714 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
2715 memory_order_relaxed);
2716 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
2717 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
2718
2719 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
2720 memory_order_relaxed);
2721 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
2722 memory_order_relaxed);
2723 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
2724 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
2725
2726 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
2727 memory_order_relaxed);
2728 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
2729 memory_order_relaxed);
2730 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
2731 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
2732
2733 return CMD_SUCCESS;
2734 }
2735
2736 /*
2737 * Handler for 'show dplane providers'
2738 */
2739 int dplane_show_provs_helper(struct vty *vty, bool detailed)
2740 {
2741 struct zebra_dplane_provider *prov;
2742 uint64_t in, in_max, out, out_max;
2743
2744 vty_out(vty, "Zebra dataplane providers:\n");
2745
2746 DPLANE_LOCK();
2747 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
2748 DPLANE_UNLOCK();
2749
2750 /* Show counters, useful info from each registered provider */
2751 while (prov) {
2752
2753 in = atomic_load_explicit(&prov->dp_in_counter,
2754 memory_order_relaxed);
2755 in_max = atomic_load_explicit(&prov->dp_in_max,
2756 memory_order_relaxed);
2757 out = atomic_load_explicit(&prov->dp_out_counter,
2758 memory_order_relaxed);
2759 out_max = atomic_load_explicit(&prov->dp_out_max,
2760 memory_order_relaxed);
2761
2762 vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
2763 "out: %"PRIu64", q_max: %"PRIu64"\n",
2764 prov->dp_name, prov->dp_id, in, in_max, out, out_max);
2765
2766 DPLANE_LOCK();
2767 prov = TAILQ_NEXT(prov, dp_prov_link);
2768 DPLANE_UNLOCK();
2769 }
2770
2771 return CMD_SUCCESS;
2772 }
2773
2774 /*
2775 * Helper for 'show run' etc.
2776 */
2777 int dplane_config_write_helper(struct vty *vty)
2778 {
2779 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
2780 vty_out(vty, "zebra dplane limit %u\n",
2781 zdplane_info.dg_max_queued_updates);
2782
2783 return 0;
2784 }
2785
2786 /*
2787 * Provider registration
2788 */
/* Register a dataplane provider plugin. 'fp' (the process callback) is
 * required; 'start_fp', 'fini_fp' and 'data' are optional. On success
 * the new provider is inserted into the global provider list in
 * priority order and, if 'prov_p' is non-NULL, *prov_p receives the new
 * provider (NULL on validation failure). Returns 0 or EINVAL.
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     int (*start_fp)(struct zebra_dplane_provider *),
			     int (*fp)(struct zebra_dplane_provider *),
			     int (*fini_fp)(struct zebra_dplane_provider *,
					    bool early),
			     void *data,
			     struct zebra_dplane_provider **prov_p)
{
	int ret = 0;
	struct zebra_dplane_provider *p = NULL, *last;

	/* Validate: the process callback is mandatory */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	/* Priority must lie within the valid provider-priority range */
	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_flags = flags;
	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_start = start_fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	/* Assign a unique id under the lock */
	p->dp_id = ++zdplane_info.dg_provider_id;

	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority: find the first provider
	 * with a strictly higher priority value and insert before it.
	 */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	/* And unlock */
	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	/* Note: on an error path, *prov_p is set to NULL */
	if (prov_p)
		*prov_p = p;

	return ret;
}
2864
2865 /* Accessors for provider attributes */
/* Accessor: provider's display name */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}
2870
/* Accessor: provider's unique id, assigned at registration */
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}
2875
/* Accessor: opaque per-provider data supplied at registration */
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}
2880
/* Accessor: per-cycle work limit. Note this ignores 'prov' and returns
 * the single global updates-per-cycle value shared by all providers.
 */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
2885
2886 /* Lock/unlock a provider's mutex - iff the provider was registered with
2887 * the THREADED flag.
2888 */
/* Lock the provider's mutex - a no-op unless the provider registered
 * with the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
2894
/* Unlock the provider's mutex - a no-op unless the provider registered
 * with the THREADED flag.
 */
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
2900
2901 /*
2902 * Dequeue and maintain associated counter
2903 */
2904 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
2905 struct zebra_dplane_provider *prov)
2906 {
2907 struct zebra_dplane_ctx *ctx = NULL;
2908
2909 dplane_provider_lock(prov);
2910
2911 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2912 if (ctx) {
2913 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2914
2915 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
2916 memory_order_relaxed);
2917 }
2918
2919 dplane_provider_unlock(prov);
2920
2921 return ctx;
2922 }
2923
2924 /*
2925 * Dequeue work to a list, return count
2926 */
2927 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
2928 struct dplane_ctx_q *listp)
2929 {
2930 int limit, ret;
2931 struct zebra_dplane_ctx *ctx;
2932
2933 limit = zdplane_info.dg_updates_per_cycle;
2934
2935 dplane_provider_lock(prov);
2936
2937 for (ret = 0; ret < limit; ret++) {
2938 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2939 if (ctx) {
2940 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2941
2942 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
2943 } else {
2944 break;
2945 }
2946 }
2947
2948 if (ret > 0)
2949 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
2950 memory_order_relaxed);
2951
2952 dplane_provider_unlock(prov);
2953
2954 return ret;
2955 }
2956
2957 /*
2958 * Enqueue and maintain associated counter
2959 */
2960 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
2961 struct zebra_dplane_ctx *ctx)
2962 {
2963 dplane_provider_lock(prov);
2964
2965 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
2966 zd_q_entries);
2967
2968 dplane_provider_unlock(prov);
2969
2970 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
2971 memory_order_relaxed);
2972 }
2973
2974 /*
2975 * Accessor for provider object
2976 */
/* Accessor: true if the provider registered with the THREADED flag */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
2981
2982 /*
2983 * Internal helper that copies information from a zebra ns object; this is
2984 * called in the zebra main pthread context as part of dplane ctx init.
2985 */
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* On netlink platforms, capture the dplane netlink socket info */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
2996
2997 /*
2998 * Provider api to signal that work/events are available
2999 * for the dataplane pthread.
3000 */
3001 int dplane_provider_work_ready(void)
3002 {
3003 /* Note that during zebra startup, we may be offered work before
3004 * the dataplane pthread (and thread-master) are ready. We want to
3005 * enqueue the work, but the event-scheduling machinery may not be
3006 * available.
3007 */
3008 if (zdplane_info.dg_run) {
3009 thread_add_event(zdplane_info.dg_master,
3010 dplane_thread_loop, NULL, 0,
3011 &zdplane_info.dg_t_update);
3012 }
3013
3014 return AOK;
3015 }
3016
3017 /*
3018 * Enqueue a context directly to zebra main.
3019 */
3020 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
3021 {
3022 struct dplane_ctx_q temp_list;
3023
3024 /* Zebra's api takes a list, so we need to use a temporary list */
3025 TAILQ_INIT(&temp_list);
3026
3027 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
3028 (zdplane_info.dg_results_cb)(&temp_list);
3029 }
3030
3031 /*
3032 * Kernel dataplane provider
3033 */
3034
3035 /*
3036 * Handler for kernel LSP updates
3037 */
3038 static enum zebra_dplane_result
3039 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
3040 {
3041 enum zebra_dplane_result res;
3042
3043 /* Call into the synchronous kernel-facing code here */
3044 res = kernel_lsp_update(ctx);
3045
3046 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3047 atomic_fetch_add_explicit(
3048 &zdplane_info.dg_lsp_errors, 1,
3049 memory_order_relaxed);
3050
3051 return res;
3052 }
3053
3054 /*
3055 * Handler for kernel pseudowire updates
3056 */
3057 static enum zebra_dplane_result
3058 kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
3059 {
3060 enum zebra_dplane_result res;
3061
3062 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3063 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
3064 dplane_ctx_get_ifname(ctx),
3065 dplane_op2str(ctx->zd_op),
3066 dplane_ctx_get_pw_af(ctx),
3067 dplane_ctx_get_pw_local_label(ctx),
3068 dplane_ctx_get_pw_remote_label(ctx));
3069
3070 res = kernel_pw_update(ctx);
3071
3072 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3073 atomic_fetch_add_explicit(
3074 &zdplane_info.dg_pw_errors, 1,
3075 memory_order_relaxed);
3076
3077 return res;
3078 }
3079
3080 /*
3081 * Handler for kernel route updates
3082 */
3083 static enum zebra_dplane_result
3084 kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
3085 {
3086 enum zebra_dplane_result res;
3087
3088 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3089 char dest_str[PREFIX_STRLEN];
3090
3091 prefix2str(dplane_ctx_get_dest(ctx),
3092 dest_str, sizeof(dest_str));
3093
3094 zlog_debug("%u:%s Dplane route update ctx %p op %s",
3095 dplane_ctx_get_vrf(ctx), dest_str,
3096 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
3097 }
3098
3099 /* Call into the synchronous kernel-facing code here */
3100 res = kernel_route_update(ctx);
3101
3102 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3103 atomic_fetch_add_explicit(
3104 &zdplane_info.dg_route_errors, 1,
3105 memory_order_relaxed);
3106
3107 return res;
3108 }
3109
3110 /*
3111 * Handler for kernel-facing interface address updates
3112 */
3113 static enum zebra_dplane_result
3114 kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
3115 {
3116 enum zebra_dplane_result res;
3117
3118 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3119 char dest_str[PREFIX_STRLEN];
3120
3121 prefix2str(dplane_ctx_get_intf_addr(ctx), dest_str,
3122 sizeof(dest_str));
3123
3124 zlog_debug("Dplane intf %s, idx %u, addr %s",
3125 dplane_op2str(dplane_ctx_get_op(ctx)),
3126 dplane_ctx_get_ifindex(ctx), dest_str);
3127 }
3128
3129 res = kernel_address_update_ctx(ctx);
3130
3131 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3132 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3133 1, memory_order_relaxed);
3134
3135 return res;
3136 }
3137
3138 /**
3139 * kernel_dplane_nexthop_update() - Handler for kernel nexthop updates
3140 *
3141 * @ctx: Dataplane context
3142 *
3143 * Return: Dataplane result flag
3144 */
3145 static enum zebra_dplane_result
3146 kernel_dplane_nexthop_update(struct zebra_dplane_ctx *ctx)
3147 {
3148 enum zebra_dplane_result res;
3149
3150 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3151 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
3152 dplane_ctx_get_nhe_id(ctx), ctx,
3153 dplane_op2str(dplane_ctx_get_op(ctx)));
3154 }
3155
3156 res = kernel_nexthop_update(ctx);
3157
3158 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3159 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3160 memory_order_relaxed);
3161
3162 return res;
3163 }
3164
3165 /*
3166 * Handler for kernel-facing EVPN MAC address updates
3167 */
3168 static enum zebra_dplane_result
3169 kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
3170 {
3171 enum zebra_dplane_result res;
3172
3173 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3174 char buf[ETHER_ADDR_STRLEN];
3175
3176 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
3177 sizeof(buf));
3178
3179 zlog_debug("Dplane %s, mac %s, ifindex %u",
3180 dplane_op2str(dplane_ctx_get_op(ctx)),
3181 buf, dplane_ctx_get_ifindex(ctx));
3182 }
3183
3184 res = kernel_mac_update_ctx(ctx);
3185
3186 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3187 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
3188 1, memory_order_relaxed);
3189
3190 return res;
3191 }
3192
3193 /*
3194 * Handler for kernel-facing EVPN neighbor updates
3195 */
3196 static enum zebra_dplane_result
3197 kernel_dplane_neigh_update(struct zebra_dplane_ctx *ctx)
3198 {
3199 enum zebra_dplane_result res;
3200
3201 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3202 char buf[PREFIX_STRLEN];
3203
3204 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
3205 sizeof(buf));
3206
3207 zlog_debug("Dplane %s, ip %s, ifindex %u",
3208 dplane_op2str(dplane_ctx_get_op(ctx)),
3209 buf, dplane_ctx_get_ifindex(ctx));
3210 }
3211
3212 res = kernel_neigh_update_ctx(ctx);
3213
3214 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3215 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
3216 1, memory_order_relaxed);
3217
3218 return res;
3219 }
3220
3221 /*
3222 * Kernel provider callback
3223 */
/* Process callback for the built-in kernel provider: dequeues up to the
 * per-cycle work limit from the provider's input queue, dispatches each
 * context to the matching kernel-facing handler, records the result
 * status on the context, and passes it to the provider's output queue.
 * Reschedules itself via dplane_provider_work_ready() if it hit the
 * limit with work possibly still pending.
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* A previous provider plugin may have asked to skip the
		 * kernel update.
		 */
		if (dplane_ctx_is_skip_kernel(ctx)) {
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			goto skip_one;
		}

		/* Dispatch to appropriate kernel-facing apis */
		switch (dplane_ctx_get_op(ctx)) {

		case DPLANE_OP_ROUTE_INSTALL:
		case DPLANE_OP_ROUTE_UPDATE:
		case DPLANE_OP_ROUTE_DELETE:
			res = kernel_dplane_route_update(ctx);
			break;

		case DPLANE_OP_NH_INSTALL:
		case DPLANE_OP_NH_UPDATE:
		case DPLANE_OP_NH_DELETE:
			res = kernel_dplane_nexthop_update(ctx);
			break;

		case DPLANE_OP_LSP_INSTALL:
		case DPLANE_OP_LSP_UPDATE:
		case DPLANE_OP_LSP_DELETE:
			res = kernel_dplane_lsp_update(ctx);
			break;

		case DPLANE_OP_PW_INSTALL:
		case DPLANE_OP_PW_UNINSTALL:
			res = kernel_dplane_pw_update(ctx);
			break;

		case DPLANE_OP_ADDR_INSTALL:
		case DPLANE_OP_ADDR_UNINSTALL:
			res = kernel_dplane_address_update(ctx);
			break;

		case DPLANE_OP_MAC_INSTALL:
		case DPLANE_OP_MAC_DELETE:
			res = kernel_dplane_mac_update(ctx);
			break;

		case DPLANE_OP_NEIGH_INSTALL:
		case DPLANE_OP_NEIGH_UPDATE:
		case DPLANE_OP_NEIGH_DELETE:
		case DPLANE_OP_VTEP_ADD:
		case DPLANE_OP_VTEP_DELETE:
			res = kernel_dplane_neigh_update(ctx);
			break;

		/* Ignore 'notifications' - no-op */
		case DPLANE_OP_SYS_ROUTE_ADD:
		case DPLANE_OP_SYS_ROUTE_DELETE:
		case DPLANE_OP_ROUTE_NOTIFY:
		case DPLANE_OP_LSP_NOTIFY:
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			break;

		default:
			/* Unknown op: count it and report failure */
			atomic_fetch_add_explicit(
				&zdplane_info.dg_other_errors, 1,
				memory_order_relaxed);

			res = ZEBRA_DPLANE_REQUEST_FAILURE;
			break;
		}

	skip_one:
		/* Record the outcome and hand off to the next stage */
		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
3333
3334 #if DPLANE_TEST_PROVIDER
3335
3336 /*
3337 * Test dataplane provider plugin
3338 */
3339
3340 /*
3341 * Test provider process callback
3342 */
/*
 * Test provider process callback: marks every dequeued context as
 * successful and moves it from the 'in' queue to the 'out' queue,
 * up to the per-cycle work limit.
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s': op %s",
				   dplane_provider_get_name(prov),
				   dplane_op2str(dplane_ctx_get_op(ctx)));

		/* Unconditionally report success for test purposes */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}
3384
3385 /*
3386 * Test provider shutdown/fini callback
3387 */
/*
 * Test provider shutdown/fini callback: just logs the (early or final)
 * shutdown call; there is no state to clean up.
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
3398 #endif /* DPLANE_TEST_PROVIDER */
3399
3400 /*
3401 * Register default kernel provider
3402 */
/*
 * Register the default kernel provider (and, when compiled in, the
 * optional test provider). Registration failures are logged but not
 * fatal here.
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       kernel_dplane_process_func,
				       NULL,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif /* DPLANE_TEST_PROVIDER */
}
3432
3433 /* Indicates zebra shutdown/exit is in progress. Some operations may be
3434 * simplified or skipped during shutdown processing.
3435 */
/* Accessor: true once zebra_dplane_pre_finish() has run */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
3440
3441 /*
3442 * Early or pre-shutdown, de-init notification api. This runs pretty
3443 * early during zebra shutdown, as a signal to stop new work and prepare
3444 * for updates generated by shutdown/cleanup activity, as zebra tries to
3445 * remove everything it's responsible for.
3446 * NB: This runs in the main zebra pthread context.
3447 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Mark the dataplane as shutting down; dplane_is_in_shutdown()
	 * reports this flag to other modules.
	 */
	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
3457
3458 /*
3459 * Utility to determine whether work remains enqueued within the dplane;
3460 * used during system shutdown processing.
3461 */
/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing. Checks the incoming update
 * queue and each provider's in/out queues, taking the appropriate lock
 * around each queue peek.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	/* Anything still on the incoming queue means work is pending */
	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	/* Walk the providers, peeking at each one's queues under its
	 * own lock; the provider list itself is advanced under the
	 * global lock.
	 */
	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
3507
3508 /*
3509 * Shutdown-time intermediate callback, used to determine when all pending
3510 * in-flight updates are done. If there's still work to do, reschedules itself.
3511 * If all work is done, schedules an event to the main zebra thread for
3512 * final zebra shutdown.
3513 * This runs in the dplane pthread context.
3514 */
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules
 * itself on a short (100 msec) timer. If all work is done, schedules an
 * event to the main zebra thread for final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
3538
3539 /*
3540 * Shutdown, de-init api. This runs pretty late during shutdown,
3541 * after zebra has tried to free/remove/uninstall all routes during shutdown.
3542 * At this point, dplane work may still remain to be done, so we can't just
3543 * blindly terminate. If there's still work to do, we'll periodically check
3544 * and when done, we'll enqueue a task to the zebra main thread for final
3545 * termination processing.
3546 *
3547 * NB: This runs in the main zebra thread context.
3548 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the shutdown-status polling loop on the dplane
	 * pthread; it reschedules itself until all work drains.
	 */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
3558
3559 /*
3560 * Main dataplane pthread event loop. The thread takes new incoming work
3561 * and offers it to the first provider. It then iterates through the
3562 * providers, taking complete work from each one and offering it
3563 * to the next in order. At each step, a limited number of updates are
3564 * processed during a cycle in order to provide some fairness.
3565 *
3566 * This loop through the providers is only run once, so that the dataplane
3567 * pthread can look for other pending work - such as i/o work on behalf of
3568 * providers.
3569 */
static int dplane_thread_loop(struct thread *event)
{
	/* Temp list holding the work being shuttled between providers */
	struct dplane_ctx_q work_list;
	/* Contexts that report failure; returned to zebra main separately */
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list. The global lock protects both the incoming queue
	 * and the provider list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list.
	 * NOTE(review): prov is dereferenced below without a NULL check;
	 * this relies on the kernel provider always being registered at
	 * init time (dplane_provider_init) -- confirm.
	 */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	/* 'counter' now holds how many contexts we pulled from zebra;
	 * account for them leaving the incoming queue.
	 */
	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider, under the provider's
		 * own lock since its queues are shared with its pthread.
		 */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* NOTE(review): 'counter' still includes any contexts just
		 * moved to the error list, so dp_in_counter/dp_in_queued may
		 * over-count by that amount -- confirm intended.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		/* Track the high-water mark of the provider's input queue */
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider; 'counter' is
		 * reused here to count completed contexts, bounded by the
		 * same per-cycle limit.
		 */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider; the global lock is held only for the
		 * list-link traversal, not across provider callbacks.
		 */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
3749
3750 /*
3751 * Final phase of shutdown, after all work enqueued to dplane has been
3752 * processed. This is called from the zebra main pthread context.
3753 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	/* Cancel any pending work event; the async variant is used because
	 * the event belongs to the dplane pthread's thread-master, not the
	 * main pthread we're running in.
	 * NOTE(review): dg_t_update is read here without the dplane lock --
	 * presumably safe because dg_run is already false; confirm.
	 */
	if (zdplane_info.dg_t_update)
		thread_cancel_async(zdplane_info.dg_t_update->master,
				    &zdplane_info.dg_t_update, NULL);

	/* Stop (join) the pthread before destroying its resources */
	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	/* Clear the now-dangling handles so later code can't use them */
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
3780
3781 /*
3782 * Initialize the dataplane module during startup, internal/private version
3783 */
3784 static void zebra_dplane_init_internal(void)
3785 {
3786 memset(&zdplane_info, 0, sizeof(zdplane_info));
3787
3788 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
3789
3790 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
3791 TAILQ_INIT(&zdplane_info.dg_providers_q);
3792
3793 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
3794
3795 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
3796
3797 /* Register default kernel 'provider' during init */
3798 dplane_provider_init();
3799 }
3800
3801 /*
3802 * Start the dataplane pthread. This step needs to be run later than the
3803 * 'init' step, in case zebra has fork-ed.
3804 */
3805 void zebra_dplane_start(void)
3806 {
3807 struct zebra_dplane_provider *prov;
3808 struct frr_pthread_attr pattr = {
3809 .start = frr_pthread_attr_default.start,
3810 .stop = frr_pthread_attr_default.stop
3811 };
3812
3813 /* Start dataplane pthread */
3814
3815 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
3816 "zebra_dplane");
3817
3818 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
3819
3820 zdplane_info.dg_run = true;
3821
3822 /* Enqueue an initial event for the dataplane pthread */
3823 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
3824 &zdplane_info.dg_t_update);
3825
3826 /* Call start callbacks for registered providers */
3827
3828 DPLANE_LOCK();
3829 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
3830 DPLANE_UNLOCK();
3831
3832 while (prov) {
3833
3834 if (prov->dp_start)
3835 (prov->dp_start)(prov);
3836
3837 /* Locate next provider */
3838 DPLANE_LOCK();
3839 prov = TAILQ_NEXT(prov, dp_prov_link);
3840 DPLANE_UNLOCK();
3841 }
3842
3843 frr_pthread_run(zdplane_info.dg_pthread, NULL);
3844 }
3845
3846 /*
3847 * Initialize the dataplane module at startup; called by zebra rib_init()
3848 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	/* Set up global state first: init_internal memsets zdplane_info,
	 * so the results callback must be stored only afterwards.
	 */
	zebra_dplane_init_internal();
	/* Callback used to hand completed/error contexts back to zebra main */
	zdplane_info.dg_results_cb = results_fp;
}