/* (gitweb scrape banner converted to a comment)
 * Source: git.proxmox.com mirror_frr.git — zebra/zebra_dplane.c
 * Commit subject: "zebra: include zebra nexthop debug in show runn"
 */
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_memory.h"
33 #include "zebra/zebra_router.h"
34 #include "zebra/zebra_dplane.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37
38 /* Memory type for context blocks */
39 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
40 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider")
41
42 #ifndef AOK
43 # define AOK 0
44 #endif
45
46 /* Enable test dataplane provider */
47 /*#define DPLANE_TEST_PROVIDER 1 */
48
49 /* Default value for max queued incoming updates */
50 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
51
52 /* Default value for new work per cycle */
53 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
54
55 /* Validation check macro for context blocks */
56 /* #define DPLANE_DEBUG 1 */
57
58 #ifdef DPLANE_DEBUG
59
60 # define DPLANE_CTX_VALID(p) \
61 assert((p) != NULL)
62
63 #else
64
65 # define DPLANE_CTX_VALID(p)
66
67 #endif /* DPLANE_DEBUG */
68
/*
 * Nexthop information captured for nexthop/nexthop group updates
 */
struct dplane_nexthop_info {
	/* Nexthop-group id */
	uint32_t id;
	afi_t afi;
	vrf_id_t vrf_id;
	/* Route/owner type; exposed via dplane_ctx_get_nhe_type() */
	int type;

	/* Full nexthop list for the group */
	struct nexthop_group ng;
	/* Fixed-size group-member representation (bounded by MULTIPATH_NUM) */
	struct nh_grp nh_grp[MULTIPATH_NUM];
	uint8_t nh_grp_count;
};
82
/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* Address-family and sub-address-family of the route */
	afi_t zd_afi;
	safi_t zd_safi;

	/* Route type; the "old" values below are meaningful for update
	 * operations that replace a pre-existing route.
	 */
	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthop hash entry info */
	struct dplane_nexthop_info nhe;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
124
/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	int type;
	int af;
	int status;
	uint32_t flags;
	/* PW destination address */
	union g_addr dest;
	/* Local and remote MPLS labels for the pseudowire */
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Nexthops */
	struct nexthop_group nhg;

	/* Per-protocol pseudowire fields */
	union pw_protocol_fields fields;
};
142
/*
 * Interface/prefix info for the dataplane
 */
struct dplane_intf_info {

	uint32_t metric;
	/* Flag values for 'flags', below */
	uint32_t flags;

#define DPLANE_INTF_CONNECTED   (1 << 0) /* Connected peer, p2p */
#define DPLANE_INTF_SECONDARY   (1 << 1)
#define DPLANE_INTF_BROADCAST   (1 << 2)
#define DPLANE_INTF_HAS_DEST    DPLANE_INTF_CONNECTED
#define DPLANE_INTF_HAS_LABEL   (1 << 4)

	/* Interface address/prefix */
	struct prefix prefix;

	/* Dest address, for p2p, or broadcast prefix */
	struct prefix dest_prefix;

	/* Optional label string: points at label_buf for short labels,
	 * or at heap memory for longer ones; dplane_ctx_free() frees the
	 * heap case (label != label_buf).
	 */
	char *label;
	char label_buf[32];
};
166
/*
 * EVPN MAC address info for the dataplane.
 */
struct dplane_mac_info {
	/* VLAN id and bridge ifindex for the MAC entry */
	vlanid_t vid;
	ifindex_t br_ifindex;
	struct ethaddr mac;
	/* Remote VTEP address associated with the MAC */
	struct in_addr vtep_ip;
	bool is_sticky;

};
178
/*
 * EVPN neighbor info for the dataplane
 */
struct dplane_neigh_info {
	/* Neighbor IP and MAC addresses */
	struct ipaddr ip_addr;
	struct ethaddr mac;
	uint32_t flags;
	uint16_t state;
};
188
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True when this op replaces an existing object ("update") */
	bool zd_is_update;

	/* Sequence numbers; "old" is meaningful for update operations */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* Some updates may be generated by notifications: allow the
	 * plugin to notice and ignore results from its own notifications.
	 */
	uint32_t zd_notif_provider;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Interface info, for ops that reference an interface */
	char zd_ifname[INTERFACE_NAMSIZ];
	ifindex_t zd_ifindex;

	/* Support info for different kinds of updates; which member is
	 * valid is determined by zd_op.
	 */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
		struct dplane_intf_info intf;
		struct dplane_mac_info macinfo;
		struct dplane_neigh_info neigh;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
244
245 /* Flag that can be set by a pre-kernel provider as a signal that an update
246 * should bypass the kernel.
247 */
248 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
249
250
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Provider callbacks: start, work-processing, and shutdown.
	 * NOTE(review): early_p presumably distinguishes the early
	 * shutdown phase from final cleanup -- confirm against the
	 * provider registration API.
	 */
	int (*dp_start)(struct zebra_dplane_provider *prov);

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters for contexts inbound/outbound, queue depths, errors */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
298
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Control whether system route notifications should be produced. */
	bool dg_sys_route_notifs;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Per-object-type ingress and error counters */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_nexthops_in;
	_Atomic uint32_t dg_nexthop_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_intf_addrs_in;
	_Atomic uint32_t dg_intf_addr_errors;

	_Atomic uint32_t dg_macs_in;
	_Atomic uint32_t dg_mac_errors;

	_Atomic uint32_t dg_neighs_in;
	_Atomic uint32_t dg_neigh_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
374
375 /*
376 * Lock and unlock for interactions with the zebra 'core' pthread
377 */
378 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
379 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
380
381
382 /*
383 * Lock and unlock for individual providers
384 */
385 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
386 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
387
388 /* Prototypes */
389 static int dplane_thread_loop(struct thread *event);
390 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
391 struct zebra_ns *zns);
392 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
393 enum dplane_op_e op);
394 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
395 enum dplane_op_e op);
396 static enum zebra_dplane_result intf_addr_update_internal(
397 const struct interface *ifp, const struct connected *ifc,
398 enum dplane_op_e op);
399 static enum zebra_dplane_result mac_update_internal(
400 enum dplane_op_e op, const struct interface *ifp,
401 const struct interface *br_ifp,
402 vlanid_t vid, const struct ethaddr *mac,
403 struct in_addr vtep_ip, bool sticky);
404 static enum zebra_dplane_result neigh_update_internal(
405 enum dplane_op_e op,
406 const struct interface *ifp,
407 const struct ethaddr *mac,
408 const struct ipaddr *ip,
409 uint32_t flags, uint16_t state);
410
411 /*
412 * Public APIs
413 */
414
415 /* Obtain thread_master for dataplane thread */
416 struct thread_master *dplane_get_thread_master(void)
417 {
418 return zdplane_info.dg_master;
419 }
420
421 /*
422 * Allocate a dataplane update context
423 */
424 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
425 {
426 struct zebra_dplane_ctx *p;
427
428 /* TODO -- just alloc'ing memory, but would like to maintain
429 * a pool
430 */
431 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
432
433 return p;
434 }
435
436 /* Enable system route notifications */
437 void dplane_enable_sys_route_notifs(void)
438 {
439 zdplane_info.dg_sys_route_notifs = true;
440 }
441
442 /*
443 * Free a dataplane results context.
444 */
445 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
446 {
447 if (pctx == NULL)
448 return;
449
450 DPLANE_CTX_VALID(*pctx);
451
452 /* TODO -- just freeing memory, but would like to maintain
453 * a pool
454 */
455
456 /* Some internal allocations may need to be freed, depending on
457 * the type of info captured in the ctx.
458 */
459 switch ((*pctx)->zd_op) {
460 case DPLANE_OP_ROUTE_INSTALL:
461 case DPLANE_OP_ROUTE_UPDATE:
462 case DPLANE_OP_ROUTE_DELETE:
463 case DPLANE_OP_SYS_ROUTE_ADD:
464 case DPLANE_OP_SYS_ROUTE_DELETE:
465 case DPLANE_OP_ROUTE_NOTIFY:
466
467 /* Free allocated nexthops */
468 if ((*pctx)->u.rinfo.zd_ng.nexthop) {
469 /* This deals with recursive nexthops too */
470 nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);
471
472 (*pctx)->u.rinfo.zd_ng.nexthop = NULL;
473 }
474
475 if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
476 /* This deals with recursive nexthops too */
477 nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
478
479 (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
480 }
481
482 break;
483
484 case DPLANE_OP_NH_INSTALL:
485 case DPLANE_OP_NH_UPDATE:
486 case DPLANE_OP_NH_DELETE: {
487 if ((*pctx)->u.rinfo.nhe.ng.nexthop) {
488 /* This deals with recursive nexthops too */
489 nexthops_free((*pctx)->u.rinfo.nhe.ng.nexthop);
490
491 (*pctx)->u.rinfo.nhe.ng.nexthop = NULL;
492 }
493 break;
494 }
495
496 case DPLANE_OP_LSP_INSTALL:
497 case DPLANE_OP_LSP_UPDATE:
498 case DPLANE_OP_LSP_DELETE:
499 case DPLANE_OP_LSP_NOTIFY:
500 {
501 zebra_nhlfe_t *nhlfe, *next;
502
503 /* Free allocated NHLFEs */
504 for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
505 next = nhlfe->next;
506
507 zebra_mpls_nhlfe_del(nhlfe);
508 }
509
510 /* Clear pointers in lsp struct, in case we're cacheing
511 * free context structs.
512 */
513 (*pctx)->u.lsp.nhlfe_list = NULL;
514 (*pctx)->u.lsp.best_nhlfe = NULL;
515
516 break;
517 }
518
519 case DPLANE_OP_PW_INSTALL:
520 case DPLANE_OP_PW_UNINSTALL:
521 /* Free allocated nexthops */
522 if ((*pctx)->u.pw.nhg.nexthop) {
523 /* This deals with recursive nexthops too */
524 nexthops_free((*pctx)->u.pw.nhg.nexthop);
525
526 (*pctx)->u.pw.nhg.nexthop = NULL;
527 }
528 break;
529
530 case DPLANE_OP_ADDR_INSTALL:
531 case DPLANE_OP_ADDR_UNINSTALL:
532 /* Maybe free label string, if allocated */
533 if ((*pctx)->u.intf.label != NULL &&
534 (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) {
535 free((*pctx)->u.intf.label);
536 (*pctx)->u.intf.label = NULL;
537 }
538 break;
539
540 case DPLANE_OP_MAC_INSTALL:
541 case DPLANE_OP_MAC_DELETE:
542 case DPLANE_OP_NEIGH_INSTALL:
543 case DPLANE_OP_NEIGH_UPDATE:
544 case DPLANE_OP_NEIGH_DELETE:
545 case DPLANE_OP_VTEP_ADD:
546 case DPLANE_OP_VTEP_DELETE:
547 case DPLANE_OP_NONE:
548 break;
549 }
550
551 XFREE(MTYPE_DP_CTX, *pctx);
552 *pctx = NULL;
553 }
554
/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
563
564 /* Enqueue a context block */
565 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
566 const struct zebra_dplane_ctx *ctx)
567 {
568 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
569 }
570
571 /* Append a list of context blocks to another list */
572 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
573 struct dplane_ctx_q *from_list)
574 {
575 if (TAILQ_FIRST(from_list)) {
576 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
577
578 /* And clear 'from' list */
579 TAILQ_INIT(from_list);
580 }
581 }
582
583 /* Dequeue a context block from the head of a list */
584 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
585 {
586 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
587
588 if (ctx)
589 TAILQ_REMOVE(q, ctx, zd_q_entries);
590
591 return ctx;
592 }
593
594 /*
595 * Accessors for information from the context object
596 */
/* Get the result/status recorded in a context */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Set the result/status in a context */
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Test whether the 'skip kernel' flag has been set on a context */
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Set the operation code in a context */
void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
	DPLANE_CTX_VALID(ctx);
	ctx->zd_op = op;
}

/* Get the operation code from a context */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
649
650 const char *dplane_op2str(enum dplane_op_e op)
651 {
652 const char *ret = "UNKNOWN";
653
654 switch (op) {
655 case DPLANE_OP_NONE:
656 ret = "NONE";
657 break;
658
659 /* Route update */
660 case DPLANE_OP_ROUTE_INSTALL:
661 ret = "ROUTE_INSTALL";
662 break;
663 case DPLANE_OP_ROUTE_UPDATE:
664 ret = "ROUTE_UPDATE";
665 break;
666 case DPLANE_OP_ROUTE_DELETE:
667 ret = "ROUTE_DELETE";
668 break;
669 case DPLANE_OP_ROUTE_NOTIFY:
670 ret = "ROUTE_NOTIFY";
671 break;
672
673 /* Nexthop update */
674 case DPLANE_OP_NH_INSTALL:
675 ret = "NH_INSTALL";
676 break;
677 case DPLANE_OP_NH_UPDATE:
678 ret = "NH_UPDATE";
679 break;
680 case DPLANE_OP_NH_DELETE:
681 ret = "NH_DELETE";
682 break;
683
684 case DPLANE_OP_LSP_INSTALL:
685 ret = "LSP_INSTALL";
686 break;
687 case DPLANE_OP_LSP_UPDATE:
688 ret = "LSP_UPDATE";
689 break;
690 case DPLANE_OP_LSP_DELETE:
691 ret = "LSP_DELETE";
692 break;
693 case DPLANE_OP_LSP_NOTIFY:
694 ret = "LSP_NOTIFY";
695 break;
696
697 case DPLANE_OP_PW_INSTALL:
698 ret = "PW_INSTALL";
699 break;
700 case DPLANE_OP_PW_UNINSTALL:
701 ret = "PW_UNINSTALL";
702 break;
703
704 case DPLANE_OP_SYS_ROUTE_ADD:
705 ret = "SYS_ROUTE_ADD";
706 break;
707 case DPLANE_OP_SYS_ROUTE_DELETE:
708 ret = "SYS_ROUTE_DEL";
709 break;
710
711 case DPLANE_OP_ADDR_INSTALL:
712 ret = "ADDR_INSTALL";
713 break;
714 case DPLANE_OP_ADDR_UNINSTALL:
715 ret = "ADDR_UNINSTALL";
716 break;
717
718 case DPLANE_OP_MAC_INSTALL:
719 ret = "MAC_INSTALL";
720 break;
721 case DPLANE_OP_MAC_DELETE:
722 ret = "MAC_DELETE";
723 break;
724
725 case DPLANE_OP_NEIGH_INSTALL:
726 ret = "NEIGH_INSTALL";
727 break;
728 case DPLANE_OP_NEIGH_UPDATE:
729 ret = "NEIGH_UPDATE";
730 break;
731 case DPLANE_OP_NEIGH_DELETE:
732 ret = "NEIGH_DELETE";
733 break;
734 case DPLANE_OP_VTEP_ADD:
735 ret = "VTEP_ADD";
736 break;
737 case DPLANE_OP_VTEP_DELETE:
738 ret = "VTEP_DELETE";
739 break;
740 }
741
742 return ret;
743 }
744
745 const char *dplane_res2str(enum zebra_dplane_result res)
746 {
747 const char *ret = "<Unknown>";
748
749 switch (res) {
750 case ZEBRA_DPLANE_REQUEST_FAILURE:
751 ret = "FAILURE";
752 break;
753 case ZEBRA_DPLANE_REQUEST_QUEUED:
754 ret = "QUEUED";
755 break;
756 case ZEBRA_DPLANE_REQUEST_SUCCESS:
757 ret = "SUCCESS";
758 break;
759 }
760
761 return ret;
762 }
763
/* Set the route destination prefix in a context */
void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
			 const struct prefix *dest)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
}

/* Get the route destination prefix from a context */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}

/* Set the (optional) source prefix; NULL clears it */
void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
{
	DPLANE_CTX_VALID(ctx);

	if (src)
		prefix_copy(&(ctx->u.rinfo.zd_src), src);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
}

/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	/* An all-zero prefix (length zero, unspecified address bytes)
	 * means "no source prefix was set".
	 */
	if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->u.rinfo.zd_src);
	}
}
801
/* True if the context represents an update (replace) operation */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Get the sequence number from a context */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Get the previous sequence number (meaningful for updates) */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* Set the vrf id in a context */
void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_vrf_id = vrf;
}

/* Get the vrf id from a context */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* True if the context was generated from a provider notification */
bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->zd_notif_provider != 0);
}

/* Get the id of the provider that generated the notification */
uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_notif_provider;
}

/* Record the id of the provider that generated the notification */
void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
				   uint32_t id)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_notif_provider = id;
}
/* Get the interface name from a context */
const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifname;
}

/* Get the interface index from a context */
ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifindex;
}

/* Set the route type in a context */
void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_type = type;
}

/* Get the route type from a context */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

/* Get the previous route type (meaningful for updates) */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

/* Set the route afi in a context */
void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_afi = afi;
}

/* Get the route afi from a context */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

/* Set the route safi in a context */
void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_safi = safi;
}

/* Get the route safi from a context */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

/* Set the kernel table id in a context */
void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_table_id = table;
}

/* Get the kernel table id from a context */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}
934
/* Get the route tag from a context */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

/* Set the route tag in a context */
void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_tag = tag;
}

/* Get the previous route tag (meaningful for updates) */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

/* Get the route instance from a context */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

/* Set the route instance in a context */
void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_instance = instance;
}

/* Get the previous route instance (meaningful for updates) */
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}

/* Get the route metric from a context */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

/* Get the previous route metric (meaningful for updates) */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

/* Get the route mtu from a context */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

/* Get the nexthop mtu from a context */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

/* Get the admin distance from a context */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

/* Set the admin distance in a context */
void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_distance = distance;
}

/* Get the previous admin distance (meaningful for updates) */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}
1025
1026 /*
1027 * Set the nexthops associated with a context: note that processing code
1028 * may well expect that nexthops are in canonical (sorted) order, so we
1029 * will enforce that here.
1030 */
1031 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1032 {
1033 DPLANE_CTX_VALID(ctx);
1034
1035 if (ctx->u.rinfo.zd_ng.nexthop) {
1036 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1037 ctx->u.rinfo.zd_ng.nexthop = NULL;
1038 }
1039 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1040 }
1041
/* Get the nexthop group from a context */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

/* Get the "previous" nexthop group (meaningful for updates) */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

/* Get the namespace info captured in a context */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
1065
/* Accessors for nexthop information */

/* Get the nexthop-entry id from a context */
uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.id;
}

/* Get the nexthop-entry afi from a context */
afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.afi;
}

/* Get the nexthop-entry vrf id from a context */
vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.vrf_id;
}

/* Get the nexthop-entry type from a context */
int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.type;
}

/* Get the nexthop-entry nexthop group from a context */
const struct nexthop_group *
dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.rinfo.nhe.ng);
}

/* Get the fixed nexthop-group member array from a context */
const struct nh_grp *
dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.nh_grp;
}

/* Get the count of valid entries in the nh_grp array */
uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.rinfo.nhe.nh_grp_count;
}
1110
/* Accessors for LSP information */

/* Get the incoming MPLS label from a context */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

/* Set the incoming MPLS label in a context */
void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.ile.in_label = label;
}

/* Get the LSP address-family from a context */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

/* Set the LSP address-family in a context */
void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
				uint8_t family)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.addr_family = family;
}

/* Get the LSP flags from a context */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

/* Set the LSP flags in a context */
void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
			      uint32_t flags)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.flags = flags;
}

/* Get the head of the NHLFE list from a context */
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}
1163
1164 zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1165 enum lsp_types_t lsp_type,
1166 enum nexthop_types_t nh_type,
1167 union g_addr *gate,
1168 ifindex_t ifindex,
1169 uint8_t num_labels,
1170 mpls_label_t out_labels[])
1171 {
1172 zebra_nhlfe_t *nhlfe;
1173
1174 DPLANE_CTX_VALID(ctx);
1175
1176 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1177 lsp_type, nh_type, gate,
1178 ifindex, num_labels, out_labels);
1179
1180 return nhlfe;
1181 }
1182
/* Get the LSP's 'best' NHLFE from a context */
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

/* Set the LSP's 'best' NHLFE; returns the stored pointer */
const zebra_nhlfe_t *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
			  zebra_nhlfe_t *nhlfe)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.best_nhlfe = nhlfe;
	return ctx->u.lsp.best_nhlfe;
}

/* Get the LSP's ecmp count from a context */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
1207
/* Get the pseudowire local label from a context */
mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

/* Get the pseudowire remote label from a context */
mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

/* Get the pseudowire type from a context */
int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Get the pseudowire address-family from a context */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

/* Get the pseudowire flags from a context */
uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

/* Get the pseudowire status from a context */
int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

/* Get the pseudowire destination address from a context */
const union g_addr *dplane_ctx_get_pw_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.dest);
}

/* Get the pseudowire protocol-specific fields from a context */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/* Get the pseudowire nexthop group from a context */
const struct nexthop_group *
dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nhg);
}
1273
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.metric;
}

/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
}

/* Is the interface address a secondary address? */
bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
}

/* Is the interface address a broadcast address? */
bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
}

/* Get the interface address/prefix from a context */
const struct prefix *dplane_ctx_get_intf_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.prefix);
}

/* Is a dest (p2p peer or broadcast) address present? */
bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
}

/* Get the dest address/prefix, or NULL if none is present */
const struct prefix *dplane_ctx_get_intf_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST)
		return &(ctx->u.intf.dest_prefix);
	else
		return NULL;
}

/* Is an address label present? */
bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
}

/* Get the address label string from a context */
const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.label;
}
1343
1344 /* Accessors for MAC information */
1345 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1346 {
1347 DPLANE_CTX_VALID(ctx);
1348 return ctx->u.macinfo.vid;
1349 }
1350
1351 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1352 {
1353 DPLANE_CTX_VALID(ctx);
1354 return ctx->u.macinfo.is_sticky;
1355 }
1356
1357 const struct ethaddr *dplane_ctx_mac_get_addr(
1358 const struct zebra_dplane_ctx *ctx)
1359 {
1360 DPLANE_CTX_VALID(ctx);
1361 return &(ctx->u.macinfo.mac);
1362 }
1363
1364 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1365 const struct zebra_dplane_ctx *ctx)
1366 {
1367 DPLANE_CTX_VALID(ctx);
1368 return &(ctx->u.macinfo.vtep_ip);
1369 }
1370
1371 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1372 {
1373 DPLANE_CTX_VALID(ctx);
1374 return ctx->u.macinfo.br_ifindex;
1375 }
1376
1377 /* Accessors for neighbor information */
1378 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1379 const struct zebra_dplane_ctx *ctx)
1380 {
1381 DPLANE_CTX_VALID(ctx);
1382 return &(ctx->u.neigh.ip_addr);
1383 }
1384
1385 const struct ethaddr *dplane_ctx_neigh_get_mac(
1386 const struct zebra_dplane_ctx *ctx)
1387 {
1388 DPLANE_CTX_VALID(ctx);
1389 return &(ctx->u.neigh.mac);
1390 }
1391
1392 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
1393 {
1394 DPLANE_CTX_VALID(ctx);
1395 return ctx->u.neigh.flags;
1396 }
1397
1398 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
1399 {
1400 DPLANE_CTX_VALID(ctx);
1401 return ctx->u.neigh.state;
1402 }
1403
1404 /*
1405 * End of dplane context accessors
1406 */
1407
1408
1409 /*
1410 * Retrieve the limit on the number of pending, unprocessed updates.
1411 */
1412 uint32_t dplane_get_in_queue_limit(void)
1413 {
1414 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
1415 memory_order_relaxed);
1416 }
1417
1418 /*
1419 * Configure limit on the number of pending, queued updates.
1420 */
1421 void dplane_set_in_queue_limit(uint32_t limit, bool set)
1422 {
1423 /* Reset to default on 'unset' */
1424 if (!set)
1425 limit = DPLANE_DEFAULT_MAX_QUEUED;
1426
1427 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
1428 memory_order_relaxed);
1429 }
1430
1431 /*
1432 * Retrieve the current queue depth of incoming, unprocessed updates
1433 */
1434 uint32_t dplane_get_in_queue_len(void)
1435 {
1436 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
1437 memory_order_seq_cst);
1438 }
1439
1440 /*
1441 * Common dataplane context init with zebra namespace info.
1442 */
1443 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
1444 struct zebra_ns *zns,
1445 bool is_update)
1446 {
1447 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
1448
1449 #if defined(HAVE_NETLINK)
1450 /* Increment message counter after copying to context struct - may need
1451 * two messages in some 'update' cases.
1452 */
1453 if (is_update)
1454 zns->netlink_dplane.seq += 2;
1455 else
1456 zns->netlink_dplane.seq++;
1457 #endif /* HAVE_NETLINK */
1458
1459 return AOK;
1460 }
1461
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Returns AOK on success, EINVAL on bad args; on netlink builds it may
 * return ENOENT when the route's nexthop-group is neither installed nor
 * queued (install/update ops only).
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* 'old' values start equal to 'new'; the update path may
	 * overwrite them later with the previous route's values.
	 */
	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
		      re->nhe->nhg->nexthop, NULL);

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Don't need some info when capturing a system notification */
	if (op == DPLANE_OP_SYS_ROUTE_ADD ||
	    op == DPLANE_OP_SYS_ROUTE_DELETE) {
		ret = AOK;
		goto done;
	}

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

#ifdef HAVE_NETLINK
	if (re->nhe_id) {
		/* NOTE(review): assumes the resolve/lookup cannot return
		 * NULL for a nonzero nhe_id - confirm zebra_nhg contract.
		 */
		struct nhg_hash_entry *nhe =
			zebra_nhg_resolve(zebra_nhg_lookup_id(re->nhe_id));

		ctx->u.rinfo.nhe.id = nhe->id;
		/*
		 * Check if the nhe is installed/queued before doing anything
		 * with this route.
		 *
		 * If its a delete we only use the prefix anyway, so this only
		 * matters for INSTALL/UPDATE.
		 */
		if (((op == DPLANE_OP_ROUTE_INSTALL)
		     || (op == DPLANE_OP_ROUTE_UPDATE))
		    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
		    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
			ret = ENOENT;
			goto done;
		}
	}
#endif /* HAVE_NETLINK */

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
1569
/**
 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
 *
 * @ctx: Dataplane context to init
 * @op: Operation being performed
 * @nhe: Nexthop group hash entry
 *
 * Return: AOK on success; EINVAL if ctx or nhe is NULL
 */
static int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx,
				   enum dplane_op_e op,
				   struct nhg_hash_entry *nhe)
{
	struct zebra_vrf *zvrf = NULL;
	struct zebra_ns *zns = NULL;

	int ret = EINVAL;

	if (!ctx || !nhe)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Copy over nhe info */
	ctx->u.rinfo.nhe.id = nhe->id;
	ctx->u.rinfo.nhe.afi = nhe->afi;
	ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
	ctx->u.rinfo.nhe.type = nhe->type;

	/* Deep-copy the nexthop group into the context */
	nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), nhe->nhg);

	/* If its a group, convert it to a grp array of ids */
	if (!zebra_nhg_depends_is_empty(nhe)
	    && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
		ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
			ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);

	zvrf = vrf_info_lookup(nhe->vrf_id);

	/*
	 * Fallback to default namespace if the vrf got ripped out from under
	 * us.
	 */
	zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);

	/*
	 * TODO: Might not need to mark this as an update, since
	 * it probably won't require two messages
	 */
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));

	ret = AOK;

done:
	return ret;
}
1627
/*
 * Capture information for an LSP update in a dplane context.
 * Copies the lsp's nhlfe list into the context; returns AOK on success,
 * ENOMEM if an nhlfe copy fails.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): assumes nhlfe->nexthop->nh_label is non-NULL
		 * for every nhlfe that has a nexthop - confirm with the lsp
		 * setup path.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->num_labels,
				nhlfe->nexthop->nh_label->label);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1692
/*
 * Capture information for a pseudowire update in a dplane context.
 * (Header previously said "LSP update" - copy/paste from the block above.)
 * Also resolves and copies the nexthops used to reach the pw destination.
 * Always returns AOK.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	struct prefix p;
	afi_t afi;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *re;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));

	ctx->zd_vrf_id = pw->vrf_id;
	ctx->zd_ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.dest = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	/* Capture nexthop info for the pw destination. We need to look
	 * up and use zebra datastructs, but we're running in the zebra
	 * pthread here so that should be ok.
	 */
	memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
	p.family = pw->af;
	p.prefixlen = ((pw->af == AF_INET) ?
		       IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);

	afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
	table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
	if (table) {
		/* Find the selected route covering the pw nexthop, and
		 * copy its nexthops into the context.
		 */
		rn = route_node_match(table, &p);
		if (rn) {
			RNODE_FOREACH_RE(rn, re) {
				if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
					break;
			}

			if (re)
				copy_nexthops(&(ctx->u.pw.nhg.nexthop),
					      re->nhe->nhg->nexthop, NULL);

			route_unlock_node(rn);
		}
	}

	return AOK;
}
1765
/*
 * Enqueue a new update,
 * and ensure an event is active for the dataplane pthread.
 * Returns the result of dplane_provider_work_ready().
 */
static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	/* Bump the queued-updates counter atomically; seq_cst ordering
	 * matches the readers of this counter.
	 */
	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also: standard CAS loop that
	 * retries while another thread is racing us upward.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1811
/*
 * Utility that prepares a route update and enqueues it for processing.
 * Returns QUEUED on success; SUCCESS when the ctx init reported ENOENT
 * (nexthop-group not ready); FAILURE otherwise.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->nhe->nhg->nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_update_enqueue(ctx);
	}

	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* ENOENT from ctx init means the NHG wasn't installed or
		 * queued; reported as success (presumably retried later) -
		 * not counted as a route error.
		 */
		if (ret == ENOENT)
			result = ZEBRA_DPLANE_REQUEST_SUCCESS;
		else
			atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
						  1, memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1879
/**
 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
 *
 * @nhe: Nexthop group hash entry where the change occurred
 * @op: The operation to be enqueued
 *
 * Return: ZEBRA_DPLANE_REQUEST_QUEUED on success, otherwise
 * ZEBRA_DPLANE_REQUEST_FAILURE (error counter is incremented)
 */
static enum zebra_dplane_result
dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (!ctx) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_nexthop_init(ctx, op, nhe);
	if (ret == AOK)
		ret = dplane_update_enqueue(ctx);

done:
	/* Update counter (incremented even on failure, so 'in' counts
	 * attempts rather than successes)
	 */
	atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1922
1923 /*
1924 * Enqueue a route 'add' for the dataplane.
1925 */
1926 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1927 struct route_entry *re)
1928 {
1929 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1930
1931 if (rn == NULL || re == NULL)
1932 goto done;
1933
1934 ret = dplane_route_update_internal(rn, re, NULL,
1935 DPLANE_OP_ROUTE_INSTALL);
1936
1937 done:
1938 return ret;
1939 }
1940
1941 /*
1942 * Enqueue a route update for the dataplane.
1943 */
1944 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1945 struct route_entry *re,
1946 struct route_entry *old_re)
1947 {
1948 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1949
1950 if (rn == NULL || re == NULL)
1951 goto done;
1952
1953 ret = dplane_route_update_internal(rn, re, old_re,
1954 DPLANE_OP_ROUTE_UPDATE);
1955 done:
1956 return ret;
1957 }
1958
1959 /*
1960 * Enqueue a route removal for the dataplane.
1961 */
1962 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1963 struct route_entry *re)
1964 {
1965 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1966
1967 if (rn == NULL || re == NULL)
1968 goto done;
1969
1970 ret = dplane_route_update_internal(rn, re, NULL,
1971 DPLANE_OP_ROUTE_DELETE);
1972
1973 done:
1974 return ret;
1975 }
1976
1977 /*
1978 * Notify the dplane when system/connected routes change.
1979 */
1980 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
1981 struct route_entry *re)
1982 {
1983 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1984
1985 /* Ignore this event unless a provider plugin has requested it. */
1986 if (!zdplane_info.dg_sys_route_notifs) {
1987 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1988 goto done;
1989 }
1990
1991 if (rn == NULL || re == NULL)
1992 goto done;
1993
1994 ret = dplane_route_update_internal(rn, re, NULL,
1995 DPLANE_OP_SYS_ROUTE_ADD);
1996
1997 done:
1998 return ret;
1999 }
2000
2001 /*
2002 * Notify the dplane when system/connected routes are deleted.
2003 */
2004 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
2005 struct route_entry *re)
2006 {
2007 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2008
2009 /* Ignore this event unless a provider plugin has requested it. */
2010 if (!zdplane_info.dg_sys_route_notifs) {
2011 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
2012 goto done;
2013 }
2014
2015 if (rn == NULL || re == NULL)
2016 goto done;
2017
2018 ret = dplane_route_update_internal(rn, re, NULL,
2019 DPLANE_OP_SYS_ROUTE_DELETE);
2020
2021 done:
2022 return ret;
2023 }
2024
/*
 * Update from an async notification, to bring other fibs up-to-date.
 * Builds a fresh context from the route, substitutes the notification's
 * nexthop state for install/update ops, and enqueues it.
 */
enum zebra_dplane_result
dplane_route_notif_update(struct route_node *rn,
			  struct route_entry *re,
			  enum dplane_op_e op,
			  struct zebra_dplane_ctx *ctx)
{
	enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
	struct zebra_dplane_ctx *new_ctx = NULL;
	struct nexthop *nexthop;

	if (rn == NULL || re == NULL)
		goto done;

	new_ctx = dplane_ctx_alloc();
	if (new_ctx == NULL)
		goto done;

	/* Init context with info from zebra data structs.
	 * NOTE(review): the return codes of dplane_ctx_route_init() and
	 * dplane_update_enqueue() are not checked here; the function
	 * reports QUEUED regardless - confirm this is intentional.
	 */
	dplane_ctx_route_init(new_ctx, op, rn, re);

	/* For add/update, need to adjust the nexthops so that we match
	 * the notification state, which may not be the route-entry/RIB
	 * state.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE ||
	    op == DPLANE_OP_ROUTE_INSTALL) {

		nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
		new_ctx->u.rinfo.zd_ng.nexthop = NULL;

		copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
			      (rib_active_nhg(re))->nexthop, NULL);

		for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	}

	/* Capture info about the source of the notification, in 'ctx' */
	dplane_ctx_set_notif_provider(new_ctx,
				      dplane_ctx_get_notif_provider(ctx));

	dplane_update_enqueue(new_ctx);

	ret = ZEBRA_DPLANE_REQUEST_QUEUED;

done:
	return ret;
}
2077
2078 /*
2079 * Enqueue a nexthop add for the dataplane.
2080 */
2081 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
2082 {
2083 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2084
2085 if (nhe)
2086 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
2087 return ret;
2088 }
2089
2090 /*
2091 * Enqueue a nexthop update for the dataplane.
2092 *
2093 * Might not need this func since zebra's nexthop objects should be immutable?
2094 */
2095 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
2096 {
2097 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2098
2099 if (nhe)
2100 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
2101 return ret;
2102 }
2103
2104 /*
2105 * Enqueue a nexthop removal for the dataplane.
2106 */
2107 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
2108 {
2109 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
2110
2111 if (nhe)
2112 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
2113
2114 return ret;
2115 }
2116
2117 /*
2118 * Enqueue LSP add for the dataplane.
2119 */
2120 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
2121 {
2122 enum zebra_dplane_result ret =
2123 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
2124
2125 return ret;
2126 }
2127
2128 /*
2129 * Enqueue LSP update for the dataplane.
2130 */
2131 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
2132 {
2133 enum zebra_dplane_result ret =
2134 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
2135
2136 return ret;
2137 }
2138
2139 /*
2140 * Enqueue LSP delete for the dataplane.
2141 */
2142 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
2143 {
2144 enum zebra_dplane_result ret =
2145 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
2146
2147 return ret;
2148 }
2149
/* Update or un-install resulting from an async notification.
 * Same flow as lsp_update_internal(), plus the notification-provider
 * id is copied from 'notif_ctx' into the new context.
 */
enum zebra_dplane_result
dplane_lsp_notif_update(zebra_lsp_t *lsp,
			enum dplane_op_e op,
			struct zebra_dplane_ctx *notif_ctx)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	/* Capture info about the source of the notification */
	dplane_ctx_set_notif_provider(
		ctx,
		dplane_ctx_get_notif_provider(notif_ctx));

	ret = dplane_update_enqueue(ctx);

done:
	/* Update counter (counts attempts, including failures) */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}
	return result;
}
2193
2194 /*
2195 * Enqueue pseudowire install for the dataplane.
2196 */
2197 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
2198 {
2199 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
2200 }
2201
2202 /*
2203 * Enqueue pseudowire un-install for the dataplane.
2204 */
2205 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
2206 {
2207 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
2208 }
2209
/*
 * Common internal LSP update utility: build an LSP context and enqueue
 * it for the dataplane pthread. Returns QUEUED on success, FAILURE
 * otherwise (with the lsp error counter incremented).
 */
static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
						    enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block.
	 * NOTE(review): no NULL check here, unlike
	 * dplane_nexthop_update_internal() - presumably dplane_ctx_alloc()
	 * cannot fail (aborts on OOM); confirm its contract.
	 */
	ctx = dplane_ctx_alloc();

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	ret = dplane_update_enqueue(ctx);

done:
	/* Update counter (counts attempts, including failures) */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2244
/*
 * Internal, common handler for pseudowire updates: build a pw context
 * and enqueue it. Returns QUEUED on success, FAILURE otherwise (with
 * the pw error counter incremented).
 */
static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
						   enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret;
	struct zebra_dplane_ctx *ctx = NULL;

	ctx = dplane_ctx_alloc();

	ret = dplane_ctx_pw_init(ctx, op, pw);
	if (ret != AOK)
		goto done;

	ret = dplane_update_enqueue(ctx);

done:
	/* Update counter (counts attempts, including failures) */
	atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2278
/*
 * Enqueue interface address add for the dataplane.
 * On non-netlink platforms with SIOCAIFADDR-style ioctls, performs
 * extra validation and route cleanup before enqueuing.
 */
enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
					      const struct connected *ifc)
{
#if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
	/* Extra checks for this OS path. */

	/* Don't configure PtP addresses on broadcast ifs or reverse */
	if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
		if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
			zlog_debug("Failed to set intf addr: mismatch p2p and connected");

		return ZEBRA_DPLANE_REQUEST_FAILURE;
	}

	/* Ensure that no existing installed v4 route conflicts with
	 * the new interface prefix. This check must be done in the
	 * zebra pthread context, and any route delete (if needed)
	 * is enqueued before the interface address programming attempt.
	 */
	if (ifc->address->family == AF_INET) {
		struct prefix_ipv4 *p;

		p = (struct prefix_ipv4 *)ifc->address;
		rib_lookup_and_pushup(p, ifp->vrf_id);
	}
#endif

	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
}
2311
2312 /*
2313 * Enqueue interface address remove/uninstall for the dataplane.
2314 */
2315 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
2316 const struct connected *ifc)
2317 {
2318 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
2319 }
2320
/*
 * Common helper for interface-address install/uninstall: capture the
 * interface and address info in a context and enqueue it. Returns
 * QUEUED on success, FAILURE otherwise.
 */
static enum zebra_dplane_result intf_addr_update_internal(
	const struct interface *ifp, const struct connected *ifc,
	enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char addr_str[PREFIX_STRLEN];

		prefix2str(ifc->address, addr_str, sizeof(addr_str));

		zlog_debug("init intf ctx %s: idx %d, addr %u:%s",
			   dplane_op2str(op), ifp->ifindex, ifp->vrf_id,
			   addr_str);
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	/* Init the interface-addr-specific area */
	memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;
	ctx->u.intf.prefix = *(ifc->address);

	if (if_is_broadcast(ifp))
		ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;

	if (CONNECTED_PEER(ifc)) {
		ctx->u.intf.dest_prefix = *(ifc->destination);
		ctx->u.intf.flags |=
			(DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
	}

	if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
		ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;

	if (ifc->label) {
		size_t len;

		ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;

		/* Use embedded buffer if it's adequate; else allocate.
		 * NOTE(review): the strdup'd label is presumably released
		 * when the ctx is freed - confirm dplane_ctx_free() handles
		 * this case.
		 */
		len = strlen(ifc->label);

		if (len < sizeof(ctx->u.intf.label_buf)) {
			strlcpy(ctx->u.intf.label_buf, ifc->label,
				sizeof(ctx->u.intf.label_buf));
			ctx->u.intf.label = ctx->u.intf.label_buf;
		} else {
			ctx->u.intf.label = strdup(ifc->label);
		}
	}

	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
					  1, memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2402
2403 /*
2404 * Enqueue vxlan/evpn mac add (or update).
2405 */
2406 enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
2407 const struct interface *bridge_ifp,
2408 vlanid_t vid,
2409 const struct ethaddr *mac,
2410 struct in_addr vtep_ip,
2411 bool sticky)
2412 {
2413 enum zebra_dplane_result result;
2414
2415 /* Use common helper api */
2416 result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
2417 vid, mac, vtep_ip, sticky);
2418 return result;
2419 }
2420
2421 /*
2422 * Enqueue vxlan/evpn mac delete.
2423 */
2424 enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
2425 const struct interface *bridge_ifp,
2426 vlanid_t vid,
2427 const struct ethaddr *mac,
2428 struct in_addr vtep_ip)
2429 {
2430 enum zebra_dplane_result result;
2431
2432 /* Use common helper api */
2433 result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
2434 vid, mac, vtep_ip, false);
2435 return result;
2436 }
2437
/*
 * Common helper api for MAC address/vxlan updates: capture the MAC info
 * in a context and enqueue it for the dplane pthread. Returns QUEUED on
 * success, FAILURE otherwise.
 */
static enum zebra_dplane_result
mac_update_internal(enum dplane_op_e op,
		    const struct interface *ifp,
		    const struct interface *br_ifp,
		    vlanid_t vid,
		    const struct ethaddr *mac,
		    struct in_addr vtep_ip,
		    bool sticky)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];

		zlog_debug("init mac ctx %s: mac %s, ifp %s, vtep %s",
			   dplane_op2str(op),
			   prefix_mac2str(mac, buf1, sizeof(buf1)),
			   ifp->name,
			   inet_ntop(AF_INET, &vtep_ip, buf2, sizeof(buf2)));
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;

	/* Init the mac-specific data area */
	memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));

	ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
	ctx->u.macinfo.vtep_ip = vtep_ip;
	ctx->u.macinfo.mac = *mac;
	ctx->u.macinfo.vid = vid;
	ctx->u.macinfo.is_sticky = sticky;

	/* Enqueue for processing on the dplane pthread */
	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2504
2505 /*
2506 * Enqueue evpn neighbor add for the dataplane.
2507 */
2508 enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
2509 const struct ipaddr *ip,
2510 const struct ethaddr *mac,
2511 uint32_t flags)
2512 {
2513 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2514
2515 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL,
2516 ifp, mac, ip, flags, DPLANE_NUD_NOARP);
2517
2518 return result;
2519 }
2520
2521 /*
2522 * Enqueue evpn neighbor update for the dataplane.
2523 */
2524 enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
2525 const struct ipaddr *ip,
2526 const struct ethaddr *mac)
2527 {
2528 enum zebra_dplane_result result;
2529
2530 result = neigh_update_internal(DPLANE_OP_NEIGH_UPDATE,
2531 ifp, mac, ip, 0, DPLANE_NUD_PROBE);
2532
2533 return result;
2534 }
2535
2536 /*
2537 * Enqueue evpn neighbor delete for the dataplane.
2538 */
2539 enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
2540 const struct ipaddr *ip)
2541 {
2542 enum zebra_dplane_result result;
2543
2544 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE,
2545 ifp, NULL, ip, 0, 0);
2546
2547 return result;
2548 }
2549
2550 /*
2551 * Enqueue evpn VTEP add for the dataplane.
2552 */
2553 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
2554 const struct in_addr *ip,
2555 vni_t vni)
2556 {
2557 enum zebra_dplane_result result;
2558 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2559 struct ipaddr addr;
2560
2561 if (IS_ZEBRA_DEBUG_VXLAN)
2562 zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
2563 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2564
2565 SET_IPADDR_V4(&addr);
2566 addr.ipaddr_v4 = *ip;
2567
2568 result = neigh_update_internal(DPLANE_OP_VTEP_ADD,
2569 ifp, &mac, &addr, 0, 0);
2570
2571 return result;
2572 }
2573
2574 /*
2575 * Enqueue evpn VTEP add for the dataplane.
2576 */
2577 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
2578 const struct in_addr *ip,
2579 vni_t vni)
2580 {
2581 enum zebra_dplane_result result;
2582 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2583 struct ipaddr addr;
2584
2585 if (IS_ZEBRA_DEBUG_VXLAN)
2586 zlog_debug(
2587 "Uninstall %s from flood list for VNI %u intf %s(%u)",
2588 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2589
2590 SET_IPADDR_V4(&addr);
2591 addr.ipaddr_v4 = *ip;
2592
2593 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE,
2594 ifp, &mac, &addr, 0, 0);
2595
2596 return result;
2597 }
2598
2599 /*
2600 * Common helper api for evpn neighbor updates
2601 */
2602 static enum zebra_dplane_result
2603 neigh_update_internal(enum dplane_op_e op,
2604 const struct interface *ifp,
2605 const struct ethaddr *mac,
2606 const struct ipaddr *ip,
2607 uint32_t flags, uint16_t state)
2608 {
2609 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2610 int ret;
2611 struct zebra_dplane_ctx *ctx = NULL;
2612 struct zebra_ns *zns;
2613
2614 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2615 char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];
2616
2617 zlog_debug("init neigh ctx %s: ifp %s, mac %s, ip %s",
2618 dplane_op2str(op),
2619 prefix_mac2str(mac, buf1, sizeof(buf1)),
2620 ifp->name,
2621 ipaddr2str(ip, buf2, sizeof(buf2)));
2622 }
2623
2624 ctx = dplane_ctx_alloc();
2625
2626 ctx->zd_op = op;
2627 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2628 ctx->zd_vrf_id = ifp->vrf_id;
2629
2630 zns = zebra_ns_lookup(ifp->vrf_id);
2631 dplane_ctx_ns_init(ctx, zns, false);
2632
2633 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2634 ctx->zd_ifindex = ifp->ifindex;
2635
2636 /* Init the neighbor-specific data area */
2637 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
2638
2639 ctx->u.neigh.ip_addr = *ip;
2640 if (mac)
2641 ctx->u.neigh.mac = *mac;
2642 ctx->u.neigh.flags = flags;
2643 ctx->u.neigh.state = state;
2644
2645 /* Enqueue for processing on the dplane pthread */
2646 ret = dplane_update_enqueue(ctx);
2647
2648 /* Increment counter */
2649 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
2650 memory_order_relaxed);
2651
2652 if (ret == AOK)
2653 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2654 else {
2655 /* Error counter */
2656 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
2657 memory_order_relaxed);
2658 dplane_ctx_free(&ctx);
2659 }
2660
2661 return result;
2662 }
2663
/*
 * Handler for 'show dplane': dumps the global dplane counters to the
 * vty. The 'detailed' flag is currently unused here.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields,
		other_errs;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);
	other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
					  memory_order_relaxed);

	/* Route counters */
	vty_out(vty, "Zebra dataplane:\nRoute updates:            %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors:      %"PRIu64"\n", errs);
	vty_out(vty, "Other errors       :      %"PRIu64"\n", other_errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max:   %"PRIu64"\n", queue_max);
	vty_out(vty, "Dplane update yields:     %"PRIu64"\n", yields);

	/* LSP counters ('incoming'/'errs' are reused per category) */
	incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
				    memory_order_relaxed);
	vty_out(vty, "LSP updates:              %"PRIu64"\n", incoming);
	vty_out(vty, "LSP update errors:        %"PRIu64"\n", errs);

	/* Pseudowire counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
				    memory_order_relaxed);
	vty_out(vty, "PW updates:               %"PRIu64"\n", incoming);
	vty_out(vty, "PW update errors:         %"PRIu64"\n", errs);

	/* Interface-address counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
				    memory_order_relaxed);
	vty_out(vty, "Intf addr updates:        %"PRIu64"\n", incoming);
	vty_out(vty, "Intf addr errors:         %"PRIu64"\n", errs);

	/* EVPN MAC counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
				    memory_order_relaxed);
	vty_out(vty, "EVPN MAC updates:         %"PRIu64"\n", incoming);
	vty_out(vty, "EVPN MAC errors:          %"PRIu64"\n", errs);

	/* EVPN neighbor counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
				    memory_order_relaxed);
	vty_out(vty, "EVPN neigh updates:       %"PRIu64"\n", incoming);
	vty_out(vty, "EVPN neigh errors:        %"PRIu64"\n", errs);

	return CMD_SUCCESS;
}
2736
/*
 * Handler for 'show dplane providers': dumps per-provider in/out
 * counters and queue high-water marks. The 'detailed' flag is
 * currently unused here.
 */
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
	struct zebra_dplane_provider *prov;
	uint64_t in, in_max, out, out_max;

	vty_out(vty, "Zebra dataplane providers:\n");

	/* The providers list is shared with the dplane pthread, so
	 * each traversal step is done under the global lock.
	 */
	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	/* Show counters, useful info from each registered provider */
	while (prov) {

		in = atomic_load_explicit(&prov->dp_in_counter,
					  memory_order_relaxed);
		in_max = atomic_load_explicit(&prov->dp_in_max,
					      memory_order_relaxed);
		out = atomic_load_explicit(&prov->dp_out_counter,
					   memory_order_relaxed);
		out_max = atomic_load_explicit(&prov->dp_out_max,
					       memory_order_relaxed);

		vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
			"out: %"PRIu64", q_max: %"PRIu64"\n",
			prov->dp_name, prov->dp_id, in, in_max, out, out_max);

		/* Advance under the lock; providers are never removed
		 * once registered, so 'prov' itself stays valid.
		 */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	return CMD_SUCCESS;
}
2774
2775 /*
2776 * Helper for 'show run' etc.
2777 */
2778 int dplane_config_write_helper(struct vty *vty)
2779 {
2780 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
2781 vty_out(vty, "zebra dplane limit %u\n",
2782 zdplane_info.dg_max_queued_updates);
2783
2784 return 0;
2785 }
2786
/*
 * Provider registration: allocate a provider struct, record its
 * callbacks, and insert it into the global list ordered by priority.
 *
 * 'fp' (the work callback) is required; 'prio' must be a valid
 * priority. Returns 0/AOK on success, EINVAL on bad arguments.
 * If 'prov_p' is non-NULL it receives the new provider (or NULL on
 * failure).
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     int (*start_fp)(struct zebra_dplane_provider *),
			     int (*fp)(struct zebra_dplane_provider *),
			     int (*fini_fp)(struct zebra_dplane_provider *,
					    bool early),
			     void *data,
			     struct zebra_dplane_provider **prov_p)
{
	int ret = 0;
	struct zebra_dplane_provider *p = NULL, *last;

	/* Validate */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_flags = flags;
	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_start = start_fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	p->dp_id = ++zdplane_info.dg_provider_id;

	/* Use the caller's name, or synthesize one from the id */
	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority: find the first provider
	 * with a strictly greater priority and insert before it.
	 */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	/* And unlock */
	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	if (prov_p)
		*prov_p = p;

	return ret;
}
2865
/* Accessors for provider attributes */

/* Provider's display name (set at registration time) */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}
2871
/* Provider's unique id (assigned at registration time) */
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}
2876
/* Provider's opaque per-provider data pointer */
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}
2881
/* Per-cycle work limit; currently a single global value for all
 * providers, so 'prov' is unused.
 */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
2886
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag. Unthreaded providers run only on the dplane
 * pthread, so their queues need no locking.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
2895
/* Counterpart to dplane_provider_lock(); no-op for unthreaded
 * providers.
 */
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
2901
2902 /*
2903 * Dequeue and maintain associated counter
2904 */
2905 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
2906 struct zebra_dplane_provider *prov)
2907 {
2908 struct zebra_dplane_ctx *ctx = NULL;
2909
2910 dplane_provider_lock(prov);
2911
2912 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2913 if (ctx) {
2914 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2915
2916 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
2917 memory_order_relaxed);
2918 }
2919
2920 dplane_provider_unlock(prov);
2921
2922 return ctx;
2923 }
2924
2925 /*
2926 * Dequeue work to a list, return count
2927 */
2928 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
2929 struct dplane_ctx_q *listp)
2930 {
2931 int limit, ret;
2932 struct zebra_dplane_ctx *ctx;
2933
2934 limit = zdplane_info.dg_updates_per_cycle;
2935
2936 dplane_provider_lock(prov);
2937
2938 for (ret = 0; ret < limit; ret++) {
2939 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2940 if (ctx) {
2941 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2942
2943 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
2944 } else {
2945 break;
2946 }
2947 }
2948
2949 if (ret > 0)
2950 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
2951 memory_order_relaxed);
2952
2953 dplane_provider_unlock(prov);
2954
2955 return ret;
2956 }
2957
/*
 * Enqueue a completed context on a provider's outgoing queue and
 * maintain the associated output counter.
 */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
				     struct zebra_dplane_ctx *ctx)
{
	dplane_provider_lock(prov);

	TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
			  zd_q_entries);

	dplane_provider_unlock(prov);

	/* Counter bump happens outside the provider lock; atomics keep
	 * it consistent across pthreads.
	 */
	atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
				  memory_order_relaxed);
}
2974
/*
 * Accessor for provider object: true iff the provider was registered
 * with the THREADED flag (i.e. its queues may be touched from other
 * pthreads and must be locked).
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
2982
/*
 * Internal helper that copies information from a zebra ns object; this is
 * called in the zebra main pthread context as part of dplane ctx init.
 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* On netlink platforms, capture the dplane's dedicated netlink
	 * socket info as well.
	 */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
2997
2998 /*
2999 * Provider api to signal that work/events are available
3000 * for the dataplane pthread.
3001 */
3002 int dplane_provider_work_ready(void)
3003 {
3004 /* Note that during zebra startup, we may be offered work before
3005 * the dataplane pthread (and thread-master) are ready. We want to
3006 * enqueue the work, but the event-scheduling machinery may not be
3007 * available.
3008 */
3009 if (zdplane_info.dg_run) {
3010 thread_add_event(zdplane_info.dg_master,
3011 dplane_thread_loop, NULL, 0,
3012 &zdplane_info.dg_t_update);
3013 }
3014
3015 return AOK;
3016 }
3017
3018 /*
3019 * Enqueue a context directly to zebra main.
3020 */
3021 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
3022 {
3023 struct dplane_ctx_q temp_list;
3024
3025 /* Zebra's api takes a list, so we need to use a temporary list */
3026 TAILQ_INIT(&temp_list);
3027
3028 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
3029 (zdplane_info.dg_results_cb)(&temp_list);
3030 }
3031
3032 /*
3033 * Kernel dataplane provider
3034 */
3035
3036 /*
3037 * Handler for kernel LSP updates
3038 */
3039 static enum zebra_dplane_result
3040 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
3041 {
3042 enum zebra_dplane_result res;
3043
3044 /* Call into the synchronous kernel-facing code here */
3045 res = kernel_lsp_update(ctx);
3046
3047 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3048 atomic_fetch_add_explicit(
3049 &zdplane_info.dg_lsp_errors, 1,
3050 memory_order_relaxed);
3051
3052 return res;
3053 }
3054
3055 /*
3056 * Handler for kernel pseudowire updates
3057 */
3058 static enum zebra_dplane_result
3059 kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
3060 {
3061 enum zebra_dplane_result res;
3062
3063 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3064 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
3065 dplane_ctx_get_ifname(ctx),
3066 dplane_op2str(ctx->zd_op),
3067 dplane_ctx_get_pw_af(ctx),
3068 dplane_ctx_get_pw_local_label(ctx),
3069 dplane_ctx_get_pw_remote_label(ctx));
3070
3071 res = kernel_pw_update(ctx);
3072
3073 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3074 atomic_fetch_add_explicit(
3075 &zdplane_info.dg_pw_errors, 1,
3076 memory_order_relaxed);
3077
3078 return res;
3079 }
3080
3081 /*
3082 * Handler for kernel route updates
3083 */
3084 static enum zebra_dplane_result
3085 kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
3086 {
3087 enum zebra_dplane_result res;
3088
3089 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3090 char dest_str[PREFIX_STRLEN];
3091
3092 prefix2str(dplane_ctx_get_dest(ctx),
3093 dest_str, sizeof(dest_str));
3094
3095 zlog_debug("%u:%s Dplane route update ctx %p op %s",
3096 dplane_ctx_get_vrf(ctx), dest_str,
3097 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
3098 }
3099
3100 /* Call into the synchronous kernel-facing code here */
3101 res = kernel_route_update(ctx);
3102
3103 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3104 atomic_fetch_add_explicit(
3105 &zdplane_info.dg_route_errors, 1,
3106 memory_order_relaxed);
3107
3108 return res;
3109 }
3110
3111 /*
3112 * Handler for kernel-facing interface address updates
3113 */
3114 static enum zebra_dplane_result
3115 kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
3116 {
3117 enum zebra_dplane_result res;
3118
3119 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3120 char dest_str[PREFIX_STRLEN];
3121
3122 prefix2str(dplane_ctx_get_intf_addr(ctx), dest_str,
3123 sizeof(dest_str));
3124
3125 zlog_debug("Dplane intf %s, idx %u, addr %s",
3126 dplane_op2str(dplane_ctx_get_op(ctx)),
3127 dplane_ctx_get_ifindex(ctx), dest_str);
3128 }
3129
3130 res = kernel_address_update_ctx(ctx);
3131
3132 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3133 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3134 1, memory_order_relaxed);
3135
3136 return res;
3137 }
3138
3139 /**
3140 * kernel_dplane_nexthop_update() - Handler for kernel nexthop updates
3141 *
3142 * @ctx: Dataplane context
3143 *
3144 * Return: Dataplane result flag
3145 */
3146 static enum zebra_dplane_result
3147 kernel_dplane_nexthop_update(struct zebra_dplane_ctx *ctx)
3148 {
3149 enum zebra_dplane_result res;
3150
3151 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3152 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
3153 dplane_ctx_get_nhe_id(ctx), ctx,
3154 dplane_op2str(dplane_ctx_get_op(ctx)));
3155 }
3156
3157 res = kernel_nexthop_update(ctx);
3158
3159 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3160 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3161 memory_order_relaxed);
3162
3163 return res;
3164 }
3165
3166 /*
3167 * Handler for kernel-facing EVPN MAC address updates
3168 */
3169 static enum zebra_dplane_result
3170 kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
3171 {
3172 enum zebra_dplane_result res;
3173
3174 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3175 char buf[ETHER_ADDR_STRLEN];
3176
3177 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
3178 sizeof(buf));
3179
3180 zlog_debug("Dplane %s, mac %s, ifindex %u",
3181 dplane_op2str(dplane_ctx_get_op(ctx)),
3182 buf, dplane_ctx_get_ifindex(ctx));
3183 }
3184
3185 res = kernel_mac_update_ctx(ctx);
3186
3187 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3188 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
3189 1, memory_order_relaxed);
3190
3191 return res;
3192 }
3193
3194 /*
3195 * Handler for kernel-facing EVPN neighbor updates
3196 */
3197 static enum zebra_dplane_result
3198 kernel_dplane_neigh_update(struct zebra_dplane_ctx *ctx)
3199 {
3200 enum zebra_dplane_result res;
3201
3202 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3203 char buf[PREFIX_STRLEN];
3204
3205 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
3206 sizeof(buf));
3207
3208 zlog_debug("Dplane %s, ip %s, ifindex %u",
3209 dplane_op2str(dplane_ctx_get_op(ctx)),
3210 buf, dplane_ctx_get_ifindex(ctx));
3211 }
3212
3213 res = kernel_neigh_update_ctx(ctx);
3214
3215 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
3216 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
3217 1, memory_order_relaxed);
3218
3219 return res;
3220 }
3221
/*
 * Kernel provider callback: drain up to the per-cycle limit of
 * contexts from the provider's incoming queue, dispatch each to the
 * appropriate kernel-facing handler by op type, record the result in
 * the context, and push it to the outgoing queue. Reschedules the
 * work loop if the limit was reached (more work may remain).
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* A previous provider plugin may have asked to skip the
		 * kernel update.
		 */
		if (dplane_ctx_is_skip_kernel(ctx)) {
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			goto skip_one;
		}

		/* Dispatch to appropriate kernel-facing apis */
		switch (dplane_ctx_get_op(ctx)) {

		case DPLANE_OP_ROUTE_INSTALL:
		case DPLANE_OP_ROUTE_UPDATE:
		case DPLANE_OP_ROUTE_DELETE:
			res = kernel_dplane_route_update(ctx);
			break;

		case DPLANE_OP_NH_INSTALL:
		case DPLANE_OP_NH_UPDATE:
		case DPLANE_OP_NH_DELETE:
			res = kernel_dplane_nexthop_update(ctx);
			break;

		case DPLANE_OP_LSP_INSTALL:
		case DPLANE_OP_LSP_UPDATE:
		case DPLANE_OP_LSP_DELETE:
			res = kernel_dplane_lsp_update(ctx);
			break;

		case DPLANE_OP_PW_INSTALL:
		case DPLANE_OP_PW_UNINSTALL:
			res = kernel_dplane_pw_update(ctx);
			break;

		case DPLANE_OP_ADDR_INSTALL:
		case DPLANE_OP_ADDR_UNINSTALL:
			res = kernel_dplane_address_update(ctx);
			break;

		case DPLANE_OP_MAC_INSTALL:
		case DPLANE_OP_MAC_DELETE:
			res = kernel_dplane_mac_update(ctx);
			break;

		case DPLANE_OP_NEIGH_INSTALL:
		case DPLANE_OP_NEIGH_UPDATE:
		case DPLANE_OP_NEIGH_DELETE:
		case DPLANE_OP_VTEP_ADD:
		case DPLANE_OP_VTEP_DELETE:
			res = kernel_dplane_neigh_update(ctx);
			break;

		/* Ignore 'notifications' - no-op */
		case DPLANE_OP_SYS_ROUTE_ADD:
		case DPLANE_OP_SYS_ROUTE_DELETE:
		case DPLANE_OP_ROUTE_NOTIFY:
		case DPLANE_OP_LSP_NOTIFY:
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			break;

		default:
			/* Unrecognized op: count it and fail the ctx */
			atomic_fetch_add_explicit(
				&zdplane_info.dg_other_errors, 1,
				memory_order_relaxed);

			res = ZEBRA_DPLANE_REQUEST_FAILURE;
			break;
		}

skip_one:
		/* Record the result and hand the ctx downstream */
		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
3334
3335 #if DPLANE_TEST_PROVIDER
3336
3337 /*
3338 * Test dataplane provider plugin
3339 */
3340
/*
 * Test provider process callback: moves contexts from the 'in' queue
 * straight to the 'out' queue, marking each successful. Used only when
 * DPLANE_TEST_PROVIDER is enabled.
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s': op %s",
				   dplane_provider_get_name(prov),
				   dplane_op2str(dplane_ctx_get_op(ctx)));

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}
3385
/*
 * Test provider shutdown/fini callback: just logs; 'early' indicates
 * the pre-shutdown phase.
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
3399 #endif /* DPLANE_TEST_PROVIDER */
3400
/*
 * Register default kernel provider (and, when compiled in, the
 * optional test provider ahead of it).
 */
static void dplane_provider_init(void)
{
	int ret;

	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       kernel_dplane_process_func,
				       NULL,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif /* DPLANE_TEST_PROVIDER */
}
3433
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing.
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
3441
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra pthread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Flag checked via dplane_is_in_shutdown() by other code paths */
	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
3458
/*
 * Utility to determine whether work remains enqueued within the dplane;
 * used during system shutdown processing. Checks the global incoming
 * queue and every provider's in/out queues.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	/* Anything on the global incoming queue means work pending */
	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	/* Walk each provider, peeking at its queues under its lock */
	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		if (ctx != NULL)
			break;

		/* Advance under the global lock */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
3508
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
3539
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the periodic drain-check on the dplane pthread */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
3559
/*
 * Main dataplane pthread event loop. The thread takes new incoming work
 * and offers it to the first provider. It then iterates through the
 * providers, taking complete work from each one and offering it
 * to the next in order. At each step, a limited number of updates are
 * processed during a cycle in order to provide some fairness.
 *
 * This loop through the providers is only run once, so that the dataplane
 * pthread can look for other pending work - such as i/o work on behalf of
 * providers.
 *
 * Returns 0 unconditionally (thread-handler convention); re-scheduling
 * happens elsewhere when new work is enqueued.
 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider.
	 * NOTE(review): prov is dereferenced below without a NULL check, so
	 * this assumes at least one provider (the default kernel provider
	 * registered at init) is always present — confirm.
	 */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	/* 'counter' items just left the shared incoming queue */
	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* NOTE(review): 'counter' still includes any ctxs that were
		 * just moved to error_list above, so dp_in_counter and
		 * dp_in_queued may over-count for this provider — confirm
		 * whether that is intended.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		/* Track the high-water mark of the provider's input queue */
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		/* Bounded by 'limit' to keep a single cycle fair */
		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider; the providers list is shared, so
		 * take the global lock just for the pointer walk.
		 */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main: errors are delivered first */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);

	/* Call through to zebra main with the completed work */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
3750
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 *
 * Ordering matters here: clear the run flag first so the dplane loop
 * exits early, then cancel any pending loop event, then stop and
 * destroy the pthread.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	/* NOTE(review): dg_run is written here (main pthread) and read by
	 * the dplane pthread without an atomic or lock — presumably relying
	 * on the thread-stop handshake below for visibility; confirm.
	 */
	zdplane_info.dg_run = false;

	/* Async cancel: the event may belong to the dplane pthread's master */
	if (zdplane_info.dg_t_update)
		thread_cancel_async(zdplane_info.dg_t_update->master,
				    &zdplane_info.dg_t_update, NULL);

	/* Blocks until the dplane pthread has stopped */
	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
3781
3782 /*
3783 * Initialize the dataplane module during startup, internal/private version
3784 */
3785 static void zebra_dplane_init_internal(void)
3786 {
3787 memset(&zdplane_info, 0, sizeof(zdplane_info));
3788
3789 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
3790
3791 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
3792 TAILQ_INIT(&zdplane_info.dg_providers_q);
3793
3794 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
3795
3796 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
3797
3798 /* Register default kernel 'provider' during init */
3799 dplane_provider_init();
3800 }
3801
3802 /*
3803 * Start the dataplane pthread. This step needs to be run later than the
3804 * 'init' step, in case zebra has fork-ed.
3805 */
3806 void zebra_dplane_start(void)
3807 {
3808 struct zebra_dplane_provider *prov;
3809 struct frr_pthread_attr pattr = {
3810 .start = frr_pthread_attr_default.start,
3811 .stop = frr_pthread_attr_default.stop
3812 };
3813
3814 /* Start dataplane pthread */
3815
3816 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
3817 "zebra_dplane");
3818
3819 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
3820
3821 zdplane_info.dg_run = true;
3822
3823 /* Enqueue an initial event for the dataplane pthread */
3824 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
3825 &zdplane_info.dg_t_update);
3826
3827 /* Call start callbacks for registered providers */
3828
3829 DPLANE_LOCK();
3830 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
3831 DPLANE_UNLOCK();
3832
3833 while (prov) {
3834
3835 if (prov->dp_start)
3836 (prov->dp_start)(prov);
3837
3838 /* Locate next provider */
3839 DPLANE_LOCK();
3840 prov = TAILQ_NEXT(prov, dp_prov_link);
3841 DPLANE_UNLOCK();
3842 }
3843
3844 frr_pthread_run(zdplane_info.dg_pthread, NULL);
3845 }
3846
3847 /*
3848 * Initialize the dataplane module at startup; called by zebra rib_init()
3849 */
3850 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
3851 {
3852 zebra_dplane_init_internal();
3853 zdplane_info.dg_results_cb = results_fp;
3854 }