]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
bgpd: Make sure we can use `no bgp listen range ...`
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_memory.h"
33 #include "zebra/zebra_router.h"
34 #include "zebra/zebra_dplane.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37
38 /* Memory type for context blocks */
39 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
40 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider")
41
42 #ifndef AOK
43 # define AOK 0
44 #endif
45
46 /* Enable test dataplane provider */
47 /*#define DPLANE_TEST_PROVIDER 1 */
48
49 /* Default value for max queued incoming updates */
50 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
51
52 /* Default value for new work per cycle */
53 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
54
55 /* Validation check macro for context blocks */
56 /* #define DPLANE_DEBUG 1 */
57
58 #ifdef DPLANE_DEBUG
59
60 # define DPLANE_CTX_VALID(p) \
61 assert((p) != NULL)
62
63 #else
64
65 # define DPLANE_CTX_VALID(p)
66
67 #endif /* DPLANE_DEBUG */
68
69 /*
70 * Route information captured for route updates.
71 */
/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* Address-family info for the route */
	afi_t zd_afi;
	safi_t zd_safi;

	/* Route type (owning protocol), current and previous */
	int zd_type;
	int zd_old_type;

	/* Route tag and metric, current and previous */
	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	/* Protocol instance, current and previous */
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	/* Admin distance, current and previous */
	uint8_t zd_distance;
	uint8_t zd_old_distance;

	/* MTU values captured for the route */
	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};
107
108 /*
109 * Pseudowire info for the dataplane
110 */
/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	int type;		/* Pseudowire type */
	int af;			/* Address family of the PW destination */
	int status;		/* Pseudowire status value */
	uint32_t flags;
	union g_addr dest;	/* Remote endpoint address */
	mpls_label_t local_label;
	mpls_label_t remote_label;

	/* Nexthops */
	struct nexthop_group nhg;

	/* Protocol-specific data; contents are opaque to the dplane */
	union pw_protocol_fields fields;
};
125
126 /*
127 * Interface/prefix info for the dataplane
128 */
/*
 * Interface/prefix info for the dataplane
 */
struct dplane_intf_info {

	uint32_t metric;
	uint32_t flags;

/* Flag values for the 'flags' field above */
#define DPLANE_INTF_CONNECTED   (1 << 0) /* Connected peer, p2p */
#define DPLANE_INTF_SECONDARY   (1 << 1)
#define DPLANE_INTF_BROADCAST   (1 << 2)
#define DPLANE_INTF_HAS_DEST    DPLANE_INTF_CONNECTED
#define DPLANE_INTF_HAS_LABEL   (1 << 4)

	/* Interface address/prefix */
	struct prefix prefix;

	/* Dest address, for p2p, or broadcast prefix */
	struct prefix dest_prefix;

	/* Address label: 'label' points at 'label_buf' for short strings,
	 * or at a heap allocation that dplane_ctx_free() releases.
	 */
	char *label;
	char label_buf[32];
};
149
150 /*
151 * EVPN MAC address info for the dataplane.
152 */
/*
 * EVPN MAC address info for the dataplane.
 */
struct dplane_mac_info {
	vlanid_t vid;		/* VLAN id associated with the MAC */
	struct ethaddr mac;	/* The MAC address itself */
	struct in_addr vtep_ip;	/* VTEP address associated with the MAC */
	bool is_sticky;		/* 'Sticky' (non-moving) MAC */

};
160
161 /*
162 * EVPN neighbor info for the dataplane
163 */
/*
 * EVPN neighbor info for the dataplane
 */
struct dplane_neigh_info {
	struct ipaddr ip_addr;	/* Neighbor address */
	struct ethaddr mac;	/* Link-layer address */
	uint32_t flags;
	uint16_t state;		/* Neighbor state value */
};
170
171 /*
172 * The context block used to exchange info about route updates across
173 * the boundary between the zebra main context (and pthread) and the
174 * dataplane layer (and pthread).
175 */
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	/* True for two-part 'update' operations; see dplane_ctx_ns_init(),
	 * which reserves two kernel message sequence numbers in that case.
	 */
	bool zd_is_update;

	/* Kernel message sequence numbers, current and previous */
	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* Some updates may be generated by notifications: allow the
	 * plugin to notice and ignore results from its own notifications.
	 */
	uint32_t zd_notif_provider;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	/* VRF and table ids for the update */
	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Interface name and index, where relevant */
	char zd_ifname[INTERFACE_NAMSIZ];
	ifindex_t zd_ifindex;

	/* Support info for different kinds of updates */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
		struct dplane_intf_info intf;
		struct dplane_mac_info macinfo;
		struct dplane_neigh_info neigh;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
226
227 /* Flag that can be set by a pre-kernel provider as a signal that an update
228 * should bypass the kernel.
229 */
230 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
231
232
233 /*
234 * Registration block for one dataplane provider.
235 */
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	/* Provider callbacks: start, work processing, and shutdown/fini.
	 * NOTE(review): exact invocation points are outside this view --
	 * confirm against the provider registration API.
	 */
	int (*dp_start)(struct zebra_dplane_provider *prov);

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	/* Counters: totals, currently-queued, and max-queued, for the
	 * inbound and outbound queues, plus an error counter.
	 */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
280
281 /*
282 * Globals
283 */
/*
 * Globals: single static instance ('zdplane_info') holding all module
 * state shared between the zebra main pthread and the dplane pthread.
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Control whether system route notifications should be produced. */
	bool dg_sys_route_notifs;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	/* Per-category work and error counters */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_pws_in;
	_Atomic uint32_t dg_pw_errors;

	_Atomic uint32_t dg_intf_addrs_in;
	_Atomic uint32_t dg_intf_addr_errors;

	_Atomic uint32_t dg_macs_in;
	_Atomic uint32_t dg_mac_errors;

	_Atomic uint32_t dg_neighs_in;
	_Atomic uint32_t dg_neigh_errors;

	/* Count of times the work loop yielded to pace itself */
	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;
353
354 /*
355 * Lock and unlock for interactions with the zebra 'core' pthread
356 */
357 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
358 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
359
360
361 /*
362 * Lock and unlock for individual providers
363 */
364 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
365 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
366
367 /* Prototypes */
368 static int dplane_thread_loop(struct thread *event);
369 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
370 struct zebra_ns *zns);
371 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
372 enum dplane_op_e op);
373 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
374 enum dplane_op_e op);
375 static enum zebra_dplane_result intf_addr_update_internal(
376 const struct interface *ifp, const struct connected *ifc,
377 enum dplane_op_e op);
378 static enum zebra_dplane_result mac_update_internal(
379 enum dplane_op_e op, const struct interface *ifp,
380 vlanid_t vid, const struct ethaddr *mac,
381 struct in_addr vtep_ip, bool sticky);
382 static enum zebra_dplane_result neigh_update_internal(
383 enum dplane_op_e op,
384 const struct interface *ifp,
385 const struct ethaddr *mac,
386 const struct ipaddr *ip,
387 uint32_t flags, uint16_t state);
388
389 /*
390 * Public APIs
391 */
392
393 /* Obtain thread_master for dataplane thread */
/* Obtain thread_master for dataplane thread; callers can use this to
 * schedule events in the dplane pthread's context.
 */
struct thread_master *dplane_get_thread_master(void)
{
	return zdplane_info.dg_master;
}
398
399 /*
400 * Allocate a dataplane update context
401 */
402 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
403 {
404 struct zebra_dplane_ctx *p;
405
406 /* TODO -- just alloc'ing memory, but would like to maintain
407 * a pool
408 */
409 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
410
411 return p;
412 }
413
414 /* Enable system route notifications */
/* Enable system route notifications. Note: this flag is set-only;
 * there is no API here to clear it again.
 */
void dplane_enable_sys_route_notifs(void)
{
	zdplane_info.dg_sys_route_notifs = true;
}
419
420 /*
421 * Free a dataplane results context.
422 */
423 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
424 {
425 if (pctx == NULL)
426 return;
427
428 DPLANE_CTX_VALID(*pctx);
429
430 /* TODO -- just freeing memory, but would like to maintain
431 * a pool
432 */
433
434 /* Some internal allocations may need to be freed, depending on
435 * the type of info captured in the ctx.
436 */
437 switch ((*pctx)->zd_op) {
438 case DPLANE_OP_ROUTE_INSTALL:
439 case DPLANE_OP_ROUTE_UPDATE:
440 case DPLANE_OP_ROUTE_DELETE:
441 case DPLANE_OP_SYS_ROUTE_ADD:
442 case DPLANE_OP_SYS_ROUTE_DELETE:
443 case DPLANE_OP_ROUTE_NOTIFY:
444
445 /* Free allocated nexthops */
446 if ((*pctx)->u.rinfo.zd_ng.nexthop) {
447 /* This deals with recursive nexthops too */
448 nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);
449
450 (*pctx)->u.rinfo.zd_ng.nexthop = NULL;
451 }
452
453 if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
454 /* This deals with recursive nexthops too */
455 nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
456
457 (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
458 }
459
460 break;
461
462 case DPLANE_OP_LSP_INSTALL:
463 case DPLANE_OP_LSP_UPDATE:
464 case DPLANE_OP_LSP_DELETE:
465 case DPLANE_OP_LSP_NOTIFY:
466 {
467 zebra_nhlfe_t *nhlfe, *next;
468
469 /* Free allocated NHLFEs */
470 for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
471 next = nhlfe->next;
472
473 zebra_mpls_nhlfe_del(nhlfe);
474 }
475
476 /* Clear pointers in lsp struct, in case we're cacheing
477 * free context structs.
478 */
479 (*pctx)->u.lsp.nhlfe_list = NULL;
480 (*pctx)->u.lsp.best_nhlfe = NULL;
481
482 break;
483 }
484
485 case DPLANE_OP_PW_INSTALL:
486 case DPLANE_OP_PW_UNINSTALL:
487 /* Free allocated nexthops */
488 if ((*pctx)->u.pw.nhg.nexthop) {
489 /* This deals with recursive nexthops too */
490 nexthops_free((*pctx)->u.pw.nhg.nexthop);
491
492 (*pctx)->u.pw.nhg.nexthop = NULL;
493 }
494 break;
495
496 case DPLANE_OP_ADDR_INSTALL:
497 case DPLANE_OP_ADDR_UNINSTALL:
498 /* Maybe free label string, if allocated */
499 if ((*pctx)->u.intf.label != NULL &&
500 (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) {
501 free((*pctx)->u.intf.label);
502 (*pctx)->u.intf.label = NULL;
503 }
504 break;
505
506 case DPLANE_OP_MAC_INSTALL:
507 case DPLANE_OP_MAC_DELETE:
508 case DPLANE_OP_NEIGH_INSTALL:
509 case DPLANE_OP_NEIGH_UPDATE:
510 case DPLANE_OP_NEIGH_DELETE:
511 case DPLANE_OP_VTEP_ADD:
512 case DPLANE_OP_VTEP_DELETE:
513 case DPLANE_OP_NONE:
514 break;
515 }
516
517 XFREE(MTYPE_DP_CTX, *pctx);
518 *pctx = NULL;
519 }
520
521 /*
522 * Return a context block to the dplane module after processing
523 */
/*
 * Return a context block to the dplane module after processing.
 * On return, *pctx is NULL (cleared by dplane_ctx_free()).
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
529
530 /* Enqueue a context block */
/* Enqueue a context block at the tail of a list */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	/* The cast discards 'const': the embedded TAILQ linkage inside
	 * the ctx must be written in order to enqueue it.
	 */
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
536
537 /* Append a list of context blocks to another list */
538 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
539 struct dplane_ctx_q *from_list)
540 {
541 if (TAILQ_FIRST(from_list)) {
542 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
543
544 /* And clear 'from' list */
545 TAILQ_INIT(from_list);
546 }
547 }
548
549 /* Dequeue a context block from the head of a list */
550 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
551 {
552 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
553
554 if (ctx)
555 TAILQ_REMOVE(q, ctx, zd_q_entries);
556
557 return ctx;
558 }
559
560 /*
561 * Accessors for information from the context object
562 */
/* Retrieve the operation's result status */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Set the operation's result status */
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
			   enum zebra_dplane_result status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_status = status;
}

/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->zd_provider;
}

/* Providers run before the kernel can control whether a kernel
 * update should be done.
 */
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Test whether the 'skip kernel' flag has been set */
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}

/* Set the context's operation code */
void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
	DPLANE_CTX_VALID(ctx);
	ctx->zd_op = op;
}

/* Retrieve the context's operation code */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
615
616 const char *dplane_op2str(enum dplane_op_e op)
617 {
618 const char *ret = "UNKNOWN";
619
620 switch (op) {
621 case DPLANE_OP_NONE:
622 ret = "NONE";
623 break;
624
625 /* Route update */
626 case DPLANE_OP_ROUTE_INSTALL:
627 ret = "ROUTE_INSTALL";
628 break;
629 case DPLANE_OP_ROUTE_UPDATE:
630 ret = "ROUTE_UPDATE";
631 break;
632 case DPLANE_OP_ROUTE_DELETE:
633 ret = "ROUTE_DELETE";
634 break;
635 case DPLANE_OP_ROUTE_NOTIFY:
636 ret = "ROUTE_NOTIFY";
637 break;
638
639 case DPLANE_OP_LSP_INSTALL:
640 ret = "LSP_INSTALL";
641 break;
642 case DPLANE_OP_LSP_UPDATE:
643 ret = "LSP_UPDATE";
644 break;
645 case DPLANE_OP_LSP_DELETE:
646 ret = "LSP_DELETE";
647 break;
648 case DPLANE_OP_LSP_NOTIFY:
649 ret = "LSP_NOTIFY";
650 break;
651
652 case DPLANE_OP_PW_INSTALL:
653 ret = "PW_INSTALL";
654 break;
655 case DPLANE_OP_PW_UNINSTALL:
656 ret = "PW_UNINSTALL";
657 break;
658
659 case DPLANE_OP_SYS_ROUTE_ADD:
660 ret = "SYS_ROUTE_ADD";
661 break;
662 case DPLANE_OP_SYS_ROUTE_DELETE:
663 ret = "SYS_ROUTE_DEL";
664 break;
665
666 case DPLANE_OP_ADDR_INSTALL:
667 ret = "ADDR_INSTALL";
668 break;
669 case DPLANE_OP_ADDR_UNINSTALL:
670 ret = "ADDR_UNINSTALL";
671 break;
672
673 case DPLANE_OP_MAC_INSTALL:
674 ret = "MAC_INSTALL";
675 break;
676 case DPLANE_OP_MAC_DELETE:
677 ret = "MAC_DELETE";
678 break;
679
680 case DPLANE_OP_NEIGH_INSTALL:
681 ret = "NEIGH_INSTALL";
682 break;
683 case DPLANE_OP_NEIGH_UPDATE:
684 ret = "NEIGH_UPDATE";
685 break;
686 case DPLANE_OP_NEIGH_DELETE:
687 ret = "NEIGH_DELETE";
688 break;
689 case DPLANE_OP_VTEP_ADD:
690 ret = "VTEP_ADD";
691 break;
692 case DPLANE_OP_VTEP_DELETE:
693 ret = "VTEP_DELETE";
694 break;
695 }
696
697 return ret;
698 }
699
700 const char *dplane_res2str(enum zebra_dplane_result res)
701 {
702 const char *ret = "<Unknown>";
703
704 switch (res) {
705 case ZEBRA_DPLANE_REQUEST_FAILURE:
706 ret = "FAILURE";
707 break;
708 case ZEBRA_DPLANE_REQUEST_QUEUED:
709 ret = "QUEUED";
710 break;
711 case ZEBRA_DPLANE_REQUEST_SUCCESS:
712 ret = "SUCCESS";
713 break;
714 }
715
716 return ret;
717 }
718
/* Set the route's destination prefix */
void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
			 const struct prefix *dest)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
}

/* Retrieve the route's destination prefix */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_dest);
}

/* Set the route's source prefix; NULL clears it */
void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
{
	DPLANE_CTX_VALID(ctx);

	if (src)
		prefix_copy(&(ctx->u.rinfo.zd_src), src);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
}

/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	/* An all-zero prefix (len 0, unspecified address) means 'unset' */
	if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->u.rinfo.zd_src);
	}
}
756
/* Test whether the context is part of a two-part 'update' operation */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Retrieve the current kernel message sequence number */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Retrieve the previous kernel message sequence number */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* Set the context's VRF id */
void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_vrf_id = vrf;
}

/* Retrieve the context's VRF id */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* True if the update was generated by a provider notification
 * (notif provider id is non-zero).
 */
bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->zd_notif_provider != 0);
}

/* Retrieve the id of the provider whose notification generated this */
uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_notif_provider;
}

/* Record the id of the provider whose notification generated this */
void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
				   uint32_t id)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_notif_provider = id;
}

/* Retrieve the interface name captured in the context */
const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifname;
}

/* Retrieve the interface index captured in the context */
ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ifindex;
}
826
/* Set the route type (owning protocol) */
void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_type = type;
}

/* Retrieve the route type */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_type;
}

/* Retrieve the previous route type, for update operations */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_type;
}

/* Set the route's address family */
void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_afi = afi;
}

/* Retrieve the route's address family */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_afi;
}

/* Set the route's SAFI */
void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_safi = safi;
}

/* Retrieve the route's SAFI */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_safi;
}

/* Set the kernel routing table id */
void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
{
	DPLANE_CTX_VALID(ctx);

	ctx->zd_table_id = table;
}

/* Retrieve the kernel routing table id */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

/* Retrieve the route tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_tag;
}

/* Set the route tag */
void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_tag = tag;
}

/* Retrieve the previous route tag, for update operations */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_tag;
}

/* Retrieve the protocol instance */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_instance;
}

/* Set the protocol instance */
void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_instance = instance;
}

/* Retrieve the previous protocol instance, for update operations */
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_instance;
}

/* Retrieve the route metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_metric;
}

/* Retrieve the previous route metric, for update operations */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_metric;
}

/* Retrieve the route MTU */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_mtu;
}

/* Retrieve the nexthop MTU */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_nexthop_mtu;
}

/* Retrieve the route's admin distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_distance;
}

/* Set the route's admin distance */
void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.rinfo.zd_distance = distance;
}

/* Retrieve the previous admin distance, for update operations */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.zd_old_distance;
}

/* Replace the context's nexthop list with a copy of 'nh'; any
 * previously-captured nexthops are freed first.
 */
void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.rinfo.zd_ng.nexthop) {
		nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
		ctx->u.rinfo.zd_ng.nexthop = NULL;
	}
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), nh, NULL);
}

/* Retrieve the route's nexthop group */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_ng);
}

/* Retrieve the route's previous nexthop group, for update operations */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.zd_old_ng);
}

/* Retrieve the namespace info captured in the context */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
1015
1016 /* Accessors for LSP information */
1017
/* Accessors for LSP information */

/* Retrieve the LSP's incoming label */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

/* Set the LSP's incoming label */
void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.ile.in_label = label;
}

/* Retrieve the LSP's address family */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}

/* Set the LSP's address family */
void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
				uint8_t family)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.addr_family = family;
}

/* Retrieve the LSP's flags */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}

/* Set the LSP's flags */
void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
			      uint32_t flags)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.flags = flags;
}

/* Retrieve the head of the LSP's NHLFE list */
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.nhlfe_list;
}

/* Add an NHLFE to the context's LSP; returns the new NHLFE, or NULL
 * on failure (delegated to zebra_mpls_lsp_add_nhlfe()).
 */
zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
				    enum lsp_types_t lsp_type,
				    enum nexthop_types_t nh_type,
				    union g_addr *gate,
				    ifindex_t ifindex,
				    mpls_label_t out_label)
{
	zebra_nhlfe_t *nhlfe;

	DPLANE_CTX_VALID(ctx);

	nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
					 lsp_type, nh_type, gate,
					 ifindex, out_label);

	return nhlfe;
}

/* Retrieve the LSP's 'best' NHLFE */
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

/* Set the LSP's 'best' NHLFE; returns the stored value */
const zebra_nhlfe_t *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
			  zebra_nhlfe_t *nhlfe)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.best_nhlfe = nhlfe;
	return ctx->u.lsp.best_nhlfe;
}

/* Retrieve the LSP's ECMP count */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
1111
/* Accessors for pseudowire information */

/* Retrieve the pseudowire's local label */
mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

/* Retrieve the pseudowire's remote label */
mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

/* Retrieve the pseudowire type */
int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Retrieve the pseudowire destination's address family */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

/* Retrieve the pseudowire flags */
uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

/* Retrieve the pseudowire status value */
int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

/* Retrieve the pseudowire's remote endpoint address */
const union g_addr *dplane_ctx_get_pw_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.dest);
}

/* Retrieve the pseudowire's protocol-specific fields */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/* Retrieve the pseudowire's nexthop group */
const struct nexthop_group *
dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.nhg);
}
1177
1178 /* Accessors for interface information */
/* Accessors for interface information */

/* Retrieve the interface metric */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.metric;
}

/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
}

/* Is the interface address a secondary address? */
bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
}

/* Is the interface a broadcast interface? */
bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
}

/* Retrieve the interface address/prefix */
const struct prefix *dplane_ctx_get_intf_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.prefix);
}

/* Does the interface address have a peer/dest address? */
bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
}

/* Retrieve the dest/peer prefix, or NULL if there is none */
const struct prefix *dplane_ctx_get_intf_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST)
		return &(ctx->u.intf.dest_prefix);
	else
		return NULL;
}

/* Does the interface address have a label? */
bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
}

/* Retrieve the interface address label string (may be NULL) */
const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.label;
}
1247
1248 /* Accessors for MAC information */
/* Accessors for MAC information */

/* Retrieve the VLAN id associated with the MAC */
vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.vid;
}

/* Is the MAC 'sticky' (non-moving)? */
bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.is_sticky;
}

/* Retrieve the MAC address itself */
const struct ethaddr *dplane_ctx_mac_get_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.mac);
}

/* Retrieve the VTEP address associated with the MAC */
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.vtep_ip);
}

/* Accessors for neighbor information */

/* Retrieve the neighbor's IP address */
const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.ip_addr);
}

/* Retrieve the neighbor's link-layer address */
const struct ethaddr *dplane_ctx_neigh_get_mac(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.mac);
}

/* Retrieve the neighbor flags */
uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.flags;
}

/* Retrieve the neighbor state value */
uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.state;
}
1301
1302 /*
1303 * End of dplane context accessors
1304 */
1305
1306
1307 /*
1308 * Retrieve the limit on the number of pending, unprocessed updates.
1309 */
1310 uint32_t dplane_get_in_queue_limit(void)
1311 {
1312 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
1313 memory_order_relaxed);
1314 }
1315
1316 /*
1317 * Configure limit on the number of pending, queued updates.
1318 */
1319 void dplane_set_in_queue_limit(uint32_t limit, bool set)
1320 {
1321 /* Reset to default on 'unset' */
1322 if (!set)
1323 limit = DPLANE_DEFAULT_MAX_QUEUED;
1324
1325 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
1326 memory_order_relaxed);
1327 }
1328
1329 /*
1330 * Retrieve the current queue depth of incoming, unprocessed updates
1331 */
1332 uint32_t dplane_get_in_queue_len(void)
1333 {
1334 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
1335 memory_order_seq_cst);
1336 }
1337
1338 /*
1339 * Common dataplane context init with zebra namespace info.
1340 */
1341 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
1342 struct zebra_ns *zns,
1343 bool is_update)
1344 {
1345 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
1346
1347 #if defined(HAVE_NETLINK)
1348 /* Increment message counter after copying to context struct - may need
1349 * two messages in some 'update' cases.
1350 */
1351 if (is_update)
1352 zns->netlink_dplane.seq += 2;
1353 else
1354 zns->netlink_dplane.seq++;
1355 #endif /* HAVE_NETLINK */
1356
1357 return AOK;
1358 }
1359
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Copies the prefixes, route metadata and nexthops out of 'rn'/'re' so
 * the dataplane pthread never needs to touch 'core' zebra structures.
 * Returns AOK on success, EINVAL on bad arguments.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* 'old' values default to the current route; the update path
	 * overwrites them later when a distinct old_re exists.
	 */
	ctx->u.rinfo.zd_type = re->type;
	ctx->u.rinfo.zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->u.rinfo.zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
	else
		memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));

	ctx->zd_table_id = re->table;

	ctx->u.rinfo.zd_metric = re->metric;
	ctx->u.rinfo.zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->u.rinfo.zd_mtu = re->mtu;
	ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
	ctx->u.rinfo.zd_instance = re->instance;
	ctx->u.rinfo.zd_tag = re->tag;
	ctx->u.rinfo.zd_old_tag = re->tag;
	ctx->u.rinfo.zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->u.rinfo.zd_afi = info->afi;
	ctx->u.rinfo.zd_safi = info->safi;

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);

	/* Ensure that the dplane's nexthops flags are clear. */
	for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Don't need some info when capturing a system notification */
	if (op == DPLANE_OP_SYS_ROUTE_ADD ||
	    op == DPLANE_OP_SYS_ROUTE_DELETE) {
		ret = AOK;
		goto done;
	}

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;
	dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence = zebra_router_get_next_sequence();
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
1443
/*
 * Capture information for an LSP update in a dplane context.
 *
 * Deep-copies the lsp's nhlfe list into the context so the dataplane
 * pthread doesn't touch zebra's MPLS structures. Returns AOK on success,
 * ENOMEM if copying an nhlfe fails.
 */
static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
			       enum dplane_op_e op,
			       zebra_lsp_t *lsp)
{
	int ret = AOK;
	zebra_nhlfe_t *nhlfe, *new_nhlfe;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
			   dplane_op2str(op), lsp->ile.in_label,
			   lsp->num_ecmp);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
			   (op == DPLANE_OP_LSP_UPDATE));

	memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));

	ctx->u.lsp.ile = lsp->ile;
	ctx->u.lsp.addr_family = lsp->addr_family;
	ctx->u.lsp.num_ecmp = lsp->num_ecmp;
	ctx->u.lsp.flags = lsp->flags;

	/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
	for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
		/* Not sure if this is meaningful... */
		if (nhlfe->nexthop == NULL)
			continue;

		/* NOTE(review): dereferences nexthop->nh_label without a
		 * NULL check - assumes every nhlfe nexthop carries a label;
		 * TODO confirm callers guarantee this.
		 */
		new_nhlfe =
			zebra_mpls_lsp_add_nhlfe(
				&(ctx->u.lsp),
				nhlfe->type,
				nhlfe->nexthop->type,
				&(nhlfe->nexthop->gate),
				nhlfe->nexthop->ifindex,
				nhlfe->nexthop->nh_label->label[0]);

		if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
			ret = ENOMEM;
			break;
		}

		/* Need to copy flags too */
		new_nhlfe->flags = nhlfe->flags;
		new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;

		if (nhlfe == lsp->best_nhlfe)
			ctx->u.lsp.best_nhlfe = new_nhlfe;
	}

	/* On error the ctx will be cleaned-up, so we don't need to
	 * deal with any allocated nhlfe or nexthop structs here.
	 */

	return ret;
}
1507
/*
 * Capture information for a pseudowire update in a dplane context.
 *
 * Copies the pw attributes, then resolves the pw destination through the
 * unicast RIB (running in the zebra pthread, so RIB access is safe) and
 * captures the selected route's nexthops. Always returns AOK.
 */
static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
			      enum dplane_op_e op,
			      struct zebra_pw *pw)
{
	struct prefix p;
	afi_t afi;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *re;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
			   dplane_op2str(op), pw->ifname, pw->local_label,
			   pw->remote_label);

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;

	/* Capture namespace info: no netlink support as of 12/18,
	 * but just in case...
	 */
	dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);

	memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));

	/* This name appears to be c-string, so we use string copy. */
	strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));

	ctx->zd_vrf_id = pw->vrf_id;
	ctx->zd_ifindex = pw->ifindex;
	ctx->u.pw.type = pw->type;
	ctx->u.pw.af = pw->af;
	ctx->u.pw.local_label = pw->local_label;
	ctx->u.pw.remote_label = pw->remote_label;
	ctx->u.pw.flags = pw->flags;

	ctx->u.pw.dest = pw->nexthop;

	ctx->u.pw.fields = pw->data;

	/* Capture nexthop info for the pw destination. We need to look
	 * up and use zebra datastructs, but we're running in the zebra
	 * pthread here so that should be ok.
	 */
	/* NOTE(review): raw copy of pw->nexthop into the prefix address
	 * union - assumes the layouts line up for both AFs; TODO confirm.
	 */
	memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
	p.family = pw->af;
	p.prefixlen = ((pw->af == AF_INET) ?
		       IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);

	afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
	table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
	if (table) {
		rn = route_node_match(table, &p);
		if (rn) {
			/* Find the selected route entry, if any; presumably
			 * 're' ends up NULL when the loop finds none -
			 * TODO confirm RNODE_FOREACH_RE semantics.
			 */
			RNODE_FOREACH_RE(rn, re) {
				if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
					break;
			}

			if (re)
				copy_nexthops(&(ctx->u.pw.nhg.nexthop),
					      re->ng.nexthop, NULL);

			route_unlock_node(rn);
		}
	}

	return AOK;
}
1580
/*
 * Enqueue a new update,
 * and ensure an event is active for the dataplane pthread.
 *
 * Ownership of 'ctx' passes to the update queue here; callers must not
 * free it after a successful enqueue. Returns the result of
 * dplane_provider_work_ready().
 */
static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane pthread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also: CAS loop exits when our
	 * value is published, or another thread has already raised the
	 * max past 'curr'.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	ret = dplane_provider_work_ready();

	return ret;
}
1626
/*
 * Utility that prepares a route update and enqueues it for processing
 *
 * Allocates a context, fills it from 'rn'/'re' (plus 'old_re' details
 * for the update op), and hands it to the dataplane pthread. Returns
 * ZEBRA_DPLANE_REQUEST_QUEUED on success; on failure, frees the context
 * and returns _FAILURE.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			/* Tag the old route too, so stale results can be
			 * detected later.
			 */
			old_re->dplane_sequence =
				zebra_router_get_next_sequence();
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->u.rinfo.zd_old_tag = old_re->tag;
			ctx->u.rinfo.zd_old_type = old_re->type;
			ctx->u.rinfo.zd_old_instance = old_re->instance;
			ctx->u.rinfo.zd_old_distance = old_re->distance;
			ctx->u.rinfo.zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_update_enqueue(ctx);
	}

	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}

	return result;
}
1691
1692 /*
1693 * Enqueue a route 'add' for the dataplane.
1694 */
1695 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1696 struct route_entry *re)
1697 {
1698 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1699
1700 if (rn == NULL || re == NULL)
1701 goto done;
1702
1703 ret = dplane_route_update_internal(rn, re, NULL,
1704 DPLANE_OP_ROUTE_INSTALL);
1705
1706 done:
1707 return ret;
1708 }
1709
1710 /*
1711 * Enqueue a route update for the dataplane.
1712 */
1713 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1714 struct route_entry *re,
1715 struct route_entry *old_re)
1716 {
1717 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1718
1719 if (rn == NULL || re == NULL)
1720 goto done;
1721
1722 ret = dplane_route_update_internal(rn, re, old_re,
1723 DPLANE_OP_ROUTE_UPDATE);
1724 done:
1725 return ret;
1726 }
1727
1728 /*
1729 * Enqueue a route removal for the dataplane.
1730 */
1731 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1732 struct route_entry *re)
1733 {
1734 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1735
1736 if (rn == NULL || re == NULL)
1737 goto done;
1738
1739 ret = dplane_route_update_internal(rn, re, NULL,
1740 DPLANE_OP_ROUTE_DELETE);
1741
1742 done:
1743 return ret;
1744 }
1745
1746 /*
1747 * Notify the dplane when system/connected routes change.
1748 */
1749 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
1750 struct route_entry *re)
1751 {
1752 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1753
1754 /* Ignore this event unless a provider plugin has requested it. */
1755 if (!zdplane_info.dg_sys_route_notifs) {
1756 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1757 goto done;
1758 }
1759
1760 if (rn == NULL || re == NULL)
1761 goto done;
1762
1763 ret = dplane_route_update_internal(rn, re, NULL,
1764 DPLANE_OP_SYS_ROUTE_ADD);
1765
1766 done:
1767 return ret;
1768 }
1769
1770 /*
1771 * Notify the dplane when system/connected routes are deleted.
1772 */
1773 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
1774 struct route_entry *re)
1775 {
1776 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1777
1778 /* Ignore this event unless a provider plugin has requested it. */
1779 if (!zdplane_info.dg_sys_route_notifs) {
1780 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
1781 goto done;
1782 }
1783
1784 if (rn == NULL || re == NULL)
1785 goto done;
1786
1787 ret = dplane_route_update_internal(rn, re, NULL,
1788 DPLANE_OP_SYS_ROUTE_DELETE);
1789
1790 done:
1791 return ret;
1792 }
1793
1794 /*
1795 * Update from an async notification, to bring other fibs up-to-date.
1796 */
1797 enum zebra_dplane_result
1798 dplane_route_notif_update(struct route_node *rn,
1799 struct route_entry *re,
1800 enum dplane_op_e op,
1801 struct zebra_dplane_ctx *ctx)
1802 {
1803 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
1804 struct zebra_dplane_ctx *new_ctx = NULL;
1805 struct nexthop *nexthop;
1806
1807 if (rn == NULL || re == NULL)
1808 goto done;
1809
1810 new_ctx = dplane_ctx_alloc();
1811 if (new_ctx == NULL)
1812 goto done;
1813
1814 /* Init context with info from zebra data structs */
1815 dplane_ctx_route_init(new_ctx, op, rn, re);
1816
1817 /* For add/update, need to adjust the nexthops so that we match
1818 * the notification state, which may not be the route-entry/RIB
1819 * state.
1820 */
1821 if (op == DPLANE_OP_ROUTE_UPDATE ||
1822 op == DPLANE_OP_ROUTE_INSTALL) {
1823
1824 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
1825 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
1826
1827 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
1828 (rib_active_nhg(re))->nexthop, NULL);
1829
1830 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
1831 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
1832
1833 }
1834
1835 /* Capture info about the source of the notification, in 'ctx' */
1836 dplane_ctx_set_notif_provider(new_ctx,
1837 dplane_ctx_get_notif_provider(ctx));
1838
1839 dplane_update_enqueue(new_ctx);
1840
1841 ret = ZEBRA_DPLANE_REQUEST_QUEUED;
1842
1843 done:
1844 return ret;
1845 }
1846
1847 /*
1848 * Enqueue LSP add for the dataplane.
1849 */
1850 enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
1851 {
1852 enum zebra_dplane_result ret =
1853 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
1854
1855 return ret;
1856 }
1857
1858 /*
1859 * Enqueue LSP update for the dataplane.
1860 */
1861 enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
1862 {
1863 enum zebra_dplane_result ret =
1864 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
1865
1866 return ret;
1867 }
1868
1869 /*
1870 * Enqueue LSP delete for the dataplane.
1871 */
1872 enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
1873 {
1874 enum zebra_dplane_result ret =
1875 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
1876
1877 return ret;
1878 }
1879
/* Update or un-install resulting from an async notification.
 *
 * Builds an LSP context from 'lsp', tags it with the provider that
 * originated 'notif_ctx', and enqueues it. Counters are updated on all
 * paths; the context is freed on any failure.
 */
enum zebra_dplane_result
dplane_lsp_notif_update(zebra_lsp_t *lsp,
			enum dplane_op_e op,
			struct zebra_dplane_ctx *notif_ctx)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	ret = dplane_ctx_lsp_init(ctx, op, lsp);
	if (ret != AOK)
		goto done;

	/* Capture info about the source of the notification */
	dplane_ctx_set_notif_provider(
		ctx,
		dplane_ctx_get_notif_provider(notif_ctx));

	ret = dplane_update_enqueue(ctx);

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
					  memory_order_relaxed);
		if (ctx)
			dplane_ctx_free(&ctx);
	}
	return result;
}
1923
1924 /*
1925 * Enqueue pseudowire install for the dataplane.
1926 */
1927 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
1928 {
1929 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
1930 }
1931
1932 /*
1933 * Enqueue pseudowire un-install for the dataplane.
1934 */
1935 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
1936 {
1937 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
1938 }
1939
1940 /*
1941 * Common internal LSP update utility
1942 */
1943 static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
1944 enum dplane_op_e op)
1945 {
1946 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1947 int ret = EINVAL;
1948 struct zebra_dplane_ctx *ctx = NULL;
1949
1950 /* Obtain context block */
1951 ctx = dplane_ctx_alloc();
1952
1953 ret = dplane_ctx_lsp_init(ctx, op, lsp);
1954 if (ret != AOK)
1955 goto done;
1956
1957 ret = dplane_update_enqueue(ctx);
1958
1959 done:
1960 /* Update counter */
1961 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
1962 memory_order_relaxed);
1963
1964 if (ret == AOK)
1965 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1966 else {
1967 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
1968 memory_order_relaxed);
1969 dplane_ctx_free(&ctx);
1970 }
1971
1972 return result;
1973 }
1974
1975 /*
1976 * Internal, common handler for pseudowire updates.
1977 */
1978 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
1979 enum dplane_op_e op)
1980 {
1981 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1982 int ret;
1983 struct zebra_dplane_ctx *ctx = NULL;
1984
1985 ctx = dplane_ctx_alloc();
1986
1987 ret = dplane_ctx_pw_init(ctx, op, pw);
1988 if (ret != AOK)
1989 goto done;
1990
1991 ret = dplane_update_enqueue(ctx);
1992
1993 done:
1994 /* Update counter */
1995 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
1996 memory_order_relaxed);
1997
1998 if (ret == AOK)
1999 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2000 else {
2001 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
2002 memory_order_relaxed);
2003 dplane_ctx_free(&ctx);
2004 }
2005
2006 return result;
2007 }
2008
/*
 * Enqueue interface address add for the dataplane.
 *
 * On non-netlink platforms with SIOCAIFADDR-style ioctls, perform extra
 * zebra-pthread-side validation/cleanup before enqueueing.
 */
enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
					      const struct connected *ifc)
{
#if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
	/* Extra checks for this OS path. */

	/* Don't configure PtP addresses on broadcast ifs or reverse */
	if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
		if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
			zlog_debug("Failed to set intf addr: mismatch p2p and connected");

		return ZEBRA_DPLANE_REQUEST_FAILURE;
	}

	/* Ensure that no existing installed v4 route conflicts with
	 * the new interface prefix. This check must be done in the
	 * zebra pthread context, and any route delete (if needed)
	 * is enqueued before the interface address programming attempt.
	 */
	if (ifc->address->family == AF_INET) {
		struct prefix_ipv4 *p;

		p = (struct prefix_ipv4 *)ifc->address;
		rib_lookup_and_pushup(p, ifp->vrf_id);
	}
#endif

	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
}
2041
2042 /*
2043 * Enqueue interface address remove/uninstall for the dataplane.
2044 */
2045 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
2046 const struct connected *ifc)
2047 {
2048 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
2049 }
2050
/*
 * Common helper for interface-address install/uninstall: builds a
 * context from 'ifp'/'ifc', enqueues it for the dataplane pthread, and
 * maintains the intf-addr counters. Returns QUEUED or FAILURE.
 */
static enum zebra_dplane_result intf_addr_update_internal(
	const struct interface *ifp, const struct connected *ifc,
	enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char addr_str[PREFIX_STRLEN];

		prefix2str(ifc->address, addr_str, sizeof(addr_str));

		zlog_debug("init intf ctx %s: idx %d, addr %u:%s",
			   dplane_op2str(op), ifp->ifindex, ifp->vrf_id,
			   addr_str);
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	/* NOTE(review): passes vrf_id where an ns id is expected -
	 * looks intentional in this codebase, but verify.
	 */
	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	/* Init the interface-addr-specific area */
	memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;
	ctx->u.intf.prefix = *(ifc->address);

	if (if_is_broadcast(ifp))
		ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;

	if (CONNECTED_PEER(ifc)) {
		ctx->u.intf.dest_prefix = *(ifc->destination);
		ctx->u.intf.flags |=
			(DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
	}

	if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
		ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;

	if (ifc->label) {
		size_t len;

		ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;

		/* Use embedded buffer if it's adequate; else allocate. */
		len = strlen(ifc->label);

		if (len < sizeof(ctx->u.intf.label_buf)) {
			strlcpy(ctx->u.intf.label_buf, ifc->label,
				sizeof(ctx->u.intf.label_buf));
			ctx->u.intf.label = ctx->u.intf.label_buf;
		} else {
			/* NOTE(review): plain strdup - presumably the ctx
			 * free path releases it with free(); confirm there
			 * is no leak for long labels.
			 */
			ctx->u.intf.label = strdup(ifc->label);
		}
	}

	/* Ownership of ctx passes to the queue on success */
	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
					  1, memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2132
2133 /*
2134 * Enqueue vxlan/evpn mac add (or update).
2135 */
2136 enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
2137 vlanid_t vid,
2138 const struct ethaddr *mac,
2139 struct in_addr vtep_ip,
2140 bool sticky)
2141 {
2142 enum zebra_dplane_result result;
2143
2144 /* Use common helper api */
2145 result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, vid,
2146 mac, vtep_ip, sticky);
2147 return result;
2148 }
2149
2150 /*
2151 * Enqueue vxlan/evpn mac delete.
2152 */
2153 enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
2154 vlanid_t vid,
2155 const struct ethaddr *mac,
2156 struct in_addr vtep_ip)
2157 {
2158 enum zebra_dplane_result result;
2159
2160 /* Use common helper api */
2161 result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, vid, mac,
2162 vtep_ip, false);
2163 return result;
2164 }
2165
/*
 * Common helper api for MAC address/vxlan updates: builds a context from
 * the arguments, enqueues it for the dataplane pthread, and maintains
 * the MAC counters. 'mac' must be non-NULL here (both callers pass one).
 */
static enum zebra_dplane_result
mac_update_internal(enum dplane_op_e op,
		    const struct interface *ifp,
		    vlanid_t vid,
		    const struct ethaddr *mac,
		    struct in_addr vtep_ip,
		    bool sticky)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret;
	struct zebra_dplane_ctx *ctx = NULL;
	struct zebra_ns *zns;

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
		char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];

		zlog_debug("init mac ctx %s: mac %s, ifp %s, vtep %s",
			   dplane_op2str(op),
			   prefix_mac2str(mac, buf1, sizeof(buf1)),
			   ifp->name,
			   inet_ntop(AF_INET, &vtep_ip, buf2, sizeof(buf2)));
	}

	ctx = dplane_ctx_alloc();

	ctx->zd_op = op;
	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
	ctx->zd_vrf_id = ifp->vrf_id;

	/* NOTE(review): passes vrf_id where an ns id is expected -
	 * looks intentional in this codebase, but verify.
	 */
	zns = zebra_ns_lookup(ifp->vrf_id);
	dplane_ctx_ns_init(ctx, zns, false);

	strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
	ctx->zd_ifindex = ifp->ifindex;

	/* Init the mac-specific data area */
	memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));

	ctx->u.macinfo.vtep_ip = vtep_ip;
	ctx->u.macinfo.mac = *mac;
	ctx->u.macinfo.vid = vid;
	ctx->u.macinfo.is_sticky = sticky;

	/* Enqueue for processing on the dplane pthread */
	ret = dplane_update_enqueue(ctx);

	/* Increment counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else {
		/* Error counter */
		atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
2230
2231 /*
2232 * Enqueue evpn neighbor add for the dataplane.
2233 */
2234 enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
2235 const struct ipaddr *ip,
2236 const struct ethaddr *mac,
2237 uint32_t flags)
2238 {
2239 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2240
2241 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL,
2242 ifp, mac, ip, flags, DPLANE_NUD_NOARP);
2243
2244 return result;
2245 }
2246
2247 /*
2248 * Enqueue evpn neighbor update for the dataplane.
2249 */
2250 enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
2251 const struct ipaddr *ip,
2252 const struct ethaddr *mac)
2253 {
2254 enum zebra_dplane_result result;
2255
2256 result = neigh_update_internal(DPLANE_OP_NEIGH_UPDATE,
2257 ifp, mac, ip, 0, DPLANE_NUD_PROBE);
2258
2259 return result;
2260 }
2261
2262 /*
2263 * Enqueue evpn neighbor delete for the dataplane.
2264 */
2265 enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
2266 const struct ipaddr *ip)
2267 {
2268 enum zebra_dplane_result result;
2269
2270 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE,
2271 ifp, NULL, ip, 0, 0);
2272
2273 return result;
2274 }
2275
2276 /*
2277 * Enqueue evpn VTEP add for the dataplane.
2278 */
2279 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
2280 const struct in_addr *ip,
2281 vni_t vni)
2282 {
2283 enum zebra_dplane_result result;
2284 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2285 struct ipaddr addr;
2286
2287 if (IS_ZEBRA_DEBUG_VXLAN)
2288 zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
2289 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2290
2291 SET_IPADDR_V4(&addr);
2292 addr.ipaddr_v4 = *ip;
2293
2294 result = neigh_update_internal(DPLANE_OP_VTEP_ADD,
2295 ifp, &mac, &addr, 0, 0);
2296
2297 return result;
2298 }
2299
2300 /*
2301 * Enqueue evpn VTEP add for the dataplane.
2302 */
2303 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
2304 const struct in_addr *ip,
2305 vni_t vni)
2306 {
2307 enum zebra_dplane_result result;
2308 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
2309 struct ipaddr addr;
2310
2311 if (IS_ZEBRA_DEBUG_VXLAN)
2312 zlog_debug(
2313 "Uninstall %s from flood list for VNI %u intf %s(%u)",
2314 inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
2315
2316 SET_IPADDR_V4(&addr);
2317 addr.ipaddr_v4 = *ip;
2318
2319 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE,
2320 ifp, &mac, &addr, 0, 0);
2321
2322 return result;
2323 }
2324
2325 /*
2326 * Common helper api for evpn neighbor updates
2327 */
2328 static enum zebra_dplane_result
2329 neigh_update_internal(enum dplane_op_e op,
2330 const struct interface *ifp,
2331 const struct ethaddr *mac,
2332 const struct ipaddr *ip,
2333 uint32_t flags, uint16_t state)
2334 {
2335 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
2336 int ret;
2337 struct zebra_dplane_ctx *ctx = NULL;
2338 struct zebra_ns *zns;
2339
2340 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2341 char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];
2342
2343 zlog_debug("init neigh ctx %s: ifp %s, mac %s, ip %s",
2344 dplane_op2str(op),
2345 prefix_mac2str(mac, buf1, sizeof(buf1)),
2346 ifp->name,
2347 ipaddr2str(ip, buf2, sizeof(buf2)));
2348 }
2349
2350 ctx = dplane_ctx_alloc();
2351
2352 ctx->zd_op = op;
2353 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2354 ctx->zd_vrf_id = ifp->vrf_id;
2355
2356 zns = zebra_ns_lookup(ifp->vrf_id);
2357 dplane_ctx_ns_init(ctx, zns, false);
2358
2359 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2360 ctx->zd_ifindex = ifp->ifindex;
2361
2362 /* Init the neighbor-specific data area */
2363 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
2364
2365 ctx->u.neigh.ip_addr = *ip;
2366 if (mac)
2367 ctx->u.neigh.mac = *mac;
2368 ctx->u.neigh.flags = flags;
2369 ctx->u.neigh.state = state;
2370
2371 /* Enqueue for processing on the dplane pthread */
2372 ret = dplane_update_enqueue(ctx);
2373
2374 /* Increment counter */
2375 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
2376 memory_order_relaxed);
2377
2378 if (ret == AOK)
2379 result = ZEBRA_DPLANE_REQUEST_QUEUED;
2380 else {
2381 /* Error counter */
2382 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
2383 memory_order_relaxed);
2384 dplane_ctx_free(&ctx);
2385 }
2386
2387 return result;
2388 }
2389
/*
 * Handler for 'show dplane': dumps the global dataplane counters.
 * Always returns CMD_SUCCESS; 'detailed' is currently unused here.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, queue_max, limit, errs, incoming, yields,
		other_errs;

	/* Using atomics because counters are being changed in different
	 * pthread contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);
	yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
				      memory_order_relaxed);
	other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
					  memory_order_relaxed);

	/* Route-related counters */
	vty_out(vty, "Zebra dataplane:\nRoute updates:            %"PRIu64"\n",
		incoming);
	vty_out(vty, "Route update errors:      %"PRIu64"\n", errs);
	vty_out(vty, "Other errors       :      %"PRIu64"\n", other_errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max:   %"PRIu64"\n", queue_max);
	vty_out(vty, "Dplane update yields:     %"PRIu64"\n", yields);

	/* LSP counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
				    memory_order_relaxed);
	vty_out(vty, "LSP updates:              %"PRIu64"\n", incoming);
	vty_out(vty, "LSP update errors:        %"PRIu64"\n", errs);

	/* Pseudowire counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
				    memory_order_relaxed);
	vty_out(vty, "PW updates:               %"PRIu64"\n", incoming);
	vty_out(vty, "PW update errors:         %"PRIu64"\n", errs);

	/* Interface-address counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
				    memory_order_relaxed);
	vty_out(vty, "Intf addr updates:        %"PRIu64"\n", incoming);
	vty_out(vty, "Intf addr errors:         %"PRIu64"\n", errs);

	/* EVPN MAC counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
				    memory_order_relaxed);
	vty_out(vty, "EVPN MAC updates:         %"PRIu64"\n", incoming);
	vty_out(vty, "EVPN MAC errors:          %"PRIu64"\n", errs);

	/* EVPN neighbor counters */
	incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
					memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
				    memory_order_relaxed);
	vty_out(vty, "EVPN neigh updates:       %"PRIu64"\n", incoming);
	vty_out(vty, "EVPN neigh errors:        %"PRIu64"\n", errs);

	return CMD_SUCCESS;
}
2462
2463 /*
2464 * Handler for 'show dplane providers'
2465 */
2466 int dplane_show_provs_helper(struct vty *vty, bool detailed)
2467 {
2468 struct zebra_dplane_provider *prov;
2469 uint64_t in, in_max, out, out_max;
2470
2471 vty_out(vty, "Zebra dataplane providers:\n");
2472
2473 DPLANE_LOCK();
2474 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
2475 DPLANE_UNLOCK();
2476
2477 /* Show counters, useful info from each registered provider */
2478 while (prov) {
2479
2480 in = atomic_load_explicit(&prov->dp_in_counter,
2481 memory_order_relaxed);
2482 in_max = atomic_load_explicit(&prov->dp_in_max,
2483 memory_order_relaxed);
2484 out = atomic_load_explicit(&prov->dp_out_counter,
2485 memory_order_relaxed);
2486 out_max = atomic_load_explicit(&prov->dp_out_max,
2487 memory_order_relaxed);
2488
2489 vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
2490 "out: %"PRIu64", q_max: %"PRIu64"\n",
2491 prov->dp_name, prov->dp_id, in, in_max, out, out_max);
2492
2493 DPLANE_LOCK();
2494 prov = TAILQ_NEXT(prov, dp_prov_link);
2495 DPLANE_UNLOCK();
2496 }
2497
2498 return CMD_SUCCESS;
2499 }
2500
2501 /*
2502 * Helper for 'show run' etc.
2503 */
2504 int dplane_config_write_helper(struct vty *vty)
2505 {
2506 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
2507 vty_out(vty, "zebra dplane limit %u\n",
2508 zdplane_info.dg_max_queued_updates);
2509
2510 return 0;
2511 }
2512
2513 /*
2514 * Provider registration
2515 */
/*
 * Register a new dataplane provider plugin. 'fp' (the per-cycle process
 * callback) is required; 'start_fp' and 'fini_fp' are optional lifecycle
 * callbacks. On success, returns 0 (AOK) and, if 'prov_p' is non-NULL,
 * stores the new provider object there; on validation failure returns
 * EINVAL and stores NULL.
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio prio,
			     int flags,
			     int (*start_fp)(struct zebra_dplane_provider *),
			     int (*fp)(struct zebra_dplane_provider *),
			     int (*fini_fp)(struct zebra_dplane_provider *,
					    bool early),
			     void *data,
			     struct zebra_dplane_provider **prov_p)
{
	int ret = 0;
	struct zebra_dplane_provider *p = NULL, *last;

	/* Validate: the process callback is mandatory */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	/* Priority must be within the defined range */
	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));

	pthread_mutex_init(&(p->dp_mutex), NULL);
	TAILQ_INIT(&(p->dp_ctx_in_q));
	TAILQ_INIT(&(p->dp_ctx_out_q));

	p->dp_flags = flags;
	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_start = start_fp;
	p->dp_fini = fini_fp;
	p->dp_data = data;

	/* Lock - the dplane pthread may be running */
	DPLANE_LOCK();

	/* Ids are assigned sequentially under the lock */
	p->dp_id = ++zdplane_info.dg_provider_id;

	/* Use the caller's name, or synthesize one from the new id */
	if (name)
		strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	else
		snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
			 "provider-%u", p->dp_id);

	/* Insert into list ordered by priority: stop at the first
	 * existing provider with a strictly larger priority value.
	 */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_prov_link);

	/* And unlock */
	DPLANE_UNLOCK();

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
			   p->dp_name, p->dp_id, p->dp_priority);

done:
	/* On the error paths above, 'p' is still NULL here */
	if (prov_p)
		*prov_p = p;

	return ret;
}
2591
2592 /* Accessors for provider attributes */
/* Return the provider's name string (set at registration time). */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
{
	return prov->dp_name;
}
2597
/* Return the provider's unique id, assigned at registration time. */
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
{
	return prov->dp_id;
}
2602
/* Return the opaque data pointer the provider supplied at registration. */
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
{
	return prov->dp_data;
}
2607
/* Return the max number of updates a provider should process per cycle.
 * Note: this is currently a single global value; the 'prov' argument
 * is unused.
 */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
{
	return zdplane_info.dg_updates_per_cycle;
}
2612
2613 /* Lock/unlock a provider's mutex - iff the provider was registered with
2614 * the THREADED flag.
2615 */
/* Lock the provider's mutex - a no-op unless the provider registered
 * with the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
2621
/* Unlock the provider's mutex - a no-op unless the provider registered
 * with the THREADED flag.
 */
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
2627
2628 /*
2629 * Dequeue and maintain associated counter
2630 */
2631 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
2632 struct zebra_dplane_provider *prov)
2633 {
2634 struct zebra_dplane_ctx *ctx = NULL;
2635
2636 dplane_provider_lock(prov);
2637
2638 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2639 if (ctx) {
2640 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2641
2642 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
2643 memory_order_relaxed);
2644 }
2645
2646 dplane_provider_unlock(prov);
2647
2648 return ctx;
2649 }
2650
2651 /*
2652 * Dequeue work to a list, return count
2653 */
2654 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
2655 struct dplane_ctx_q *listp)
2656 {
2657 int limit, ret;
2658 struct zebra_dplane_ctx *ctx;
2659
2660 limit = zdplane_info.dg_updates_per_cycle;
2661
2662 dplane_provider_lock(prov);
2663
2664 for (ret = 0; ret < limit; ret++) {
2665 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
2666 if (ctx) {
2667 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
2668
2669 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
2670 } else {
2671 break;
2672 }
2673 }
2674
2675 if (ret > 0)
2676 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
2677 memory_order_relaxed);
2678
2679 dplane_provider_unlock(prov);
2680
2681 return ret;
2682 }
2683
2684 /*
2685 * Enqueue and maintain associated counter
2686 */
2687 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
2688 struct zebra_dplane_ctx *ctx)
2689 {
2690 dplane_provider_lock(prov);
2691
2692 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
2693 zd_q_entries);
2694
2695 dplane_provider_unlock(prov);
2696
2697 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
2698 memory_order_relaxed);
2699 }
2700
2701 /*
2702 * Accessor for provider object
2703 */
/* Test whether the provider registered with the THREADED flag, meaning
 * its queues may be accessed from multiple pthreads and need locking.
 */
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
{
	return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
}
2708
2709 /*
2710 * Internal helper that copies information from a zebra ns object; this is
2711 * called in the zebra main pthread context as part of dplane ctx init.
2712 */
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns)
{
	/* Capture the namespace id */
	ns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	/* On netlink platforms, also capture the dplane's own
	 * netlink socket info.
	 */
	ns_info->is_cmd = true;
	ns_info->nls = zns->netlink_dplane;
#endif /* NETLINK */
}
2723
2724 /*
2725 * Provider api to signal that work/events are available
2726 * for the dataplane pthread.
2727 */
2728 int dplane_provider_work_ready(void)
2729 {
2730 /* Note that during zebra startup, we may be offered work before
2731 * the dataplane pthread (and thread-master) are ready. We want to
2732 * enqueue the work, but the event-scheduling machinery may not be
2733 * available.
2734 */
2735 if (zdplane_info.dg_run) {
2736 thread_add_event(zdplane_info.dg_master,
2737 dplane_thread_loop, NULL, 0,
2738 &zdplane_info.dg_t_update);
2739 }
2740
2741 return AOK;
2742 }
2743
2744 /*
2745 * Enqueue a context directly to zebra main.
2746 */
2747 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
2748 {
2749 struct dplane_ctx_q temp_list;
2750
2751 /* Zebra's api takes a list, so we need to use a temporary list */
2752 TAILQ_INIT(&temp_list);
2753
2754 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
2755 (zdplane_info.dg_results_cb)(&temp_list);
2756 }
2757
2758 /*
2759 * Kernel dataplane provider
2760 */
2761
2762 /*
2763 * Handler for kernel LSP updates
2764 */
2765 static enum zebra_dplane_result
2766 kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
2767 {
2768 enum zebra_dplane_result res;
2769
2770 /* Call into the synchronous kernel-facing code here */
2771 res = kernel_lsp_update(ctx);
2772
2773 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2774 atomic_fetch_add_explicit(
2775 &zdplane_info.dg_lsp_errors, 1,
2776 memory_order_relaxed);
2777
2778 return res;
2779 }
2780
2781 /*
2782 * Handler for kernel pseudowire updates
2783 */
2784 static enum zebra_dplane_result
2785 kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
2786 {
2787 enum zebra_dplane_result res;
2788
2789 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2790 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
2791 dplane_ctx_get_ifname(ctx),
2792 dplane_op2str(ctx->zd_op),
2793 dplane_ctx_get_pw_af(ctx),
2794 dplane_ctx_get_pw_local_label(ctx),
2795 dplane_ctx_get_pw_remote_label(ctx));
2796
2797 res = kernel_pw_update(ctx);
2798
2799 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2800 atomic_fetch_add_explicit(
2801 &zdplane_info.dg_pw_errors, 1,
2802 memory_order_relaxed);
2803
2804 return res;
2805 }
2806
2807 /*
2808 * Handler for kernel route updates
2809 */
2810 static enum zebra_dplane_result
2811 kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
2812 {
2813 enum zebra_dplane_result res;
2814
2815 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2816 char dest_str[PREFIX_STRLEN];
2817
2818 prefix2str(dplane_ctx_get_dest(ctx),
2819 dest_str, sizeof(dest_str));
2820
2821 zlog_debug("%u:%s Dplane route update ctx %p op %s",
2822 dplane_ctx_get_vrf(ctx), dest_str,
2823 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
2824 }
2825
2826 /* Call into the synchronous kernel-facing code here */
2827 res = kernel_route_update(ctx);
2828
2829 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2830 atomic_fetch_add_explicit(
2831 &zdplane_info.dg_route_errors, 1,
2832 memory_order_relaxed);
2833
2834 return res;
2835 }
2836
2837 /*
2838 * Handler for kernel-facing interface address updates
2839 */
2840 static enum zebra_dplane_result
2841 kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
2842 {
2843 enum zebra_dplane_result res;
2844
2845 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2846 char dest_str[PREFIX_STRLEN];
2847
2848 prefix2str(dplane_ctx_get_intf_addr(ctx), dest_str,
2849 sizeof(dest_str));
2850
2851 zlog_debug("Dplane intf %s, idx %u, addr %s",
2852 dplane_op2str(dplane_ctx_get_op(ctx)),
2853 dplane_ctx_get_ifindex(ctx), dest_str);
2854 }
2855
2856 res = kernel_address_update_ctx(ctx);
2857
2858 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2859 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
2860 1, memory_order_relaxed);
2861
2862 return res;
2863 }
2864
2865 /*
2866 * Handler for kernel-facing EVPN MAC address updates
2867 */
2868 static enum zebra_dplane_result
2869 kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
2870 {
2871 enum zebra_dplane_result res;
2872
2873 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2874 char buf[ETHER_ADDR_STRLEN];
2875
2876 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
2877 sizeof(buf));
2878
2879 zlog_debug("Dplane %s, mac %s, ifindex %u",
2880 dplane_op2str(dplane_ctx_get_op(ctx)),
2881 buf, dplane_ctx_get_ifindex(ctx));
2882 }
2883
2884 res = kernel_mac_update_ctx(ctx);
2885
2886 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2887 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
2888 1, memory_order_relaxed);
2889
2890 return res;
2891 }
2892
2893 /*
2894 * Handler for kernel-facing EVPN neighbor updates
2895 */
2896 static enum zebra_dplane_result
2897 kernel_dplane_neigh_update(struct zebra_dplane_ctx *ctx)
2898 {
2899 enum zebra_dplane_result res;
2900
2901 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2902 char buf[PREFIX_STRLEN];
2903
2904 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
2905 sizeof(buf));
2906
2907 zlog_debug("Dplane %s, ip %s, ifindex %u",
2908 dplane_op2str(dplane_ctx_get_op(ctx)),
2909 buf, dplane_ctx_get_ifindex(ctx));
2910 }
2911
2912 res = kernel_neigh_update_ctx(ctx);
2913
2914 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
2915 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
2916 1, memory_order_relaxed);
2917
2918 return res;
2919 }
2920
2921 /*
2922 * Kernel provider callback
2923 */
/*
 * Per-cycle work callback for the default kernel provider: drains up to
 * the per-cycle limit of contexts from the provider's input queue,
 * dispatches each to the appropriate kernel-facing handler based on the
 * context's operation, records the result status, and re-enqueues the
 * context on the provider's output queue. If the limit was reached,
 * schedules another dplane work event so remaining work isn't stranded.
 */
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* A previous provider plugin may have asked to skip the
		 * kernel update.
		 */
		if (dplane_ctx_is_skip_kernel(ctx)) {
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			goto skip_one;
		}

		/* Dispatch to appropriate kernel-facing apis */
		switch (dplane_ctx_get_op(ctx)) {

		case DPLANE_OP_ROUTE_INSTALL:
		case DPLANE_OP_ROUTE_UPDATE:
		case DPLANE_OP_ROUTE_DELETE:
			res = kernel_dplane_route_update(ctx);
			break;

		case DPLANE_OP_LSP_INSTALL:
		case DPLANE_OP_LSP_UPDATE:
		case DPLANE_OP_LSP_DELETE:
			res = kernel_dplane_lsp_update(ctx);
			break;

		case DPLANE_OP_PW_INSTALL:
		case DPLANE_OP_PW_UNINSTALL:
			res = kernel_dplane_pw_update(ctx);
			break;

		case DPLANE_OP_ADDR_INSTALL:
		case DPLANE_OP_ADDR_UNINSTALL:
			res = kernel_dplane_address_update(ctx);
			break;

		case DPLANE_OP_MAC_INSTALL:
		case DPLANE_OP_MAC_DELETE:
			res = kernel_dplane_mac_update(ctx);
			break;

		case DPLANE_OP_NEIGH_INSTALL:
		case DPLANE_OP_NEIGH_UPDATE:
		case DPLANE_OP_NEIGH_DELETE:
		case DPLANE_OP_VTEP_ADD:
		case DPLANE_OP_VTEP_DELETE:
			res = kernel_dplane_neigh_update(ctx);
			break;

		/* Ignore 'notifications' - no-op */
		case DPLANE_OP_SYS_ROUTE_ADD:
		case DPLANE_OP_SYS_ROUTE_DELETE:
		case DPLANE_OP_ROUTE_NOTIFY:
		case DPLANE_OP_LSP_NOTIFY:
			res = ZEBRA_DPLANE_REQUEST_SUCCESS;
			break;

		default:
			/* Unrecognized op: count it and report failure */
			atomic_fetch_add_explicit(
				&zdplane_info.dg_other_errors, 1,
				memory_order_relaxed);

			res = ZEBRA_DPLANE_REQUEST_FAILURE;
			break;
		}

skip_one:
		/* Record result and hand the context to the out queue */
		dplane_ctx_set_status(ctx, res);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s' reached max updates %d",
				   dplane_provider_get_name(prov), counter);

		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
					  1, memory_order_relaxed);

		dplane_provider_work_ready();
	}

	return 0;
}
3027
3028 #if DPLANE_TEST_PROVIDER
3029
3030 /*
3031 * Test dataplane provider plugin
3032 */
3033
3034 /*
3035 * Test provider process callback
3036 */
/*
 * Test provider process callback: a pass-through that simply moves
 * contexts from the 'in' queue to the 'out' queue, marking each as
 * successful. Compiled only when DPLANE_TEST_PROVIDER is enabled.
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {

		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s': op %s",
				   dplane_provider_get_name(prov),
				   dplane_op2str(dplane_ctx_get_op(ctx)));

		/* Unconditionally report success */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}
3078
3079 /*
3080 * Test provider shutdown/fini callback
3081 */
/* Test provider shutdown/fini callback: debug-logs whether this is the
 * early or final shutdown pass; no cleanup needed.
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
3092 #endif /* DPLANE_TEST_PROVIDER */
3093
3094 /*
3095 * Register default kernel provider
3096 */
/*
 * Register the built-in kernel provider (and the optional test provider
 * when DPLANE_TEST_PROVIDER is enabled). Registration failure is logged
 * but not fatal.
 */
static void dplane_provider_init(void)
{
	int ret;

	/* The kernel provider runs at kernel priority, has no start/fini
	 * callbacks, and carries no private data.
	 */
	ret = dplane_provider_register("Kernel",
				       DPLANE_PRIO_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       kernel_dplane_process_func,
				       NULL,
				       NULL, NULL);

	if (ret != AOK)
		zlog_err("Unable to register kernel dplane provider: %d",
			 ret);

#if DPLANE_TEST_PROVIDER
	/* Optional test provider ... */
	ret = dplane_provider_register("Test",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT, NULL,
				       test_dplane_process_func,
				       test_dplane_shutdown_func,
				       NULL /* data */, NULL);

	if (ret != AOK)
		zlog_err("Unable to register test dplane provider: %d",
			 ret);
#endif	/* DPLANE_TEST_PROVIDER */
}
3126
3127 /* Indicates zebra shutdown/exit is in progress. Some operations may be
3128 * simplified or skipped during shutdown processing.
3129 */
/* Return true once pre-shutdown processing has begun; some operations
 * may be simplified or skipped after this point.
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
3134
3135 /*
3136 * Early or pre-shutdown, de-init notification api. This runs pretty
3137 * early during zebra shutdown, as a signal to stop new work and prepare
3138 * for updates generated by shutdown/cleanup activity, as zebra tries to
3139 * remove everything it's responsible for.
3140 * NB: This runs in the main zebra pthread context.
3141 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Flag checked via dplane_is_in_shutdown() to curtail new work */
	zdplane_info.dg_is_shutdown = true;

	/* TODO -- Notify provider(s) of pending shutdown */
}
3151
3152 /*
3153 * Utility to determine whether work remains enqueued within the dplane;
3154 * used during system shutdown processing.
3155 */
/*
 * Return true if any work remains anywhere in the dplane: either on the
 * incoming update queue, or in any provider's in/out queues. Used during
 * shutdown to decide whether final termination can proceed.
 */
static bool dplane_work_pending(void)
{
	bool ret = false;
	struct zebra_dplane_ctx *ctx;
	struct zebra_dplane_provider *prov;

	/* TODO -- just checking incoming/pending work for now, must check
	 * providers
	 */
	DPLANE_LOCK();
	{
		/* Peek at the incoming queue and the provider list head */
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	}
	DPLANE_UNLOCK();

	/* Anything still on the incoming queue means work is pending */
	if (ctx != NULL) {
		ret = true;
		goto done;
	}

	/* Walk the providers, checking each one's in and out queues */
	while (prov) {

		dplane_provider_lock(prov);

		ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
		if (ctx == NULL)
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));

		dplane_provider_unlock(prov);

		/* Found work - stop looking */
		if (ctx != NULL)
			break;

		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* Non-NULL ctx here means a provider still has queued work */
	if (ctx != NULL)
		ret = true;

done:
	return ret;
}
3201
3202 /*
3203 * Shutdown-time intermediate callback, used to determine when all pending
3204 * in-flight updates are done. If there's still work to do, reschedules itself.
3205 * If all work is done, schedules an event to the main zebra thread for
3206 * final zebra shutdown.
3207 * This runs in the dplane pthread context.
3208 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
3232
3233 /*
3234 * Shutdown, de-init api. This runs pretty late during shutdown,
3235 * after zebra has tried to free/remove/uninstall all routes during shutdown.
3236 * At this point, dplane work may still remain to be done, so we can't just
3237 * blindly terminate. If there's still work to do, we'll periodically check
3238 * and when done, we'll enqueue a task to the zebra main thread for final
3239 * termination processing.
3240 *
3241 * NB: This runs in the main zebra thread context.
3242 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the shutdown-status polling in the dplane pthread */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
3252
3253 /*
3254 * Main dataplane pthread event loop. The thread takes new incoming work
3255 * and offers it to the first provider. It then iterates through the
3256 * providers, taking complete work from each one and offering it
3257 * to the next in order. At each step, a limited number of updates are
3258 * processed during a cycle in order to provide some fairness.
3259 *
3260 * This loop through the providers is only run once, so that the dataplane
3261 * pthread can look for other pending work - such as i/o work on behalf of
3262 * providers.
3263 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
				     zd_q_entries);

			/* Tag each context with the first provider's id */
			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	/* Account for the work just removed from the incoming queue */
	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

		/* Update the provider's counters, tracking the high-water
		 * mark of its input queue depth.
		 */
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
3443
3444 /*
3445 * Final phase of shutdown, after all work enqueued to dplane has been
3446 * processed. This is called from the zebra main pthread context.
3447 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	/* Clearing dg_run causes the work loop to bail out early */
	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}
3472
3473 /*
3474 * Initialize the dataplane module during startup, internal/private version
3475 */
static void zebra_dplane_init_internal(void)
{
	/* Start from a zeroed global state */
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	/* Default work-batching and queue-limit tunables */
	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* Register default kernel 'provider' during init */
	dplane_provider_init();
}
3492
3493 /*
3494 * Start the dataplane pthread. This step needs to be run later than the
3495 * 'init' step, in case zebra has fork-ed.
3496 */
void zebra_dplane_start(void)
{
	struct zebra_dplane_provider *prov;
	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	/* Start dataplane pthread */

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "zebra_dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	/* Enable the work loop; checked by dplane_provider_work_ready() */
	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	/* Call start callbacks for registered providers */

	DPLANE_LOCK();
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
	DPLANE_UNLOCK();

	while (prov) {

		/* The start callback is optional */
		if (prov->dp_start)
			(prov->dp_start)(prov);

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
3537
3538 /*
3539 * Initialize the dataplane module at startup; called by zebra rib_init()
3540 */
/* Public init entry point: set up internal state, then record the
 * callback zebra main uses to receive completed/errored contexts.
 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	zebra_dplane_init_internal();
	zdplane_info.dg_results_cb = results_fp;
}