]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
zebra: delay default vrf name after vrf initialization
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "lib/libfrr.h"
21 #include "lib/debug.h"
22 #include "lib/frratomic.h"
23 #include "lib/frr_pthread.h"
24 #include "lib/memory.h"
25 #include "lib/queue.h"
26 #include "lib/zebra.h"
27 #include "zebra/zebra_memory.h"
28 #include "zebra/zserv.h"
29 #include "zebra/zebra_dplane.h"
30 #include "zebra/rt.h"
31 #include "zebra/debug.h"
32
/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

/* AOK ("all ok") is used as the zero success return value throughout
 * this file; define it locally if the platform headers don't.
 */
#ifndef AOK
# define AOK 0
#endif

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;


/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

/* Debug builds assert that context pointers are non-NULL */
# define DPLANE_CTX_VALID(p) \
	assert((p) != NULL)

#else

/* No-op in normal builds */
# define DPLANE_CTX_VALID(p)

#endif /* DPLANE_DEBUG */
58
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* True when this is an 'update' op that replaces a previous route */
	bool zd_is_update;

	/* Per-route dplane sequence numbers, used to detect stale results */
	uint32_t zd_seq;
	uint32_t zd_old_seq;
	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Route type (and previous type, for updates) */
	int zd_type;
	int zd_old_type;

	afi_t zd_afi;
	safi_t zd_safi;

	/* Route attributes; the 'old' variants capture the replaced
	 * route's values in the update case.
	 */
	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Namespace info - copied, not referenced, so the 'core' structs
	 * need not outlive the context.
	 */
	struct zebra_dplane_info zd_ns_info;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
120
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value, assigned at registration time */
	uint32_t dp_id;

	/* Per-update processing callback (required) */
	dplane_provider_process_fp dp_fp;

	/* Shutdown/cleanup callback (optional) */
	dplane_provider_fini_fp dp_fini;

	/* Counters, updated atomically from multiple contexts */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_error_counter;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_provider) dp_q_providers;

};
145
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	dplane_results_fp dg_results_cb;

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Counters: total updates received, currently queued,
	 * queue high-water mark, and update errors.
	 */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core'
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)

#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/* Prototypes */
static int dplane_route_process(struct thread *event);
199
200 /*
201 * Public APIs
202 */
203
204 /*
205 * Allocate a dataplane update context
206 */
207 static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
208 {
209 struct zebra_dplane_ctx *p;
210
211 /* TODO -- just alloc'ing memory, but would like to maintain
212 * a pool
213 */
214 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
215
216 return p;
217 }
218
219 /*
220 * Free a dataplane results context.
221 */
222 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
223 {
224 if (pctx) {
225 DPLANE_CTX_VALID(*pctx);
226
227 /* TODO -- just freeing memory, but would like to maintain
228 * a pool
229 */
230
231 /* Free embedded nexthops */
232 if ((*pctx)->zd_ng.nexthop) {
233 /* This deals with recursive nexthops too */
234 nexthops_free((*pctx)->zd_ng.nexthop);
235 }
236
237 if ((*pctx)->zd_old_ng.nexthop) {
238 /* This deals with recursive nexthops too */
239 nexthops_free((*pctx)->zd_old_ng.nexthop);
240 }
241
242 XFREE(MTYPE_DP_CTX, *pctx);
243 *pctx = NULL;
244 }
245 }
246
/*
 * Return a context block to the dplane module after processing.
 * Currently just frees the block; the TODO is to re-enqueue it to the
 * next provider in the chain instead.
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- enqueue for next provider; for now, just free */
	dplane_ctx_free(pctx);
}
255
/* Enqueue a context block onto the tail of a caller-owned list.
 * The const cast is needed because the TAILQ linkage lives inside the
 * context struct itself.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
262
263 /* Dequeue a context block from the head of a list */
264 void dplane_ctx_dequeue(struct dplane_ctx_q *q, struct zebra_dplane_ctx **ctxp)
265 {
266 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
267
268 if (ctx)
269 TAILQ_REMOVE(q, ctx, zd_q_entries);
270
271 *ctxp = ctx;
272 }
273
/*
 * Accessors for information from the context object
 */

/* Result status set by update processing */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Operation code for this context */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
291
292 const char *dplane_op2str(enum dplane_op_e op)
293 {
294 const char *ret = "UNKNOWN";
295
296 switch (op) {
297 case DPLANE_OP_NONE:
298 ret = "NONE";
299 break;
300
301 /* Route update */
302 case DPLANE_OP_ROUTE_INSTALL:
303 ret = "ROUTE_INSTALL";
304 break;
305 case DPLANE_OP_ROUTE_UPDATE:
306 ret = "ROUTE_UPDATE";
307 break;
308 case DPLANE_OP_ROUTE_DELETE:
309 ret = "ROUTE_DELETE";
310 break;
311
312 };
313
314 return ret;
315 }
316
317 const char *dplane_res2str(enum zebra_dplane_result res)
318 {
319 const char *ret = "<Unknown>";
320
321 switch (res) {
322 case ZEBRA_DPLANE_REQUEST_FAILURE:
323 ret = "FAILURE";
324 break;
325 case ZEBRA_DPLANE_REQUEST_QUEUED:
326 ret = "QUEUED";
327 break;
328 case ZEBRA_DPLANE_REQUEST_SUCCESS:
329 ret = "SUCCESS";
330 break;
331 };
332
333 return ret;
334 }
335
/* Destination prefix for the route in this context */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_dest);
}

/* Source prefix is a little special - return NULL for "no src prefix".
 * The "no source" sentinel is an all-zero prefix: zd_src is memset to
 * zero at init time when no source prefix was supplied, so checking
 * prefixlen==0 plus an unspecified v6 address identifies it.
 * NOTE(review): the v6 check also covers the zeroed-v4 case via the
 * address union -- confirm this holds for all address families used.
 */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->zd_src);
	}
}
355
/* True if this context represents an update replacing a previous route */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Dplane sequence number captured for this update */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Sequence number of the replaced ('old') route, update case only */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* VRF id for the route */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* Route type (e.g. protocol that owns the route) */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_type;
}

/* Route type of the replaced route, update case only */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_type;
}

/* Address family for the route */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_afi;
}

/* SAFI for the route */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_safi;
}

/* Kernel routing table id */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

/* Route tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_tag;
}

/* Tag of the replaced route, update case only */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_tag;
}

/* Protocol instance */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_instance;
}
439
440 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
441 {
442 DPLANE_CTX_VALID(ctx);
443
444 return ctx->zd_instance;
445 }
446
/* Route metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_metric;
}

/* Metric of the replaced route, update case only */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_metric;
}

/* Route MTU */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_mtu;
}

/* Nexthop MTU */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_nexthop_mtu;
}

/* Administrative distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_distance;
}

/* Distance of the replaced route, update case only */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_distance;
}

/* Nexthop group for the route */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ng);
}

/* Nexthop group of the replaced route (non-netlink update case) */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_old_ng);
}

/* Namespace info captured at context-init time */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}

/*
 * End of dplane context accessors
 */
516
/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 * May be read from any pthread; the underlying counter is atomic.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}
525
526 /*
527 * Configure limit on the number of pending, queued updates.
528 */
529 void dplane_set_in_queue_limit(uint32_t limit, bool set)
530 {
531 /* Reset to default on 'unset' */
532 if (!set)
533 limit = DPLANE_DEFAULT_MAX_QUEUED;
534
535 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
536 memory_order_relaxed);
537 }
538
/*
 * Retrieve the current queue depth of incoming, unprocessed updates.
 * Counter is modified from both the enqueue and processing contexts,
 * hence the seq_cst atomic load.
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
547
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Copies everything the dataplane needs out of the route_node/
 * route_entry pair so the context carries no references back into
 * zebra 'core' data.
 *
 * Returns AOK on success, EINVAL if any argument is NULL.
 * Side effects: increments re->dplane_sequence and, under netlink,
 * advances the namespace's netlink command sequence counter.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;

	ctx->zd_type = re->type;
	ctx->zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->zd_dest), p);

	/* A zeroed zd_src is the "no source prefix" sentinel checked by
	 * dplane_ctx_get_src().
	 */
	if (src_p)
		prefix_copy(&(ctx->zd_src), src_p);
	else
		memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));

	ctx->zd_table_id = re->table;

	/* 'old' values default to the current route's values; the update
	 * path overwrites them when a distinct old route exists.
	 */
	ctx->zd_metric = re->metric;
	ctx->zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->zd_mtu = re->mtu;
	ctx->zd_nexthop_mtu = re->nexthop_mtu;
	ctx->zd_instance = re->instance;
	ctx->zd_tag = re->tag;
	ctx->zd_old_tag = re->tag;
	ctx->zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->zd_afi = info->afi;
	ctx->zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	zebra_dplane_info_from_zns(&(ctx->zd_ns_info), zns, true /*is_cmd*/);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE)
		zns->netlink_cmd.seq += 2;
	else
		zns->netlink_cmd.seq++;
#endif /* NETLINK*/

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthop flag is clear. */
	for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence++;
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
636
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane thread.
 *
 * Appends the context to the shared inbound queue (under the dplane
 * mutex), maintains the queued/high-water counters, and schedules the
 * processing event. Always returns AOK.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane thread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also; standard CAS retry loop,
	 * since another thread may raise the high-water mark concurrently.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	thread_add_event(zdplane_info.dg_master, dplane_route_process, NULL, 0,
			 &zdplane_info.dg_t_update);

	ret = AOK;

	return ret;
}
685
686 /*
687 * Attempt to dequeue a route-update block
688 */
689 static struct zebra_dplane_ctx *dplane_route_dequeue(void)
690 {
691 struct zebra_dplane_ctx *ctx = NULL;
692
693 DPLANE_LOCK();
694 {
695 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
696 if (ctx) {
697 TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q,
698 ctx, zd_q_entries);
699 }
700 }
701 DPLANE_UNLOCK();
702
703 return ctx;
704 }
705
/*
 * Utility that prepares a route update and enqueues it for processing.
 *
 * On success returns ZEBRA_DPLANE_REQUEST_QUEUED; on any failure the
 * context (if allocated) is freed, the error counter is bumped, and
 * ZEBRA_DPLANE_REQUEST_FAILURE is returned.
 *
 * NOTE(review): dg_routes_in is incremented on all paths, including
 * alloc failure - confirm that's the intended accounting.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			old_re->dplane_sequence++;
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->zd_old_tag = old_re->tag;
			ctx->zd_old_type = old_re->type;
			ctx->zd_old_instance = old_re->instance;
			ctx->zd_old_distance = old_re->distance;
			ctx->zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else if (ctx) {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
773
774 /*
775 * Enqueue a route 'add' for the dataplane.
776 */
777 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
778 struct route_entry *re)
779 {
780 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
781
782 if (rn == NULL || re == NULL)
783 goto done;
784
785 ret = dplane_route_update_internal(rn, re, NULL,
786 DPLANE_OP_ROUTE_INSTALL);
787
788 done:
789 return ret;
790 }
791
792 /*
793 * Enqueue a route update for the dataplane.
794 */
795 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
796 struct route_entry *re,
797 struct route_entry *old_re)
798 {
799 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
800
801 if (rn == NULL || re == NULL)
802 goto done;
803
804 ret = dplane_route_update_internal(rn, re, old_re,
805 DPLANE_OP_ROUTE_UPDATE);
806 done:
807 return ret;
808 }
809
810 /*
811 * Enqueue a route removal for the dataplane.
812 */
813 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
814 struct route_entry *re)
815 {
816 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
817
818 if (rn == NULL || re == NULL)
819 goto done;
820
821 ret = dplane_route_update_internal(rn, re, NULL,
822 DPLANE_OP_ROUTE_DELETE);
823
824 done:
825 return ret;
826 }
827
/*
 * Event handler function for routing updates. Drains the inbound
 * queue: each context is handed to the kernel dataplane and the result
 * is delivered back to zebra 'core' via the registered results
 * callback, which takes ownership of the context. Stops early if
 * shutdown has begun.
 */
static int dplane_route_process(struct thread *event)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;

	while (1) {
		/* Check for shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* TODO -- limit number of updates per cycle? */
		ctx = dplane_route_dequeue();
		if (ctx == NULL)
			break;

		/* Update counter */
		atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, 1,
					  memory_order_relaxed);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
			char dest_str[PREFIX_STRLEN];

			prefix2str(dplane_ctx_get_dest(ctx),
				   dest_str, sizeof(dest_str));

			zlog_debug("%u:%s Dplane route update ctx %p op %s",
				   dplane_ctx_get_vrf(ctx), dest_str,
				   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
		}

		/* TODO -- support series of providers */

		/* Initially, just doing kernel-facing update here */
		res = kernel_route_update(ctx);

		if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
			atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
						  1, memory_order_relaxed);

		ctx->zd_status = res;

		/* Enqueue result to zebra main context; callback owns ctx */
		zdplane_info.dg_results_cb(ctx);

		ctx = NULL;
	}

	return 0;
}
880
/*
 * Handler for 'show dplane': prints the update counters to the vty.
 * Always returns CMD_SUCCESS. Note: the 'detailed' flag is accepted
 * but not yet used by this handler.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, limit, queue_max, errs, incoming;

	/* Using atomics because counters are being changed in different
	 * contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);

	vty_out(vty, "Route updates:            %"PRIu64"\n", incoming);
	vty_out(vty, "Route update errors:      %"PRIu64"\n", errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max:   %"PRIu64"\n", queue_max);

	return CMD_SUCCESS;
}
910
911 /*
912 * Handler for 'show dplane providers'
913 */
914 int dplane_show_provs_helper(struct vty *vty, bool detailed)
915 {
916 vty_out(vty, "Zebra dataplane providers:%s\n",
917 (detailed ? " (detailed)" : ""));
918
919 return CMD_SUCCESS;
920 }
921
922 /*
923 * Provider registration
924 */
925 int dplane_provider_register(const char *name,
926 enum dplane_provider_prio_e prio,
927 dplane_provider_process_fp fp,
928 dplane_provider_fini_fp fini_fp)
929 {
930 int ret = 0;
931 struct zebra_dplane_provider *p, *last;
932
933 /* Validate */
934 if (fp == NULL) {
935 ret = EINVAL;
936 goto done;
937 }
938
939 if (prio <= DPLANE_PRIO_NONE ||
940 prio > DPLANE_PRIO_LAST) {
941 ret = EINVAL;
942 goto done;
943 }
944
945 /* Allocate and init new provider struct */
946 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
947 if (p == NULL) {
948 ret = ENOMEM;
949 goto done;
950 }
951
952 strncpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
953 p->dp_name[DPLANE_PROVIDER_NAMELEN] = '\0'; /* Belt-and-suspenders */
954
955 p->dp_priority = prio;
956 p->dp_fp = fp;
957 p->dp_fini = fini_fp;
958
959 /* Lock the lock - the dplane pthread may be running */
960 DPLANE_LOCK();
961
962 p->dp_id = ++zdplane_info.dg_provider_id;
963
964 /* Insert into list ordered by priority */
965 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_q_providers) {
966 if (last->dp_priority > p->dp_priority)
967 break;
968 }
969
970 if (last)
971 TAILQ_INSERT_BEFORE(last, p, dp_q_providers);
972 else
973 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
974 dp_q_providers);
975
976 /* And unlock */
977 DPLANE_UNLOCK();
978
979 done:
980 return ret;
981 }
982
/*
 * Zebra registers a results callback with the dataplane system; the
 * callback receives (and owns) each processed context. Returns AOK.
 */
int dplane_results_register(dplane_results_fp fp)
{
	zdplane_info.dg_results_cb = fp;
	return AOK;
}
991
/*
 * Initialize the dataplane module during startup, internal/private
 * version: zero the globals, init the mutex and queues, set the default
 * queue limit, and mark the module runnable.
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* TODO -- register default kernel 'provider' during init */

	zdplane_info.dg_run = true;

	/* TODO -- start dataplane pthread. We're using the zebra
	 * core/main thread temporarily
	 */
	zdplane_info.dg_master = zebra->master;
}
1015
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing. The flag is set by
 * zebra_dplane_pre_finish().
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
1023
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Sets the flag read by dplane_is_in_shutdown() */
	zdplane_info.dg_is_shutdown = true;

	/* Notify provider(s) of pending shutdown */
}
1040
1041 /*
1042 * Utility to determine whether work remains enqueued within the dplane;
1043 * used during system shutdown processing.
1044 */
1045 static bool dplane_work_pending(void)
1046 {
1047 struct zebra_dplane_ctx *ctx;
1048
1049 /* TODO -- just checking incoming/pending work for now */
1050 DPLANE_LOCK();
1051 {
1052 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
1053 }
1054 DPLANE_UNLOCK();
1055
1056 return (ctx != NULL);
1057 }
1058
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
1089
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the polling loop in dplane_check_shutdown_status() */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
1109
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 * Clears the 'run' flag and cancels the pending update event; remaining
 * cleanup (pthread stop, provider teardown, queue cleanup) is TODO.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	/* TODO */
	/* frr_pthread_stop(...) */

	/* Notify provider(s) of final shutdown */

	/* Clean-up provider objects */

	/* Clean queue(s) */
}
1134
/*
 * Initialize the dataplane module at startup; called by zebra rib_init().
 * Thin public wrapper around the internal init using the global zebrad.
 */
void zebra_dplane_init(void)
{
	zebra_dplane_init_internal(&zebrad);
}