]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zebra_dplane.c
zebra: limit queued route updates
[mirror_frr.git] / zebra / zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "lib/libfrr.h"
21 #include "lib/debug.h"
22 #include "lib/frratomic.h"
23 #include "lib/frr_pthread.h"
24 #include "lib/memory.h"
25 #include "lib/queue.h"
26 #include "lib/zebra.h"
27 #include "zebra/zebra_memory.h"
28 #include "zebra/zserv.h"
29 #include "zebra/zebra_dplane.h"
30 #include "zebra/rt.h"
31 #include "zebra/debug.h"
32
/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

/* AOK ("all OK") is this module's zero/success return code. */
#ifndef AOK
# define AOK 0
#endif

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;


/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

# define DPLANE_CTX_VALID(p) \
	assert((p) != NULL)

#else

/* Compiles away entirely when DPLANE_DEBUG is disabled */
# define DPLANE_CTX_VALID(p)

#endif /* DPLANE_DEBUG */
58
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread). All route-entry data is copied in,
 * so no pointers into zebra 'core' structs cross the boundary.
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_status zd_remote_status;
	enum zebra_dplane_status zd_kernel_status;

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* True when this context describes an update that replaces a
	 * different 'old' route entry (the zd_old_* fields are valid).
	 */
	bool zd_is_update;

	/* Per-route-entry dplane sequence numbers, used to detect
	 * stale results.
	 */
	uint32_t zd_seq;
	uint32_t zd_old_seq;
	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	int zd_type;
	int zd_old_type;

	afi_t zd_afi;
	safi_t zd_safi;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Namespace info */
	struct zebra_dplane_info zd_ns_info;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
120
/*
 * Registration block for one dataplane provider. Providers are kept in
 * a priority-ordered list; see dplane_provider_register().
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value, assigned by the dplane module at registration */
	uint32_t dp_id;

	/* Required per-update processing callback */
	dplane_provider_process_fp dp_fp;

	/* Optional shutdown/cleanup callback */
	dplane_provider_fini_fp dp_fini;

	/* Counters, atomic since they may be touched from multiple
	 * pthread contexts.
	 */
	_Atomic uint64_t dp_in_counter;
	_Atomic uint64_t dp_error_counter;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_provider) dp_q_providers;

};
145
/*
 * Globals: single instance of the dataplane module state, shared
 * between the zebra main pthread and the (future) dplane pthread.
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	dplane_results_fp dg_results_cb;

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Counters: totals in/errors, current queue depth, and
	 * queue-depth high-water mark.
	 */
	_Atomic uint64_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint64_t dg_route_errors;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core'
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)

#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/* Prototypes */
static int dplane_route_process(struct thread *event);
199
200 /*
201 * Public APIs
202 */
203
204 /*
205 * Allocate a dataplane update context
206 */
207 static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
208 {
209 struct zebra_dplane_ctx *p;
210
211 /* TODO -- just alloc'ing memory, but would like to maintain
212 * a pool
213 */
214 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
215
216 return p;
217 }
218
219 /*
220 * Free a dataplane results context.
221 */
222 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
223 {
224 if (pctx) {
225 DPLANE_CTX_VALID(*pctx);
226
227 /* TODO -- just freeing memory, but would like to maintain
228 * a pool
229 */
230
231 /* Free embedded nexthops */
232 if ((*pctx)->zd_ng.nexthop) {
233 /* This deals with recursive nexthops too */
234 nexthops_free((*pctx)->zd_ng.nexthop);
235 }
236
237 if ((*pctx)->zd_old_ng.nexthop) {
238 /* This deals with recursive nexthops too */
239 nexthops_free((*pctx)->zd_old_ng.nexthop);
240 }
241
242 XFREE(MTYPE_DP_CTX, *pctx);
243 *pctx = NULL;
244 }
245 }
246
/*
 * Return a context block to the dplane module after processing.
 * Callers must not use '*pctx' afterwards; it is NULLed out.
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- enqueue for next provider; for now, just free */
	dplane_ctx_free(pctx);
}
255
/* Enqueue a context block at the tail of a caller-owned list.
 * NOTE: the cast discards 'const' because the TAILQ linkage lives
 * inside the context struct itself.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
262
263 /* Dequeue a context block from the head of a list */
264 void dplane_ctx_dequeue(struct dplane_ctx_q *q, struct zebra_dplane_ctx **ctxp)
265 {
266 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
267
268 if (ctx)
269 TAILQ_REMOVE(q, ctx, zd_q_entries);
270
271 *ctxp = ctx;
272 }
273
/*
 * Accessors for information from the context object
 */

/* Return the result status set by the dataplane for this update. */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Return the operation code for this update. */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
291
292 const char *dplane_op2str(enum dplane_op_e op)
293 {
294 const char *ret = "UNKNOWN";
295
296 switch (op) {
297 case DPLANE_OP_NONE:
298 ret = "NONE";
299 break;
300
301 /* Route update */
302 case DPLANE_OP_ROUTE_INSTALL:
303 ret = "ROUTE_INSTALL";
304 break;
305 case DPLANE_OP_ROUTE_UPDATE:
306 ret = "ROUTE_UPDATE";
307 break;
308 case DPLANE_OP_ROUTE_DELETE:
309 ret = "ROUTE_DELETE";
310 break;
311
312 };
313
314 return ret;
315 }
316
317 const char *dplane_res2str(enum zebra_dplane_result res)
318 {
319 const char *ret = "<Unknown>";
320
321 switch (res) {
322 case ZEBRA_DPLANE_REQUEST_FAILURE:
323 ret = "FAILURE";
324 break;
325 case ZEBRA_DPLANE_REQUEST_QUEUED:
326 ret = "QUEUED";
327 break;
328 case ZEBRA_DPLANE_REQUEST_SUCCESS:
329 ret = "SUCCESS";
330 break;
331 };
332
333 return ret;
334 }
335
/* Return the destination prefix of the update. */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_dest);
}
342
343 /* Source prefix is a little special - return NULL for "no src prefix" */
344 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
345 {
346 DPLANE_CTX_VALID(ctx);
347
348 if (ctx->zd_src.prefixlen == 0 &&
349 IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
350 return NULL;
351 } else {
352 return &(ctx->zd_src);
353 }
354 }
355
/* True if this context is an update replacing a different 'old' route. */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Return the dplane sequence number captured for this update. */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Return the 'old' route's dplane sequence number (update case only). */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* Return the VRF id of the route. */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* Return the route type (protocol). */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_type;
}

/* Return the 'old' route type (update case only). */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_type;
}

/* Return the address family of the route. */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_afi;
}

/* Return the SAFI of the route. */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_safi;
}

/* Return the kernel table id for the route. */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

/* Return the route tag. */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_tag;
}

/* Return the 'old' route tag (update case only). */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_tag;
}

/* Return the protocol instance of the route. */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_instance;
}
439
440 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
441 {
442 DPLANE_CTX_VALID(ctx);
443
444 return ctx->zd_instance;
445 }
446
/* Return the route metric. */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_metric;
}

/* Return the 'old' route metric (update case only). */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_metric;
}

/* Return the route MTU. */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_mtu;
}

/* Return the nexthop MTU. */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_nexthop_mtu;
}

/* Return the admin distance of the route. */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_distance;
}

/* Return the 'old' route's admin distance (update case only). */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_distance;
}

/* Return the nexthop group for the update. */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ng);
}

/* Return the 'old' nexthop group (populated only without netlink). */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_old_ng);
}

/* Return the namespace info snapshot captured for this update. */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
512
513 /*
514 * End of dplane context accessors
515 */
516
517 /*
518 * Retrieve the limit on the number of pending, unprocessed updates.
519 */
520 uint32_t dplane_get_in_queue_limit(void)
521 {
522 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
523 memory_order_relaxed);
524 }
525
526 /*
527 * Configure limit on the number of pending, queued updates.
528 */
529 void dplane_set_in_queue_limit(uint32_t limit, bool set)
530 {
531 /* Reset to default on 'unset' */
532 if (!set)
533 limit = DPLANE_DEFAULT_MAX_QUEUED;
534
535 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
536 memory_order_relaxed);
537 }
538
/*
 * Retrieve the current queue depth of incoming, unprocessed updates.
 * Uses seq_cst to match the ordering used by the enqueue path.
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
547
/*
 * Initialize a context block for a route update from zebra data structs.
 * Copies all needed info out of the route node/entry so the context can
 * cross the pthread boundary without touching 'core' structs.
 * Returns AOK on success, EINVAL on bad arguments.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;

	/* 'old' values default to the current route's; the update path
	 * overwrites them when a distinct old route exists.
	 */
	ctx->zd_type = re->type;
	ctx->zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->zd_dest), p);

	if (src_p)
		prefix_copy(&(ctx->zd_src), src_p);
	else
		/* Zeroed src is the "no source prefix" sentinel that
		 * dplane_ctx_get_src() checks for.
		 */
		memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));

	ctx->zd_table_id = re->table;

	ctx->zd_metric = re->metric;
	ctx->zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->zd_mtu = re->mtu;
	ctx->zd_nexthop_mtu = re->nexthop_mtu;
	ctx->zd_instance = re->instance;
	ctx->zd_tag = re->tag;
	ctx->zd_old_tag = re->tag;
	ctx->zd_distance = re->distance;

	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->zd_afi = info->afi;
	ctx->zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	zebra_dplane_info_from_zns(&(ctx->zd_ns_info), zns, true /*is_cmd*/);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE)
		zns->netlink_cmd.seq += 2;
	else
		zns->netlink_cmd.seq++;
#endif /* NETLINK*/

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthop flag is clear. */
	for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence++;
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
636
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane thread.
 * Ownership of 'ctx' passes to the dplane queue. Returns AOK.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane thread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	/* Bump the queue-depth counter (post-increment value in 'curr') */
	curr = atomic_add_fetch_explicit(&zdplane_info.dg_routes_queued,
					 1, memory_order_seq_cst);

	/* Maybe update high-water counter also: the weak CAS loop retries
	 * (with 'high' refreshed by the CAS) if another context raced us.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	thread_add_event(zdplane_info.dg_master, dplane_route_process, NULL, 0,
			 &zdplane_info.dg_t_update);

	ret = AOK;

	return ret;
}
677
678 /*
679 * Attempt to dequeue a route-update block
680 */
681 static struct zebra_dplane_ctx *dplane_route_dequeue(void)
682 {
683 struct zebra_dplane_ctx *ctx = NULL;
684
685 DPLANE_LOCK();
686 {
687 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
688 if (ctx) {
689 TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q,
690 ctx, zd_q_entries);
691 }
692 }
693 DPLANE_UNLOCK();
694
695 return ctx;
696 }
697
/*
 * Utility that prepares a route update and enqueues it for processing.
 * Returns QUEUED on success; on failure, frees the context and bumps
 * the error counter. Note that dg_routes_in counts all attempts,
 * successful or not.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			/* Bump the old route's sequence too, so its
			 * result can be recognized as stale if needed.
			 */
			old_re->dplane_sequence++;
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->zd_old_tag = old_re->tag;
			ctx->zd_old_type = old_re->type;
			ctx->zd_old_instance = old_re->instance;
			ctx->zd_old_distance = old_re->distance;
			ctx->zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else if (ctx) {
		/* Failed after allocation: count the error and free */
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
765
766 /*
767 * Enqueue a route 'add' for the dataplane.
768 */
769 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
770 struct route_entry *re)
771 {
772 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
773
774 if (rn == NULL || re == NULL)
775 goto done;
776
777 ret = dplane_route_update_internal(rn, re, NULL,
778 DPLANE_OP_ROUTE_INSTALL);
779
780 done:
781 return ret;
782 }
783
784 /*
785 * Enqueue a route update for the dataplane.
786 */
787 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
788 struct route_entry *re,
789 struct route_entry *old_re)
790 {
791 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
792
793 if (rn == NULL || re == NULL)
794 goto done;
795
796 ret = dplane_route_update_internal(rn, re, old_re,
797 DPLANE_OP_ROUTE_UPDATE);
798 done:
799 return ret;
800 }
801
802 /*
803 * Enqueue a route removal for the dataplane.
804 */
805 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
806 struct route_entry *re)
807 {
808 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
809
810 if (rn == NULL || re == NULL)
811 goto done;
812
813 ret = dplane_route_update_internal(rn, re, NULL,
814 DPLANE_OP_ROUTE_DELETE);
815
816 done:
817 return ret;
818 }
819
/*
 * Event handler function for routing updates: drains the incoming
 * queue, hands each context to the kernel dataplane, and returns the
 * context to zebra via the registered results callback.
 */
static int dplane_route_process(struct thread *event)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;

	while (1) {
		/* Check for shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* TODO -- limit number of updates per cycle? */
		ctx = dplane_route_dequeue();
		if (ctx == NULL)
			break;

		/* Update counter */
		/* NOTE(review): decremented 'relaxed' here while the
		 * enqueue path uses 'seq_cst' - confirm the mixed
		 * orderings are intentional.
		 */
		atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, 1,
					  memory_order_relaxed);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
			char dest_str[PREFIX_STRLEN];

			prefix2str(dplane_ctx_get_dest(ctx),
				   dest_str, sizeof(dest_str));

			zlog_debug("%u:%s Dplane route update ctx %p op %s",
				   dplane_ctx_get_vrf(ctx), dest_str,
				   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
		}

		/* TODO -- support series of providers */

		/* Initially, just doing kernel-facing update here */
		res = kernel_route_update(ctx);

		if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
			atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
						  1, memory_order_relaxed);

		ctx->zd_status = res;

		/* Enqueue result to zebra main context; ownership of the
		 * context passes to the callback.
		 */
		zdplane_info.dg_results_cb(ctx);

		ctx = NULL;
	}

	return 0;
}
872
873 /*
874 * Handler for 'show dplane'
875 */
876 int dplane_show_helper(struct vty *vty, bool detailed)
877 {
878 uint64_t queued, limit, queue_max, errs, incoming;
879
880 /* Using atomics because counters are being changed in different
881 * contexts.
882 */
883 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
884 memory_order_relaxed);
885 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
886 memory_order_relaxed);
887 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
888 memory_order_relaxed);
889 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
890 memory_order_relaxed);
891 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
892 memory_order_relaxed);
893
894 vty_out(vty, "Route updates: %"PRIu64"\n", incoming);
895 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
896 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
897 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
898 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
899
900 return CMD_SUCCESS;
901 }
902
903 /*
904 * Handler for 'show dplane providers'
905 */
906 int dplane_show_provs_helper(struct vty *vty, bool detailed)
907 {
908 vty_out(vty, "Zebra dataplane providers:%s\n",
909 (detailed ? " (detailed)" : ""));
910
911 return CMD_SUCCESS;
912 }
913
914 /*
915 * Provider registration
916 */
917 int dplane_provider_register(const char *name,
918 enum dplane_provider_prio_e prio,
919 dplane_provider_process_fp fp,
920 dplane_provider_fini_fp fini_fp)
921 {
922 int ret = 0;
923 struct zebra_dplane_provider *p, *last;
924
925 /* Validate */
926 if (fp == NULL) {
927 ret = EINVAL;
928 goto done;
929 }
930
931 if (prio <= DPLANE_PRIO_NONE ||
932 prio > DPLANE_PRIO_LAST) {
933 ret = EINVAL;
934 goto done;
935 }
936
937 /* Allocate and init new provider struct */
938 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
939 if (p == NULL) {
940 ret = ENOMEM;
941 goto done;
942 }
943
944 strncpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
945 p->dp_name[DPLANE_PROVIDER_NAMELEN] = '\0'; /* Belt-and-suspenders */
946
947 p->dp_priority = prio;
948 p->dp_fp = fp;
949 p->dp_fini = fini_fp;
950
951 /* Lock the lock - the dplane pthread may be running */
952 DPLANE_LOCK();
953
954 p->dp_id = ++zdplane_info.dg_provider_id;
955
956 /* Insert into list ordered by priority */
957 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_q_providers) {
958 if (last->dp_priority > p->dp_priority)
959 break;
960 }
961
962 if (last)
963 TAILQ_INSERT_BEFORE(last, p, dp_q_providers);
964 else
965 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
966 dp_q_providers);
967
968 /* And unlock */
969 DPLANE_UNLOCK();
970
971 done:
972 return ret;
973 }
974
/*
 * Zebra registers a results callback with the dataplane system;
 * the callback receives completed contexts from the dplane.
 * Always returns AOK.
 */
int dplane_results_register(dplane_results_fp fp)
{
	zdplane_info.dg_results_cb = fp;
	return AOK;
}
983
/*
 * Initialize the dataplane module during startup, internal/private version.
 * Zeroes the global state, sets up the mutex, queues, and default
 * queue limit, and adopts zebra's thread master for event delivery.
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* TODO -- register default kernel 'provider' during init */

	zdplane_info.dg_run = true;

	/* TODO -- start dataplane pthread. We're using the zebra
	 * core/main thread temporarily
	 */
	zdplane_info.dg_master = zebra->master;
}
1007
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing. The flag is set by
 * zebra_dplane_pre_finish().
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
1015
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Flag consulted by dplane_is_in_shutdown() */
	zdplane_info.dg_is_shutdown = true;

	/* Notify provider(s) of pending shutdown */
}
1032
1033 /*
1034 * Utility to determine whether work remains enqueued within the dplane;
1035 * used during system shutdown processing.
1036 */
1037 static bool dplane_work_pending(void)
1038 {
1039 struct zebra_dplane_ctx *ctx;
1040
1041 /* TODO -- just checking incoming/pending work for now */
1042 DPLANE_LOCK();
1043 {
1044 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
1045 }
1046 DPLANE_UNLOCK();
1047
1048 return (ctx != NULL);
1049 }
1050
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
1081
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the polling loop in dplane_check_shutdown_status() */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
1101
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	/* Stops the processing loop in dplane_route_process() */
	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	/* TODO */
	/* frr_pthread_stop(...) */

	/* Notify provider(s) of final shutdown */

	/* Clean-up provider objects */

	/* Clean queue(s) */
}
1126
/*
 * Initialize the dataplane module at startup; called by zebra rib_init().
 * Thin public wrapper over zebra_dplane_init_internal().
 */
void zebra_dplane_init(void)
{
	zebra_dplane_init_internal(&zebrad);
}