/*
 * zebra/zebra_dplane.c (mirror_frr.git)
 * Commit subject: "zebra: fix get_old_instance api"
 */
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "lib/libfrr.h"
21 #include "lib/debug.h"
22 #include "lib/frratomic.h"
23 #include "lib/frr_pthread.h"
24 #include "lib/memory.h"
25 #include "lib/queue.h"
26 #include "lib/zebra.h"
27 #include "zebra/zebra_memory.h"
28 #include "zebra/zserv.h"
29 #include "zebra/zebra_dplane.h"
30 #include "zebra/rt.h"
31 #include "zebra/debug.h"
32
/* Memory types for context blocks and provider registrations */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

/* AOK ("all OK") is the zero/success return code used throughout this
 * file; define it if the platform headers don't already provide it.
 */
#ifndef AOK
#  define AOK 0
#endif

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;


/* Validation check macro for context blocks: when DPLANE_DEBUG is
 * defined, asserts that the context pointer is non-NULL; otherwise
 * compiles away to nothing.
 */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

#  define DPLANE_CTX_VALID(p) \
	assert((p) != NULL)

#else

#  define DPLANE_CTX_VALID(p)

#endif /* DPLANE_DEBUG */
58
/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 *
 * All data is copied in by value at enqueue time (see
 * dplane_ctx_route_init), so the dataplane never holds pointers into
 * zebra 'core' structures.
 */
struct zebra_dplane_ctx {

	/* Operation code (install/update/delete) */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	/* Dest and (optional) source prefixes; an all-zero zd_src means
	 * "no source prefix" (see dplane_ctx_get_src).
	 */
	struct prefix zd_dest;
	struct prefix zd_src;

	/* True when this context captures a replace of a distinct 'old'
	 * route (set in dplane_route_update_internal).
	 */
	bool zd_is_update;

	/* Per-route_entry dplane sequence numbers, used to detect stale
	 * results.
	 */
	uint32_t zd_seq;
	uint32_t zd_old_seq;
	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Route type (and previous type, for updates) */
	int zd_type;
	int zd_old_type;

	afi_t zd_afi;
	safi_t zd_safi;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;
	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Namespace info, copied from the zebra_ns at init time */
	struct zebra_dplane_info zd_ns_info;

	/* Nexthops (deep-copied, owned by this context; freed in
	 * dplane_ctx_free)
	 */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
120
/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name (NUL-terminated; truncated to DPLANE_PROVIDER_NAMELEN) */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers (lower runs earlier;
	 * list is kept sorted in dplane_provider_register)
	 */
	uint8_t dp_priority;

	/* Id value, assigned from the global counter at registration */
	uint32_t dp_id;

	/* Provider's update-processing callback (required) */
	dplane_provider_process_fp dp_fp;

	/* Provider's shutdown/cleanup callback (optional) */
	dplane_provider_fini_fp dp_fini;

	/* Counters; atomic since they may be touched from multiple
	 * pthreads
	 */
	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_error_counter;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_provider) dp_q_providers;

};
145
/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components; guards the
	 * inbound context queue and the provider list
	 */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	dplane_results_fp dg_results_cb;

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown; cleared to stop the dplane
	 * processing loop
	 */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Stats: total in, currently queued, high-water mark, errors */
	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core'
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)

#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/* Prototypes */
static int dplane_route_process(struct thread *event);
202
203 /*
204 * Public APIs
205 */
206
207 /*
208 * Allocate a dataplane update context
209 */
210 static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
211 {
212 struct zebra_dplane_ctx *p;
213
214 /* TODO -- just alloc'ing memory, but would like to maintain
215 * a pool
216 */
217 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
218
219 return p;
220 }
221
222 /*
223 * Free a dataplane results context.
224 */
225 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
226 {
227 if (pctx) {
228 DPLANE_CTX_VALID(*pctx);
229
230 /* TODO -- just freeing memory, but would like to maintain
231 * a pool
232 */
233
234 /* Free embedded nexthops */
235 if ((*pctx)->zd_ng.nexthop) {
236 /* This deals with recursive nexthops too */
237 nexthops_free((*pctx)->zd_ng.nexthop);
238 }
239
240 if ((*pctx)->zd_old_ng.nexthop) {
241 /* This deals with recursive nexthops too */
242 nexthops_free((*pctx)->zd_old_ng.nexthop);
243 }
244
245 XFREE(MTYPE_DP_CTX, *pctx);
246 *pctx = NULL;
247 }
248 }
249
/*
 * Return a context block to the dplane module after processing.
 * Currently just frees the context (and NULLs *pctx via the free path).
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- enqueue for next provider; for now, just free */
	dplane_ctx_free(pctx);
}
258
/* Enqueue a context block at the tail of a caller-owned list.
 * Note: 'const' is cast away because the TAILQ linkage embedded in the
 * context itself is modified by the insert.
 */
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
			     const struct zebra_dplane_ctx *ctx)
{
	TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
265
266 /* Dequeue a context block from the head of a list */
267 void dplane_ctx_dequeue(struct dplane_ctx_q *q, struct zebra_dplane_ctx **ctxp)
268 {
269 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
270
271 if (ctx)
272 TAILQ_REMOVE(q, ctx, zd_q_entries);
273
274 *ctxp = ctx;
275 }
276
/*
 * Accessors for information from the context object
 */

/* Overall result status, set after processing */
enum zebra_dplane_result dplane_ctx_get_status(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_status;
}

/* Operation code (install/update/delete) carried by this context */
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_op;
}
294
295 const char *dplane_op2str(enum dplane_op_e op)
296 {
297 const char *ret = "UNKNOWN";
298
299 switch (op) {
300 case DPLANE_OP_NONE:
301 ret = "NONE";
302 break;
303
304 /* Route update */
305 case DPLANE_OP_ROUTE_INSTALL:
306 ret = "ROUTE_INSTALL";
307 break;
308 case DPLANE_OP_ROUTE_UPDATE:
309 ret = "ROUTE_UPDATE";
310 break;
311 case DPLANE_OP_ROUTE_DELETE:
312 ret = "ROUTE_DELETE";
313 break;
314
315 };
316
317 return ret;
318 }
319
320 const char *dplane_res2str(enum zebra_dplane_result res)
321 {
322 const char *ret = "<Unknown>";
323
324 switch (res) {
325 case ZEBRA_DPLANE_REQUEST_FAILURE:
326 ret = "FAILURE";
327 break;
328 case ZEBRA_DPLANE_REQUEST_QUEUED:
329 ret = "QUEUED";
330 break;
331 case ZEBRA_DPLANE_REQUEST_SUCCESS:
332 ret = "SUCCESS";
333 break;
334 };
335
336 return ret;
337 }
338
/* Destination prefix carried by this context */
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_dest);
}

/* Source prefix is a little special - return NULL for "no src prefix".
 * "No src" is encoded as an all-zero zd_src (dplane_ctx_route_init
 * memsets it when no source prefix is present); checking the v6 union
 * member covers the full address bytes regardless of family.
 */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	if (ctx->zd_src.prefixlen == 0 &&
	    IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
		return NULL;
	} else {
		return &(ctx->zd_src);
	}
}
358
/* Whether this context represents an update replacing an 'old' route */
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_is_update;
}

/* Dplane sequence number captured from the route_entry */
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_seq;
}

/* Sequence number of the previous route, for update operations */
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_seq;
}

/* VRF id of the route */
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_vrf_id;
}

/* Route type (protocol) */
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_type;
}

/* Route type of the previous route, for update operations */
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_type;
}

/* Address family of the route */
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_afi;
}

/* SAFI of the route */
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_safi;
}

/* Kernel routing table id */
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_table_id;
}

/* Route tag */
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_tag;
}

/* Tag of the previous route, for update operations */
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_tag;
}

/* Protocol instance */
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_instance;
}

/* Instance of the previous route, for update operations */
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_instance;
}

/* Route metric */
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_metric;
}

/* Metric of the previous route, for update operations */
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_metric;
}

/* Route MTU */
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_mtu;
}

/* Nexthop MTU */
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_nexthop_mtu;
}

/* Administrative distance */
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_distance;
}

/* Distance of the previous route, for update operations */
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_old_distance;
}

/* Nexthop group (owned by the context) */
const struct nexthop_group *dplane_ctx_get_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ng);
}

/* Nexthop group of the previous route (non-netlink updates only) */
const struct nexthop_group *dplane_ctx_get_old_ng(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_old_ng);
}

/* Copied namespace info for this context */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}
515
/*
 * End of dplane context accessors
 */

/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}

/*
 * Configure limit on the number of pending, queued updates. With
 * 'set' false the compiled-in default is restored and 'limit' is
 * ignored.
 */
void dplane_set_in_queue_limit(uint32_t limit, bool set)
{
	/* Reset to default on 'unset' */
	if (!set)
		limit = DPLANE_DEFAULT_MAX_QUEUED;

	atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
			      memory_order_relaxed);
}

/*
 * Retrieve the current queue depth of incoming, unprocessed updates
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
550
/*
 * Initialize a context block for a route update from zebra data structs.
 *
 * Copies everything the dataplane needs out of 'rn'/'re' by value, so
 * the context retains no pointers into zebra 'core' structures. Also
 * advances the route_entry's dplane sequence number and (with netlink)
 * the namespace's netlink command sequence.
 *
 * Returns AOK on success, EINVAL if any argument is NULL.
 */
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
				 enum dplane_op_e op,
				 struct route_node *rn,
				 struct route_entry *re)
{
	int ret = EINVAL;
	const struct route_table *table = NULL;
	const rib_table_info_t *info;
	const struct prefix *p, *src_p;
	struct zebra_ns *zns;
	struct zebra_vrf *zvrf;
	struct nexthop *nexthop;

	if (!ctx || !rn || !re)
		goto done;

	ctx->zd_op = op;

	ctx->zd_type = re->type;
	ctx->zd_old_type = re->type;

	/* Prefixes: dest, and optional source */
	srcdest_rnode_prefixes(rn, &p, &src_p);

	prefix_copy(&(ctx->zd_dest), p);

	/* All-zero zd_src encodes "no source prefix"; see
	 * dplane_ctx_get_src.
	 */
	if (src_p)
		prefix_copy(&(ctx->zd_src), src_p);
	else
		memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));

	ctx->zd_table_id = re->table;

	ctx->zd_metric = re->metric;
	ctx->zd_old_metric = re->metric;
	ctx->zd_vrf_id = re->vrf_id;
	ctx->zd_mtu = re->mtu;
	ctx->zd_nexthop_mtu = re->nexthop_mtu;
	ctx->zd_instance = re->instance;
	ctx->zd_tag = re->tag;
	ctx->zd_old_tag = re->tag;
	ctx->zd_distance = re->distance;

	/* AFI/SAFI come from the table's info block */
	table = srcdest_rnode_table(rn);
	info = table->info;

	ctx->zd_afi = info->afi;
	ctx->zd_safi = info->safi;

	/* Extract ns info - can't use pointers to 'core' structs */
	zvrf = vrf_info_lookup(re->vrf_id);
	zns = zvrf->zns;

	zebra_dplane_info_from_zns(&(ctx->zd_ns_info), zns, true /*is_cmd*/);

#if defined(HAVE_NETLINK)
	/* Increment message counter after copying to context struct - may need
	 * two messages in some 'update' cases.
	 */
	if (op == DPLANE_OP_ROUTE_UPDATE)
		zns->netlink_cmd.seq += 2;
	else
		zns->netlink_cmd.seq++;
#endif /* NETLINK*/

	/* Copy nexthops; recursive info is included too */
	copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);

	/* TODO -- maybe use array of nexthops to avoid allocs? */

	/* Ensure that the dplane's nexthop flag is clear. */
	for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);

	/* Trying out the sequence number idea, so we can try to detect
	 * when a result is stale.
	 */
	re->dplane_sequence++;
	ctx->zd_seq = re->dplane_sequence;

	ret = AOK;

done:
	return ret;
}
639
/*
 * Enqueue a new route update,
 * and ensure an event is active for the dataplane thread.
 *
 * Appends the context to the shared inbound queue (under the dplane
 * mutex), maintains the queued/high-water counters, and schedules the
 * dplane pthread's processing event. Returns AOK.
 */
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
	int ret = EINVAL;
	uint32_t high, curr;

	/* Enqueue for processing by the dataplane thread */
	DPLANE_LOCK();
	{
		TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
				  zd_q_entries);
	}
	DPLANE_UNLOCK();

	curr = atomic_add_fetch_explicit(
#ifdef __clang__
		/* TODO -- issue with the clang atomic/intrinsics currently;
		 * casting away the 'Atomic'-ness of the variable works.
		 */
		(uint32_t *)&(zdplane_info.dg_routes_queued),
#else
		&(zdplane_info.dg_routes_queued),
#endif
		1, memory_order_seq_cst);

	/* Maybe update high-water counter also; CAS loop retries if
	 * another thread raced the update.
	 */
	high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
				    memory_order_seq_cst);
	while (high < curr) {
		if (atomic_compare_exchange_weak_explicit(
			    &zdplane_info.dg_routes_queued_max,
			    &high, curr,
			    memory_order_seq_cst,
			    memory_order_seq_cst))
			break;
	}

	/* Ensure that an event for the dataplane thread is active */
	thread_add_event(zdplane_info.dg_master, dplane_route_process, NULL, 0,
			 &zdplane_info.dg_t_update);

	ret = AOK;

	return ret;
}
688
689 /*
690 * Attempt to dequeue a route-update block
691 */
692 static struct zebra_dplane_ctx *dplane_route_dequeue(void)
693 {
694 struct zebra_dplane_ctx *ctx = NULL;
695
696 DPLANE_LOCK();
697 {
698 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
699 if (ctx) {
700 TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q,
701 ctx, zd_q_entries);
702 }
703 }
704 DPLANE_UNLOCK();
705
706 return ctx;
707 }
708
/*
 * Utility that prepares a route update and enqueues it for processing
 *
 * Allocates a context, fills it from 'rn'/'re' (plus 'old_re' details
 * for true update/replace cases), and hands it to the dplane queue.
 * On any failure the context is freed here and FAILURE is returned;
 * on success the request is QUEUED and ownership of the context has
 * passed to the dataplane.
 */
static enum zebra_dplane_result
dplane_route_update_internal(struct route_node *rn,
			     struct route_entry *re,
			     struct route_entry *old_re,
			     enum dplane_op_e op)
{
	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
	int ret = EINVAL;
	struct zebra_dplane_ctx *ctx = NULL;

	/* Obtain context block */
	ctx = dplane_ctx_alloc();
	if (ctx == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Init context with info from zebra data structs */
	ret = dplane_ctx_route_init(ctx, op, rn, re);
	if (ret == AOK) {
		/* Capture some extra info for update case
		 * where there's a different 'old' route.
		 */
		if ((op == DPLANE_OP_ROUTE_UPDATE) &&
		    old_re && (old_re != re)) {
			ctx->zd_is_update = true;

			/* Advance the old route's dplane sequence too,
			 * for staleness detection.
			 */
			old_re->dplane_sequence++;
			ctx->zd_old_seq = old_re->dplane_sequence;

			ctx->zd_old_tag = old_re->tag;
			ctx->zd_old_type = old_re->type;
			ctx->zd_old_instance = old_re->instance;
			ctx->zd_old_distance = old_re->distance;
			ctx->zd_old_metric = old_re->metric;

#ifndef HAVE_NETLINK
			/* For bsd, capture previous re's nexthops too, sigh.
			 * We'll need these to do per-nexthop deletes.
			 */
			copy_nexthops(&(ctx->zd_old_ng.nexthop),
				      old_re->ng.nexthop, NULL);
#endif	/* !HAVE_NETLINK */
		}

		/* Enqueue context for processing */
		ret = dplane_route_enqueue(ctx);
	}

done:
	/* Update counter; counts attempts, successful or not */
	atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
				  memory_order_relaxed);

	if (ret == AOK)
		result = ZEBRA_DPLANE_REQUEST_QUEUED;
	else if (ctx) {
		atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
					  memory_order_relaxed);
		dplane_ctx_free(&ctx);
	}

	return result;
}
776
777 /*
778 * Enqueue a route 'add' for the dataplane.
779 */
780 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
781 struct route_entry *re)
782 {
783 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
784
785 if (rn == NULL || re == NULL)
786 goto done;
787
788 ret = dplane_route_update_internal(rn, re, NULL,
789 DPLANE_OP_ROUTE_INSTALL);
790
791 done:
792 return ret;
793 }
794
795 /*
796 * Enqueue a route update for the dataplane.
797 */
798 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
799 struct route_entry *re,
800 struct route_entry *old_re)
801 {
802 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
803
804 if (rn == NULL || re == NULL)
805 goto done;
806
807 ret = dplane_route_update_internal(rn, re, old_re,
808 DPLANE_OP_ROUTE_UPDATE);
809 done:
810 return ret;
811 }
812
813 /*
814 * Enqueue a route removal for the dataplane.
815 */
816 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
817 struct route_entry *re)
818 {
819 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
820
821 if (rn == NULL || re == NULL)
822 goto done;
823
824 ret = dplane_route_update_internal(rn, re, NULL,
825 DPLANE_OP_ROUTE_DELETE);
826
827 done:
828 return ret;
829 }
830
/*
 * Event handler function for routing updates; runs in the dplane
 * pthread. Drains the inbound queue, applies each update toward the
 * kernel, and delivers results back to zebra via the registered
 * results callback (which takes ownership of each context).
 */
static int dplane_route_process(struct thread *event)
{
	enum zebra_dplane_result res;
	struct zebra_dplane_ctx *ctx;

	while (1) {
		/* Check for shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* TODO -- limit number of updates per cycle? */
		ctx = dplane_route_dequeue();
		if (ctx == NULL)
			break;

		/* Update counter */
		atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, 1,
					  memory_order_relaxed);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
			char dest_str[PREFIX_STRLEN];

			prefix2str(dplane_ctx_get_dest(ctx),
				   dest_str, sizeof(dest_str));

			zlog_debug("%u:%s Dplane route update ctx %p op %s",
				   dplane_ctx_get_vrf(ctx), dest_str,
				   ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
		}

		/* TODO -- support series of providers */

		/* Initially, just doing kernel-facing update here */
		res = kernel_route_update(ctx);

		if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
			atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
						  1, memory_order_relaxed);

		ctx->zd_status = res;

		/* Enqueue result to zebra main context */
		zdplane_info.dg_results_cb(ctx);

		ctx = NULL;
	}

	return 0;
}
883
/*
 * Handler for 'show dplane': emits the route-update counters to the
 * given vty. 'detailed' is currently unused here. Returns CMD_SUCCESS.
 */
int dplane_show_helper(struct vty *vty, bool detailed)
{
	uint64_t queued, limit, queue_max, errs, incoming;

	/* Using atomics because counters are being changed in different
	 * contexts.
	 */
	incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
					memory_order_relaxed);
	limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				     memory_order_relaxed);
	queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
				      memory_order_relaxed);
	queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
					 memory_order_relaxed);
	errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
				    memory_order_relaxed);

	vty_out(vty, "Route updates:            %"PRIu64"\n", incoming);
	vty_out(vty, "Route update errors:      %"PRIu64"\n", errs);
	vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
	vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
	vty_out(vty, "Route update queue max:   %"PRIu64"\n", queue_max);

	return CMD_SUCCESS;
}
913
914 /*
915 * Handler for 'show dplane providers'
916 */
917 int dplane_show_provs_helper(struct vty *vty, bool detailed)
918 {
919 vty_out(vty, "Zebra dataplane providers:%s\n",
920 (detailed ? " (detailed)" : ""));
921
922 return CMD_SUCCESS;
923 }
924
/*
 * Provider registration
 *
 * Validates the arguments, allocates a provider struct, assigns it an
 * id, and inserts it into the global provider list, which is kept
 * ordered by ascending priority. Takes the dplane mutex since the
 * dplane pthread may already be running.
 *
 * Returns 0 on success, EINVAL for bad arguments, ENOMEM on
 * allocation failure.
 */
int dplane_provider_register(const char *name,
			     enum dplane_provider_prio_e prio,
			     dplane_provider_process_fp fp,
			     dplane_provider_fini_fp fini_fp)
{
	int ret = 0;
	struct zebra_dplane_provider *p, *last;

	/* Validate */
	if (fp == NULL) {
		ret = EINVAL;
		goto done;
	}

	if (prio <= DPLANE_PRIO_NONE ||
	    prio > DPLANE_PRIO_LAST) {
		ret = EINVAL;
		goto done;
	}

	/* Allocate and init new provider struct */
	p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
	if (p == NULL) {
		ret = ENOMEM;
		goto done;
	}

	/* Name is truncated to fit; explicit NUL in case strncpy
	 * didn't terminate.
	 */
	strncpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
	p->dp_name[DPLANE_PROVIDER_NAMELEN] = '\0'; /* Belt-and-suspenders */

	p->dp_priority = prio;
	p->dp_fp = fp;
	p->dp_fini = fini_fp;

	/* Lock the lock - the dplane pthread may be running */
	DPLANE_LOCK();

	p->dp_id = ++zdplane_info.dg_provider_id;

	/* Insert into list ordered by priority: find the first existing
	 * provider with a strictly higher priority, if any.
	 */
	TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_q_providers) {
		if (last->dp_priority > p->dp_priority)
			break;
	}

	if (last)
		TAILQ_INSERT_BEFORE(last, p, dp_q_providers);
	else
		TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
				  dp_q_providers);

	/* And unlock */
	DPLANE_UNLOCK();

done:
	return ret;
}
985
/*
 * Zebra registers a results callback with the dataplane system; the
 * callback receives completed contexts from the dplane pthread.
 * Always returns AOK.
 */
int dplane_results_register(dplane_results_fp fp)
{
	zdplane_info.dg_results_cb = fp;
	return AOK;
}
994
995 /*
996 * Initialize the dataplane module during startup, internal/private version
997 */
998 static void zebra_dplane_init_internal(struct zebra_t *zebra)
999 {
1000 memset(&zdplane_info, 0, sizeof(zdplane_info));
1001
1002 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
1003
1004 TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
1005 TAILQ_INIT(&zdplane_info.dg_providers_q);
1006
1007 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
1008
1009 /* TODO -- register default kernel 'provider' during init */
1010 zdplane_info.dg_run = true;
1011
1012 /* Start dataplane pthread */
1013
1014 zdplane_info.dg_run = true;
1015
1016 struct frr_pthread_attr pattr = {
1017 .start = frr_pthread_attr_default.start,
1018 .stop = frr_pthread_attr_default.stop
1019 };
1020
1021 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
1022 "Zebra dplane");
1023
1024 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
1025
1026 frr_pthread_run(zdplane_info.dg_pthread, NULL);
1027 }
1028
/* Indicates zebra shutdown/exit is in progress. Some operations may be
 * simplified or skipped during shutdown processing. The flag is set in
 * zebra_dplane_pre_finish().
 */
bool dplane_is_in_shutdown(void)
{
	return zdplane_info.dg_is_shutdown;
}
1036
/*
 * Early or pre-shutdown, de-init notification api. This runs pretty
 * early during zebra shutdown, as a signal to stop new work and prepare
 * for updates generated by shutdown/cleanup activity, as zebra tries to
 * remove everything it's responsible for.
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_pre_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane pre-fini called");

	/* Flag checked via dplane_is_in_shutdown() */
	zdplane_info.dg_is_shutdown = true;

	/* Notify provider(s) of pending shutdown */
}
1053
1054 /*
1055 * Utility to determine whether work remains enqueued within the dplane;
1056 * used during system shutdown processing.
1057 */
1058 static bool dplane_work_pending(void)
1059 {
1060 struct zebra_dplane_ctx *ctx;
1061
1062 /* TODO -- just checking incoming/pending work for now */
1063 DPLANE_LOCK();
1064 {
1065 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
1066 }
1067 DPLANE_UNLOCK();
1068
1069 return (ctx != NULL);
1070 }
1071
/*
 * Shutdown-time intermediate callback, used to determine when all pending
 * in-flight updates are done. If there's still work to do, reschedules itself.
 * If all work is done, schedules an event to the main zebra thread for
 * final zebra shutdown.
 * This runs in the dplane pthread context.
 */
static int dplane_check_shutdown_status(struct thread *event)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown status check called");

	if (dplane_work_pending()) {
		/* Reschedule dplane check on a short timer */
		thread_add_timer_msec(zdplane_info.dg_master,
				      dplane_check_shutdown_status,
				      NULL, 100,
				      &zdplane_info.dg_t_shutdown_check);

		/* TODO - give up and stop waiting after a short time? */

	} else {
		/* We appear to be done - schedule a final callback event
		 * for the zebra main pthread.
		 */
		thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
	}

	return 0;
}
1102
/*
 * Shutdown, de-init api. This runs pretty late during shutdown,
 * after zebra has tried to free/remove/uninstall all routes during shutdown.
 * At this point, dplane work may still remain to be done, so we can't just
 * blindly terminate. If there's still work to do, we'll periodically check
 * and when done, we'll enqueue a task to the zebra main thread for final
 * termination processing.
 *
 * NB: This runs in the main zebra thread context.
 */
void zebra_dplane_finish(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane fini called");

	/* Kick off the polling check loop in the dplane pthread */
	thread_add_event(zdplane_info.dg_master,
			 dplane_check_shutdown_status, NULL, 0,
			 &zdplane_info.dg_t_shutdown_check);
}
1122
/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 * Stops and destroys the dplane pthread.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* Notify provider(s) of final shutdown */

	/* Clean-up provider objects */

	/* Clean queue(s) */
}
1151
/*
 * Initialize the dataplane module at startup; called by zebra rib_init().
 * Thin public wrapper around the internal init.
 */
void zebra_dplane_init(void)
{
	zebra_dplane_init_internal(&zebrad);
}