/*
 * Zebra dataplane layer.
 * Copyright (c) 2018 Volta Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/libfrr.h"
#include "lib/debug.h"
#include "lib/frratomic.h"
#include "lib/frr_pthread.h"
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_memory.h"
#include "zebra/zserv.h"
#include "zebra/zebra_dplane.h"
#include "zebra/rt.h"
#include "zebra/debug.h"

/* Memory type for context blocks */
DEFINE_MTYPE(ZEBRA, DP_CTX, "Zebra DPlane Ctx")
DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")

#ifndef AOK
# define AOK 0
#endif

/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;

/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

# define DPLANE_CTX_VALID(p) \
	assert((p) != NULL)

#else

# define DPLANE_CTX_VALID(p)

#endif /* DPLANE_DEBUG */

/*
 * Route information captured for route updates.
 */
struct dplane_route_info {

	/* Dest and (optional) source prefixes */
	struct prefix zd_dest;
	struct prefix zd_src;

	afi_t zd_afi;
	safi_t zd_safi;

	int zd_type;
	int zd_old_type;

	route_tag_t zd_tag;
	route_tag_t zd_old_tag;
	uint32_t zd_metric;
	uint32_t zd_old_metric;

	uint16_t zd_instance;
	uint16_t zd_old_instance;

	uint8_t zd_distance;
	uint8_t zd_old_distance;

	uint32_t zd_mtu;
	uint32_t zd_nexthop_mtu;

	/* Nexthops */
	struct nexthop_group zd_ng;

	/* "Previous" nexthops, used only in route updates without netlink */
	struct nexthop_group zd_old_ng;

	/* TODO -- use fixed array of nexthops, to avoid mallocs? */

};

/*
 * Pseudowire info for the dataplane
 */
struct dplane_pw_info {
	char ifname[IF_NAMESIZE];
	ifindex_t ifindex;
	int type;
	int af;
	int status;
	uint32_t flags;
	union g_addr nexthop;
	mpls_label_t local_label;
	mpls_label_t remote_label;

	union pw_protocol_fields fields;
};

/*
 * The context block used to exchange info about route updates across
 * the boundary between the zebra main context (and pthread) and the
 * dataplane layer (and pthread).
 */
struct zebra_dplane_ctx {

	/* Operation code */
	enum dplane_op_e zd_op;

	/* Status on return */
	enum zebra_dplane_result zd_status;

	/* Dplane provider id */
	uint32_t zd_provider;

	/* Flags - used by providers, e.g. */
	int zd_flags;

	bool zd_is_update;

	uint32_t zd_seq;
	uint32_t zd_old_seq;

	/* TODO -- internal/sub-operation status? */
	enum zebra_dplane_result zd_remote_status;
	enum zebra_dplane_result zd_kernel_status;

	vrf_id_t zd_vrf_id;
	uint32_t zd_table_id;

	/* Support info for either route or LSP update */
	union {
		struct dplane_route_info rinfo;
		zebra_lsp_t lsp;
		struct dplane_pw_info pw;
	} u;

	/* Namespace info, used especially for netlink kernel communication */
	struct zebra_dplane_info zd_ns_info;

	/* Embedded list linkage */
	TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};

/* Flag that can be set by a pre-kernel provider as a signal that an update
 * should bypass the kernel.
 */
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01

/*
 * Registration block for one dataplane provider.
 */
struct zebra_dplane_provider {
	/* Name */
	char dp_name[DPLANE_PROVIDER_NAMELEN + 1];

	/* Priority, for ordering among providers */
	uint8_t dp_priority;

	/* Id value */
	uint32_t dp_id;

	/* Mutex */
	pthread_mutex_t dp_mutex;

	/* Plugin-provided extra data */
	void *dp_data;

	/* Flags */
	int dp_flags;

	int (*dp_fp)(struct zebra_dplane_provider *prov);

	int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);

	_Atomic uint32_t dp_in_counter;
	_Atomic uint32_t dp_in_queued;
	_Atomic uint32_t dp_in_max;
	_Atomic uint32_t dp_out_counter;
	_Atomic uint32_t dp_out_queued;
	_Atomic uint32_t dp_out_max;
	_Atomic uint32_t dp_error_counter;

	/* Queue of contexts inbound to the provider */
	struct dplane_ctx_q dp_ctx_in_q;

	/* Queue of completed contexts outbound from the provider back
	 * towards the dataplane module.
	 */
	struct dplane_ctx_q dp_ctx_out_q;

	/* Embedded list linkage for provider objects */
	TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};

/*
 * Globals
 */
static struct zebra_dplane_globals {
	/* Mutex to control access to dataplane components */
	pthread_mutex_t dg_mutex;

	/* Results callback registered by zebra 'core' */
	int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);

	/* Sentinel for beginning of shutdown */
	volatile bool dg_is_shutdown;

	/* Sentinel for end of shutdown */
	volatile bool dg_run;

	/* Route-update context queue inbound to the dataplane */
	TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_route_ctx_q;

	/* Ordered list of providers */
	TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;

	/* Counter used to assign internal ids to providers */
	uint32_t dg_provider_id;

	/* Limit number of pending, unprocessed updates */
	_Atomic uint32_t dg_max_queued_updates;

	/* Limit number of new updates dequeued at once, to pace an
	 * incoming burst.
	 */
	uint32_t dg_updates_per_cycle;

	_Atomic uint32_t dg_routes_in;
	_Atomic uint32_t dg_routes_queued;
	_Atomic uint32_t dg_routes_queued_max;
	_Atomic uint32_t dg_route_errors;
	_Atomic uint32_t dg_other_errors;

	_Atomic uint32_t dg_lsps_in;
	_Atomic uint32_t dg_lsps_queued;
	_Atomic uint32_t dg_lsps_queued_max;
	_Atomic uint32_t dg_lsp_errors;

	_Atomic uint32_t dg_update_yields;

	/* Dataplane pthread */
	struct frr_pthread *dg_pthread;

	/* Event-delivery context 'master' for the dplane */
	struct thread_master *dg_master;

	/* Event/'thread' pointer for queued updates */
	struct thread *dg_t_update;

	/* Event pointer for pending shutdown check loop */
	struct thread *dg_t_shutdown_check;

} zdplane_info;

/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)


/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))

/* Prototypes */
static int dplane_thread_loop(struct thread *event);
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
				 struct zebra_ns *zns);
static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
						    enum dplane_op_e op);

/*
 * Public APIs
 */

ad6aad4d
MS
302/* Obtain thread_master for dataplane thread */
303struct thread_master *dplane_get_thread_master(void)
304{
305 return zdplane_info.dg_master;
306}
307
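/*
 * Illustrative sketch (not part of the original file): a provider that
 * does its own i/o could use this accessor to schedule events on the
 * dataplane pthread. The callback "example_provider_read" and the fd
 * are hypothetical placeholders.
 *
 *	static struct thread *t_example_read;
 *
 *	static void example_schedule_read(int fd)
 *	{
 *		thread_add_read(dplane_get_thread_master(),
 *				example_provider_read, NULL, fd,
 *				&t_example_read);
 *	}
 */
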
7cdb1a84 308/*
b8e0423d 309 * Allocate a dataplane update context
7cdb1a84 310 */
25779064 311static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
7cdb1a84 312{
25779064 313 struct zebra_dplane_ctx *p;
7cdb1a84 314
b8e0423d
MS
315 /* TODO -- just alloc'ing memory, but would like to maintain
316 * a pool
317 */
25779064 318 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
7cdb1a84 319
5709131c 320 return p;
7cdb1a84
MS
321}
322
323/*
b8e0423d 324 * Free a dataplane results context.
7cdb1a84 325 */
25779064 326static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
7cdb1a84 327{
16c628de
MS
328 if (pctx == NULL)
329 return;
7cdb1a84 330
16c628de
MS
331 DPLANE_CTX_VALID(*pctx);
332
333 /* TODO -- just freeing memory, but would like to maintain
334 * a pool
335 */
336
337 /* Some internal allocations may need to be freed, depending on
338 * the type of info captured in the ctx.
339 */
340 switch ((*pctx)->zd_op) {
341 case DPLANE_OP_ROUTE_INSTALL:
342 case DPLANE_OP_ROUTE_UPDATE:
343 case DPLANE_OP_ROUTE_DELETE:
b8e0423d 344
16c628de 345 /* Free allocated nexthops */
0f461727 346 if ((*pctx)->u.rinfo.zd_ng.nexthop) {
7cdb1a84 347 /* This deals with recursive nexthops too */
0f461727 348 nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);
16c628de
MS
349
350 (*pctx)->u.rinfo.zd_ng.nexthop = NULL;
7cdb1a84
MS
351 }
352
0f461727 353 if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
4dfd7a02 354 /* This deals with recursive nexthops too */
0f461727 355 nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
16c628de
MS
356
357 (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
358 }
359
360 break;
361
362 case DPLANE_OP_LSP_INSTALL:
363 case DPLANE_OP_LSP_UPDATE:
364 case DPLANE_OP_LSP_DELETE:
365 {
366 zebra_nhlfe_t *nhlfe, *next;
367
368 /* Free allocated NHLFEs */
369 for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
370 next = nhlfe->next;
371
372 zebra_mpls_nhlfe_del(nhlfe);
01ce7cba
MS
373 }
374
16c628de
MS
		/* Clear pointers in lsp struct, in case we're caching
		 * free context structs.
		 */
378 (*pctx)->u.lsp.nhlfe_list = NULL;
379 (*pctx)->u.lsp.best_nhlfe = NULL;
380
381 break;
382 }
383
384 case DPLANE_OP_NONE:
385 break;
7cdb1a84 386 }
16c628de
MS
387
388 XFREE(MTYPE_DP_CTX, *pctx);
389 *pctx = NULL;
7cdb1a84
MS
390}
391
392/*
393 * Return a context block to the dplane module after processing
394 */
25779064 395void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
7cdb1a84 396{
14b0bc8e 397 /* TODO -- maintain pool; for now, just free */
7cdb1a84
MS
398 dplane_ctx_free(pctx);
399}
400
401/* Enqueue a context block */
25779064
MS
402void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
403 const struct zebra_dplane_ctx *ctx)
7cdb1a84 404{
25779064 405 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
7cdb1a84
MS
406}
407
14b0bc8e
MS
408/* Append a list of context blocks to another list */
409void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
410 struct dplane_ctx_q *from_list)
411{
412 if (TAILQ_FIRST(from_list)) {
413 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
414
415 /* And clear 'from' list */
416 TAILQ_INIT(from_list);
417 }
418}
419
7cdb1a84 420/* Dequeue a context block from the head of a list */
68b375e0 421struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
7cdb1a84 422{
25779064 423 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
5709131c
MS
424
425 if (ctx)
7cdb1a84 426 TAILQ_REMOVE(q, ctx, zd_q_entries);
7cdb1a84 427
68b375e0 428 return ctx;
7cdb1a84
MS
429}
430
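/*
 * Illustrative sketch (not part of the original file): the queue
 * helpers above are how zebra's registered results callback would
 * typically drain a list of completed contexts; the callback name and
 * logging here are assumptions.
 *
 *	static int example_results_handler(struct dplane_ctx_q *ctxlist)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *
 *		while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
 *			if (dplane_ctx_get_status(ctx) !=
 *			    ZEBRA_DPLANE_REQUEST_SUCCESS)
 *				zlog_debug("dplane op %s failed",
 *					   dplane_op2str(
 *						   dplane_ctx_get_op(ctx)));
 *
 *			dplane_ctx_fini(&ctx);
 *		}
 *
 *		return 0;
 *	}
 */
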
431/*
432 * Accessors for information from the context object
433 */
25779064
MS
434enum zebra_dplane_result dplane_ctx_get_status(
435 const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
436{
437 DPLANE_CTX_VALID(ctx);
438
5709131c 439 return ctx->zd_status;
7cdb1a84
MS
440}
441
c831033f
MS
442void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
443 enum zebra_dplane_result status)
444{
445 DPLANE_CTX_VALID(ctx);
446
447 ctx->zd_status = status;
448}
449
450/* Retrieve last/current provider id */
451uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
452{
453 DPLANE_CTX_VALID(ctx);
454 return ctx->zd_provider;
455}
456
/* Providers that run before the kernel can control whether a kernel
 * update should be done.
 */
460void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
461{
462 DPLANE_CTX_VALID(ctx);
463
464 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
465}
466
467bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
468{
469 DPLANE_CTX_VALID(ctx);
470
471 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
472}
473
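/*
 * Illustrative sketch (not part of the original file): a pre-kernel
 * provider that programs an update through its own path might mark the
 * context so later stages skip the kernel; "example_prov_func" is a
 * hypothetical provider callback, and the hardware-programming step is
 * elided.
 *
 *	static int example_prov_func(struct zebra_dplane_provider *prov)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *		int counter, limit;
 *
 *		limit = dplane_provider_get_work_limit(prov);
 *
 *		for (counter = 0; counter < limit; counter++) {
 *			ctx = dplane_provider_dequeue_in_ctx(prov);
 *			if (ctx == NULL)
 *				break;
 *
 *			dplane_ctx_set_skip_kernel(ctx);
 *			dplane_ctx_set_status(ctx,
 *					      ZEBRA_DPLANE_REQUEST_SUCCESS);
 *			dplane_provider_enqueue_out_ctx(prov, ctx);
 *		}
 *
 *		return 0;
 *	}
 */
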
25779064 474enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
475{
476 DPLANE_CTX_VALID(ctx);
477
5709131c 478 return ctx->zd_op;
7cdb1a84
MS
479}
480
5709131c 481const char *dplane_op2str(enum dplane_op_e op)
7cdb1a84
MS
482{
483 const char *ret = "UNKNOWN";
484
5709131c 485 switch (op) {
7cdb1a84
MS
486 case DPLANE_OP_NONE:
487 ret = "NONE";
488 break;
489
490 /* Route update */
491 case DPLANE_OP_ROUTE_INSTALL:
492 ret = "ROUTE_INSTALL";
493 break;
494 case DPLANE_OP_ROUTE_UPDATE:
495 ret = "ROUTE_UPDATE";
496 break;
497 case DPLANE_OP_ROUTE_DELETE:
498 ret = "ROUTE_DELETE";
499 break;
500
16c628de
MS
501 case DPLANE_OP_LSP_INSTALL:
502 ret = "LSP_INSTALL";
503 break;
504 case DPLANE_OP_LSP_UPDATE:
505 ret = "LSP_UPDATE";
506 break;
507 case DPLANE_OP_LSP_DELETE:
508 ret = "LSP_DELETE";
509 break;
510
7cdb1a84
MS
511 };
512
5709131c 513 return ret;
7cdb1a84
MS
514}
515
f183e380
MS
516const char *dplane_res2str(enum zebra_dplane_result res)
517{
518 const char *ret = "<Unknown>";
519
520 switch (res) {
521 case ZEBRA_DPLANE_REQUEST_FAILURE:
522 ret = "FAILURE";
523 break;
524 case ZEBRA_DPLANE_REQUEST_QUEUED:
525 ret = "QUEUED";
526 break;
527 case ZEBRA_DPLANE_REQUEST_SUCCESS:
528 ret = "SUCCESS";
529 break;
530 };
531
532 return ret;
533}
534
25779064 535const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
536{
537 DPLANE_CTX_VALID(ctx);
538
0f461727 539 return &(ctx->u.rinfo.zd_dest);
7cdb1a84
MS
540}
541
5709131c 542/* Source prefix is a little special - return NULL for "no src prefix" */
25779064 543const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
544{
545 DPLANE_CTX_VALID(ctx);
546
0f461727
MS
547 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
548 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
5709131c 549 return NULL;
7cdb1a84 550 } else {
0f461727 551 return &(ctx->u.rinfo.zd_src);
7cdb1a84
MS
552 }
553}
554
25779064 555bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
556{
557 DPLANE_CTX_VALID(ctx);
558
5709131c 559 return ctx->zd_is_update;
7cdb1a84
MS
560}
561
25779064 562uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
563{
564 DPLANE_CTX_VALID(ctx);
565
5709131c 566 return ctx->zd_seq;
7cdb1a84
MS
567}
568
25779064 569uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
570{
571 DPLANE_CTX_VALID(ctx);
572
5709131c 573 return ctx->zd_old_seq;
7cdb1a84
MS
574}
575
25779064 576vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
577{
578 DPLANE_CTX_VALID(ctx);
579
5709131c 580 return ctx->zd_vrf_id;
7cdb1a84
MS
581}
582
25779064 583int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
584{
585 DPLANE_CTX_VALID(ctx);
586
0f461727 587 return ctx->u.rinfo.zd_type;
7cdb1a84
MS
588}
589
25779064 590int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
591{
592 DPLANE_CTX_VALID(ctx);
593
0f461727 594 return ctx->u.rinfo.zd_old_type;
7cdb1a84
MS
595}
596
25779064 597afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
598{
599 DPLANE_CTX_VALID(ctx);
600
0f461727 601 return ctx->u.rinfo.zd_afi;
7cdb1a84
MS
602}
603
25779064 604safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
605{
606 DPLANE_CTX_VALID(ctx);
607
0f461727 608 return ctx->u.rinfo.zd_safi;
7cdb1a84
MS
609}
610
25779064 611uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
612{
613 DPLANE_CTX_VALID(ctx);
614
5709131c 615 return ctx->zd_table_id;
7cdb1a84
MS
616}
617
25779064 618route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
619{
620 DPLANE_CTX_VALID(ctx);
621
0f461727 622 return ctx->u.rinfo.zd_tag;
7cdb1a84
MS
623}
624
25779064 625route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
626{
627 DPLANE_CTX_VALID(ctx);
628
0f461727 629 return ctx->u.rinfo.zd_old_tag;
7cdb1a84
MS
630}
631
25779064 632uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
633{
634 DPLANE_CTX_VALID(ctx);
635
0f461727 636 return ctx->u.rinfo.zd_instance;
7cdb1a84
MS
637}
638
25779064 639uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
640{
641 DPLANE_CTX_VALID(ctx);
642
0f461727 643 return ctx->u.rinfo.zd_old_instance;
7cdb1a84
MS
644}
645
25779064 646uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
647{
648 DPLANE_CTX_VALID(ctx);
649
0f461727 650 return ctx->u.rinfo.zd_metric;
7cdb1a84
MS
651}
652
25779064 653uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
01ce7cba
MS
654{
655 DPLANE_CTX_VALID(ctx);
656
0f461727 657 return ctx->u.rinfo.zd_old_metric;
01ce7cba
MS
658}
659
25779064 660uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
661{
662 DPLANE_CTX_VALID(ctx);
663
0f461727 664 return ctx->u.rinfo.zd_mtu;
7cdb1a84
MS
665}
666
25779064 667uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
668{
669 DPLANE_CTX_VALID(ctx);
670
0f461727 671 return ctx->u.rinfo.zd_nexthop_mtu;
7cdb1a84
MS
672}
673
25779064 674uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
675{
676 DPLANE_CTX_VALID(ctx);
677
0f461727 678 return ctx->u.rinfo.zd_distance;
7cdb1a84
MS
679}
680
25779064 681uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
682{
683 DPLANE_CTX_VALID(ctx);
684
0f461727 685 return ctx->u.rinfo.zd_old_distance;
7cdb1a84
MS
686}
687
25779064
MS
688const struct nexthop_group *dplane_ctx_get_ng(
689 const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
690{
691 DPLANE_CTX_VALID(ctx);
692
0f461727 693 return &(ctx->u.rinfo.zd_ng);
7cdb1a84
MS
694}
695
25779064
MS
696const struct nexthop_group *dplane_ctx_get_old_ng(
697 const struct zebra_dplane_ctx *ctx)
01ce7cba
MS
698{
699 DPLANE_CTX_VALID(ctx);
700
0f461727 701 return &(ctx->u.rinfo.zd_old_ng);
01ce7cba
MS
702}
703
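/*
 * Illustrative sketch (not part of the original file): a kernel-facing
 * routine might use the accessors above to walk the nexthops carried in
 * a route context, e.g. counting the non-recursive nexthops it would
 * actually program. The loop below is a simplified assumption.
 *
 *	const struct nexthop *nexthop;
 *	int count = 0;
 *
 *	for (nexthop = dplane_ctx_get_ng(ctx)->nexthop;
 *	     nexthop; nexthop = nexthop->next) {
 *		if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
 *			count++;
 *	}
 */
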
25779064
MS
704const struct zebra_dplane_info *dplane_ctx_get_ns(
705 const struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
706{
707 DPLANE_CTX_VALID(ctx);
708
5709131c 709 return &(ctx->zd_ns_info);
7cdb1a84
MS
710}
711
0f461727
MS
712/* Accessors for LSP information */
713
714mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
715{
716 DPLANE_CTX_VALID(ctx);
717
718 return ctx->u.lsp.ile.in_label;
719}
720
721uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
722{
723 DPLANE_CTX_VALID(ctx);
724
725 return ctx->u.lsp.addr_family;
726}
727
728uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
729{
730 DPLANE_CTX_VALID(ctx);
731
732 return ctx->u.lsp.flags;
733}
734
735zebra_nhlfe_t *dplane_ctx_get_nhlfe(struct zebra_dplane_ctx *ctx)
736{
737 DPLANE_CTX_VALID(ctx);
738
739 return ctx->u.lsp.nhlfe_list;
740}
741
742zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(struct zebra_dplane_ctx *ctx)
743{
744 DPLANE_CTX_VALID(ctx);
745
746 return ctx->u.lsp.best_nhlfe;
747}
748
749uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
750{
751 DPLANE_CTX_VALID(ctx);
752
753 return ctx->u.lsp.num_ecmp;
754}
755
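/*
 * Illustrative sketch (not part of the original file): an LSP handler
 * might walk the NHLFEs captured in the context via the accessor above,
 * here just counting the selected entries. This is a simplified
 * assumption of that usage.
 *
 *	const zebra_nhlfe_t *nhlfe;
 *	int num = 0;
 *
 *	for (nhlfe = dplane_ctx_get_nhlfe(ctx); nhlfe; nhlfe = nhlfe->next) {
 *		if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_SELECTED))
 *			num++;
 *	}
 */
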
d613b8e1
MS
756const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
757{
758 DPLANE_CTX_VALID(ctx);
759
760 return ctx->u.pw.ifname;
761}
762
763mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
764{
765 DPLANE_CTX_VALID(ctx);
766
767 return ctx->u.pw.local_label;
768}
769
770mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
771{
772 DPLANE_CTX_VALID(ctx);
773
774 return ctx->u.pw.remote_label;
775}
776
777int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
778{
779 DPLANE_CTX_VALID(ctx);
780
781 return ctx->u.pw.type;
782}
783
784int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
785{
786 DPLANE_CTX_VALID(ctx);
787
788 return ctx->u.pw.af;
789}
790
791uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
792{
793 DPLANE_CTX_VALID(ctx);
794
795 return ctx->u.pw.flags;
796}
797
798int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
799{
800 DPLANE_CTX_VALID(ctx);
801
802 return ctx->u.pw.status;
803}
804
805const union g_addr *dplane_ctx_get_pw_nexthop(
806 const struct zebra_dplane_ctx *ctx)
807{
808 DPLANE_CTX_VALID(ctx);
809
810 return &(ctx->u.pw.nexthop);
811}
812
813const union pw_protocol_fields *dplane_ctx_get_pw_proto(
814 const struct zebra_dplane_ctx *ctx)
815{
816 DPLANE_CTX_VALID(ctx);
817
818 return &(ctx->u.pw.fields);
819}
820
7cdb1a84
MS
821/*
822 * End of dplane context accessors
823 */
824
c831033f 825
91f16812
MS
826/*
827 * Retrieve the limit on the number of pending, unprocessed updates.
828 */
829uint32_t dplane_get_in_queue_limit(void)
830{
831 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
832 memory_order_relaxed);
833}
834
835/*
836 * Configure limit on the number of pending, queued updates.
837 */
838void dplane_set_in_queue_limit(uint32_t limit, bool set)
839{
840 /* Reset to default on 'unset' */
841 if (!set)
842 limit = DPLANE_DEFAULT_MAX_QUEUED;
843
844 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
845 memory_order_relaxed);
846}
847
848/*
849 * Retrieve the current queue depth of incoming, unprocessed updates
850 */
851uint32_t dplane_get_in_queue_len(void)
852{
853 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
854 memory_order_seq_cst);
855}
856
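/*
 * Illustrative sketch (not part of the original file): callers that
 * feed the dataplane can compare the current depth against the
 * configured limit to apply back-pressure; the exact policy is up to
 * the caller and the value below is arbitrary.
 *
 *	dplane_set_in_queue_limit(1000, true);
 *
 *	if (dplane_get_in_queue_len() >= dplane_get_in_queue_limit())
 *		zlog_debug("dplane incoming queue limit reached");
 */
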
16c628de
MS
857/*
858 * Common dataplane context init with zebra namespace info.
859 */
860static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
861 struct zebra_ns *zns,
862 bool is_update)
863{
864 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
865
866#if defined(HAVE_NETLINK)
867 /* Increment message counter after copying to context struct - may need
868 * two messages in some 'update' cases.
869 */
870 if (is_update)
871 zns->netlink_dplane.seq += 2;
872 else
873 zns->netlink_dplane.seq++;
874#endif /* HAVE_NETLINK */
875
876 return AOK;
877}
878
7cdb1a84
MS
879/*
880 * Initialize a context block for a route update from zebra data structs.
881 */
25779064 882static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
5709131c 883 enum dplane_op_e op,
7cdb1a84
MS
884 struct route_node *rn,
885 struct route_entry *re)
886{
887 int ret = EINVAL;
888 const struct route_table *table = NULL;
889 const rib_table_info_t *info;
890 const struct prefix *p, *src_p;
891 struct zebra_ns *zns;
892 struct zebra_vrf *zvrf;
f183e380 893 struct nexthop *nexthop;
7cdb1a84 894
5709131c 895 if (!ctx || !rn || !re)
7cdb1a84 896 goto done;
7cdb1a84
MS
897
898 ctx->zd_op = op;
14b0bc8e 899 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
7cdb1a84 900
0f461727
MS
901 ctx->u.rinfo.zd_type = re->type;
902 ctx->u.rinfo.zd_old_type = re->type;
7cdb1a84
MS
903
904 /* Prefixes: dest, and optional source */
905 srcdest_rnode_prefixes(rn, &p, &src_p);
906
0f461727 907 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
7cdb1a84 908
5709131c 909 if (src_p)
0f461727 910 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
5709131c 911 else
0f461727 912 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
7cdb1a84
MS
913
914 ctx->zd_table_id = re->table;
915
0f461727
MS
916 ctx->u.rinfo.zd_metric = re->metric;
917 ctx->u.rinfo.zd_old_metric = re->metric;
7cdb1a84 918 ctx->zd_vrf_id = re->vrf_id;
0f461727
MS
919 ctx->u.rinfo.zd_mtu = re->mtu;
920 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
921 ctx->u.rinfo.zd_instance = re->instance;
922 ctx->u.rinfo.zd_tag = re->tag;
923 ctx->u.rinfo.zd_old_tag = re->tag;
924 ctx->u.rinfo.zd_distance = re->distance;
7cdb1a84
MS
925
926 table = srcdest_rnode_table(rn);
927 info = table->info;
928
0f461727
MS
929 ctx->u.rinfo.zd_afi = info->afi;
930 ctx->u.rinfo.zd_safi = info->safi;
7cdb1a84
MS
931
932 /* Extract ns info - can't use pointers to 'core' structs */
933 zvrf = vrf_info_lookup(re->vrf_id);
934 zns = zvrf->zns;
935
16c628de 936 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
7cdb1a84
MS
937
938 /* Copy nexthops; recursive info is included too */
0f461727 939 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);
7cdb1a84 940
b8e0423d
MS
941 /* TODO -- maybe use array of nexthops to avoid allocs? */
942
14b0bc8e 943 /* Ensure that the dplane's nexthops flags are clear. */
0f461727 944 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
f183e380 945 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
f183e380 946
7cdb1a84
MS
947 /* Trying out the sequence number idea, so we can try to detect
948 * when a result is stale.
949 */
1485bbe7 950 re->dplane_sequence = zebra_router_get_next_sequence();
7cdb1a84
MS
951 ctx->zd_seq = re->dplane_sequence;
952
953 ret = AOK;
954
955done:
956 return ret;
957}
958
16c628de
MS
959/*
960 * Capture information for an LSP update in a dplane context.
961 */
962static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
963 enum dplane_op_e op,
964 zebra_lsp_t *lsp)
965{
966 int ret = AOK;
967 zebra_nhlfe_t *nhlfe, *new_nhlfe;
968
969 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
970 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
971 dplane_op2str(op), lsp->ile.in_label,
972 lsp->num_ecmp);
973
974 ctx->zd_op = op;
975 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
976
977 /* Capture namespace info */
978 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
979 (op == DPLANE_OP_LSP_UPDATE));
980
981 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
982
983 ctx->u.lsp.ile = lsp->ile;
984 ctx->u.lsp.addr_family = lsp->addr_family;
985 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
986 ctx->u.lsp.flags = lsp->flags;
987
988 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
989 for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
990 /* Not sure if this is meaningful... */
991 if (nhlfe->nexthop == NULL)
992 continue;
993
994 new_nhlfe =
995 zebra_mpls_lsp_add_nhlfe(
996 &(ctx->u.lsp),
997 nhlfe->type,
998 nhlfe->nexthop->type,
999 &(nhlfe->nexthop->gate),
1000 nhlfe->nexthop->ifindex,
1001 nhlfe->nexthop->nh_label->label[0]);
1002
1003 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
1004 ret = ENOMEM;
1005 break;
1006 }
1007
1008 /* Need to copy flags too */
1009 new_nhlfe->flags = nhlfe->flags;
1010 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
1011
1012 if (nhlfe == lsp->best_nhlfe)
1013 ctx->u.lsp.best_nhlfe = new_nhlfe;
1014 }
1015
1016 /* On error the ctx will be cleaned-up, so we don't need to
1017 * deal with any allocated nhlfe or nexthop structs here.
1018 */
1019
1020 return ret;
1021}
1022
7cdb1a84
MS
1023/*
1024 * Enqueue a new route update,
16c628de 1025 * and ensure an event is active for the dataplane pthread.
7cdb1a84 1026 */
25779064 1027static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
7cdb1a84
MS
1028{
1029 int ret = EINVAL;
91f16812 1030 uint32_t high, curr;
7cdb1a84 1031
16c628de 1032 /* Enqueue for processing by the dataplane pthread */
7cdb1a84
MS
1033 DPLANE_LOCK();
1034 {
25779064
MS
1035 TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
1036 zd_q_entries);
7cdb1a84
MS
1037 }
1038 DPLANE_UNLOCK();
1039
e07e9549
MS
1040 curr = atomic_add_fetch_explicit(
1041#ifdef __clang__
1042 /* TODO -- issue with the clang atomic/intrinsics currently;
1043 * casting away the 'Atomic'-ness of the variable works.
1044 */
1045 (uint32_t *)&(zdplane_info.dg_routes_queued),
1046#else
1047 &(zdplane_info.dg_routes_queued),
1048#endif
1049 1, memory_order_seq_cst);
91f16812
MS
1050
1051 /* Maybe update high-water counter also */
1052 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
1053 memory_order_seq_cst);
1054 while (high < curr) {
1055 if (atomic_compare_exchange_weak_explicit(
1056 &zdplane_info.dg_routes_queued_max,
1057 &high, curr,
1058 memory_order_seq_cst,
1059 memory_order_seq_cst))
1060 break;
1061 }
1062
7cdb1a84 1063 /* Ensure that an event for the dataplane thread is active */
c831033f 1064 ret = dplane_provider_work_ready();
7cdb1a84 1065
5709131c 1066 return ret;
7cdb1a84
MS
1067}
1068
7cdb1a84
MS
1069/*
1070 * Utility that prepares a route update and enqueues it for processing
1071 */
655d681a
MS
1072static enum zebra_dplane_result
1073dplane_route_update_internal(struct route_node *rn,
1074 struct route_entry *re,
1075 struct route_entry *old_re,
5709131c 1076 enum dplane_op_e op)
7cdb1a84 1077{
655d681a 1078 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
7cdb1a84 1079 int ret = EINVAL;
25779064 1080 struct zebra_dplane_ctx *ctx = NULL;
7cdb1a84
MS
1081
1082 /* Obtain context block */
1083 ctx = dplane_ctx_alloc();
1084 if (ctx == NULL) {
1085 ret = ENOMEM;
1086 goto done;
1087 }
1088
1089 /* Init context with info from zebra data structs */
1090 ret = dplane_ctx_route_init(ctx, op, rn, re);
1091 if (ret == AOK) {
1092 /* Capture some extra info for update case
1093 * where there's a different 'old' route.
1094 */
b8e0423d
MS
1095 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
1096 old_re && (old_re != re)) {
7cdb1a84
MS
1097 ctx->zd_is_update = true;
1098
1485bbe7
DS
1099 old_re->dplane_sequence =
1100 zebra_router_get_next_sequence();
7cdb1a84
MS
1101 ctx->zd_old_seq = old_re->dplane_sequence;
1102
0f461727
MS
1103 ctx->u.rinfo.zd_old_tag = old_re->tag;
1104 ctx->u.rinfo.zd_old_type = old_re->type;
1105 ctx->u.rinfo.zd_old_instance = old_re->instance;
1106 ctx->u.rinfo.zd_old_distance = old_re->distance;
1107 ctx->u.rinfo.zd_old_metric = old_re->metric;
01ce7cba
MS
1108
1109#ifndef HAVE_NETLINK
f183e380
MS
1110 /* For bsd, capture previous re's nexthops too, sigh.
1111 * We'll need these to do per-nexthop deletes.
1112 */
0f461727 1113 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
01ce7cba
MS
1114 old_re->ng.nexthop, NULL);
1115#endif /* !HAVE_NETLINK */
7cdb1a84
MS
1116 }
1117
1118 /* Enqueue context for processing */
1119 ret = dplane_route_enqueue(ctx);
1120 }
1121
1122done:
91f16812 1123 /* Update counter */
25779064 1124 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
1d11b21f
MS
1125 memory_order_relaxed);
1126
91f16812 1127 if (ret == AOK)
655d681a 1128 result = ZEBRA_DPLANE_REQUEST_QUEUED;
16c628de 1129 else {
25779064 1130 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
1d11b21f 1131 memory_order_relaxed);
16c628de
MS
1132 if (ctx)
1133 dplane_ctx_free(&ctx);
1d11b21f 1134 }
7cdb1a84 1135
5709131c 1136 return result;
7cdb1a84
MS
1137}
1138
1139/*
1140 * Enqueue a route 'add' for the dataplane.
1141 */
655d681a
MS
1142enum zebra_dplane_result dplane_route_add(struct route_node *rn,
1143 struct route_entry *re)
7cdb1a84 1144{
655d681a 1145 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
7cdb1a84 1146
5709131c 1147 if (rn == NULL || re == NULL)
7cdb1a84 1148 goto done;
7cdb1a84
MS
1149
1150 ret = dplane_route_update_internal(rn, re, NULL,
1151 DPLANE_OP_ROUTE_INSTALL);
1152
1153done:
5709131c 1154 return ret;
7cdb1a84
MS
1155}
1156
1157/*
1158 * Enqueue a route update for the dataplane.
1159 */
655d681a
MS
1160enum zebra_dplane_result dplane_route_update(struct route_node *rn,
1161 struct route_entry *re,
1162 struct route_entry *old_re)
7cdb1a84 1163{
655d681a 1164 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
7cdb1a84 1165
5709131c 1166 if (rn == NULL || re == NULL)
7cdb1a84 1167 goto done;
7cdb1a84
MS
1168
1169 ret = dplane_route_update_internal(rn, re, old_re,
1170 DPLANE_OP_ROUTE_UPDATE);
7cdb1a84 1171done:
5709131c 1172 return ret;
7cdb1a84
MS
1173}
1174
1175/*
1176 * Enqueue a route removal for the dataplane.
1177 */
655d681a
MS
1178enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
1179 struct route_entry *re)
7cdb1a84 1180{
655d681a 1181 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
7cdb1a84 1182
5709131c 1183 if (rn == NULL || re == NULL)
7cdb1a84 1184 goto done;
7cdb1a84
MS
1185
1186 ret = dplane_route_update_internal(rn, re, NULL,
1187 DPLANE_OP_ROUTE_DELETE);
1188
1189done:
5709131c 1190 return ret;
7cdb1a84
MS
1191}
1192
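/*
 * Illustrative sketch (not part of the original file): a caller in the
 * rib processing path would enqueue an install and interpret the
 * result roughly as below; the flag handling is a simplified
 * assumption.
 *
 *	enum zebra_dplane_result res;
 *
 *	res = dplane_route_add(rn, re);
 *	if (res == ZEBRA_DPLANE_REQUEST_QUEUED)
 *		SET_FLAG(re->status, ROUTE_ENTRY_QUEUED);
 *	else
 *		zlog_debug("%u: dplane route enqueue failed", re->vrf_id);
 */
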
16c628de
MS
1193/*
1194 * Enqueue LSP add for the dataplane.
1195 */
1196enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
1197{
1198 enum zebra_dplane_result ret =
1199 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
1200
1201 return ret;
1202}
1203
1204/*
1205 * Enqueue LSP update for the dataplane.
1206 */
1207enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
1208{
1209 enum zebra_dplane_result ret =
1210 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
1211
1212 return ret;
1213}
1214
1215/*
1216 * Enqueue LSP delete for the dataplane.
1217 */
1218enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
1219{
1220 enum zebra_dplane_result ret =
1221 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
1222
1223 return ret;
1224}
1225
1226/*
1227 * Common internal LSP update utility
1228 */
1229static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
1230 enum dplane_op_e op)
1231{
1232 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
1233 int ret = EINVAL;
1234 struct zebra_dplane_ctx *ctx = NULL;
1235
1236 /* Obtain context block */
1237 ctx = dplane_ctx_alloc();
1238 if (ctx == NULL) {
1239 ret = ENOMEM;
1240 goto done;
1241 }
1242
1243 ret = dplane_ctx_lsp_init(ctx, op, lsp);
1244 if (ret != AOK)
1245 goto done;
1246
1247 ret = dplane_route_enqueue(ctx);
1248
1249done:
1250 /* Update counter */
1251 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
1252 memory_order_relaxed);
1253
1254 if (ret == AOK)
1255 result = ZEBRA_DPLANE_REQUEST_QUEUED;
1256 else {
1257 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
1258 memory_order_relaxed);
1259 if (ctx)
1260 dplane_ctx_free(&ctx);
1261 }
1262
1263 return result;
1264}
1265
1d11b21f
MS
1266/*
1267 * Handler for 'show dplane'
1268 */
1269int dplane_show_helper(struct vty *vty, bool detailed)
1270{
16c628de
MS
1271 uint64_t queued, queue_max, limit, errs, incoming, yields,
1272 other_errs;
1d11b21f 1273
4dfd7a02 1274 /* Using atomics because counters are being changed in different
c831033f 1275 * pthread contexts.
4dfd7a02 1276 */
25779064 1277 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
1d11b21f 1278 memory_order_relaxed);
91f16812
MS
1279 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
1280 memory_order_relaxed);
25779064 1281 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
1d11b21f 1282 memory_order_relaxed);
25779064 1283 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4dfd7a02 1284 memory_order_relaxed);
25779064 1285 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
1d11b21f 1286 memory_order_relaxed);
c831033f
MS
1287 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
1288 memory_order_relaxed);
16c628de
MS
1289 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
1290 memory_order_relaxed);
1d11b21f 1291
c831033f
MS
1292 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
1293 incoming);
1d11b21f 1294 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
16c628de 1295 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
91f16812 1296 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
1d11b21f 1297 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4dfd7a02 1298 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
16c628de 1299 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
1d11b21f
MS
1300
1301 return CMD_SUCCESS;
1302}
1303
1304/*
1305 * Handler for 'show dplane providers'
1306 */
1307int dplane_show_provs_helper(struct vty *vty, bool detailed)
1308{
c831033f
MS
1309 struct zebra_dplane_provider *prov;
1310 uint64_t in, in_max, out, out_max;
1311
1312 vty_out(vty, "Zebra dataplane providers:\n");
1313
1314 DPLANE_LOCK();
1315 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
1316 DPLANE_UNLOCK();
1317
1318 /* Show counters, useful info from each registered provider */
1319 while (prov) {
1320
1321 in = atomic_load_explicit(&prov->dp_in_counter,
1322 memory_order_relaxed);
1323 in_max = atomic_load_explicit(&prov->dp_in_max,
1324 memory_order_relaxed);
1325 out = atomic_load_explicit(&prov->dp_out_counter,
1326 memory_order_relaxed);
1327 out_max = atomic_load_explicit(&prov->dp_out_max,
1328 memory_order_relaxed);
1329
c9d17fe8
MS
1330 vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
1331 "out: %"PRIu64", q_max: %"PRIu64"\n",
c831033f
MS
1332 prov->dp_name, prov->dp_id, in, in_max, out, out_max);
1333
1334 DPLANE_LOCK();
1335 prov = TAILQ_NEXT(prov, dp_prov_link);
1336 DPLANE_UNLOCK();
1337 }
1d11b21f
MS
1338
1339 return CMD_SUCCESS;
1340}
1341
b8e0423d
MS
1342/*
1343 * Provider registration
1344 */
1345int dplane_provider_register(const char *name,
c831033f
MS
1346 enum dplane_provider_prio prio,
1347 int flags,
4c206c8f
MS
1348 int (*fp)(struct zebra_dplane_provider *),
1349 int (*fini_fp)(struct zebra_dplane_provider *,
1350 bool early),
1ff8a248
MS
1351 void *data,
1352 struct zebra_dplane_provider **prov_p)
b8e0423d
MS
1353{
1354 int ret = 0;
6fb51ccb 1355 struct zebra_dplane_provider *p = NULL, *last;
b8e0423d
MS
1356
1357 /* Validate */
1358 if (fp == NULL) {
1359 ret = EINVAL;
1360 goto done;
1361 }
1362
1363 if (prio <= DPLANE_PRIO_NONE ||
1bcea841 1364 prio > DPLANE_PRIO_LAST) {
b8e0423d
MS
1365 ret = EINVAL;
1366 goto done;
1367 }
1368
1369 /* Allocate and init new provider struct */
25779064 1370 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
b8e0423d
MS
1371 if (p == NULL) {
1372 ret = ENOMEM;
1373 goto done;
1374 }
1375
c831033f
MS
1376 pthread_mutex_init(&(p->dp_mutex), NULL);
1377 TAILQ_INIT(&(p->dp_ctx_in_q));
1378 TAILQ_INIT(&(p->dp_ctx_out_q));
b8e0423d
MS
1379
1380 p->dp_priority = prio;
1381 p->dp_fp = fp;
18c37974 1382 p->dp_fini = fini_fp;
c831033f 1383 p->dp_data = data;
18c37974 1384
c831033f 1385 /* Lock - the dplane pthread may be running */
18c37974 1386 DPLANE_LOCK();
b8e0423d 1387
25779064 1388 p->dp_id = ++zdplane_info.dg_provider_id;
b8e0423d 1389
c831033f
MS
1390 if (name)
1391 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
1392 else
1393 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
1394 "provider-%u", p->dp_id);
1395
b8e0423d 1396 /* Insert into list ordered by priority */
c831033f 1397 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
5709131c 1398 if (last->dp_priority > p->dp_priority)
b8e0423d 1399 break;
b8e0423d
MS
1400 }
1401
5709131c 1402 if (last)
c831033f 1403 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
5709131c 1404 else
25779064 1405 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
c831033f 1406 dp_prov_link);
b8e0423d 1407
18c37974
MS
1408 /* And unlock */
1409 DPLANE_UNLOCK();
1410
c831033f
MS
1411 if (IS_ZEBRA_DEBUG_DPLANE)
1412 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
1413 p->dp_name, p->dp_id, p->dp_priority);
1414
b8e0423d 1415done:
1ff8a248
MS
1416 if (prov_p)
1417 *prov_p = p;
1418
5709131c 1419 return ret;
b8e0423d
MS
1420}
1421
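/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * module registering itself as a provider at startup. The callback
 * "example_prov_func" is a placeholder; no fini callback or private
 * data is supplied here.
 *
 *	static struct zebra_dplane_provider *example_prov;
 *
 *	static void example_module_init(void)
 *	{
 *		int ret;
 *
 *		ret = dplane_provider_register("Example",
 *					       DPLANE_PRIO_PRE_KERNEL,
 *					       DPLANE_PROV_FLAGS_DEFAULT,
 *					       example_prov_func,
 *					       NULL, NULL, &example_prov);
 *		if (ret != AOK)
 *			zlog_err("example provider registration failed: %d",
 *				 ret);
 *	}
 */
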
c831033f
MS
1422/* Accessors for provider attributes */
1423const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
1424{
1425 return prov->dp_name;
1426}
1427
1428uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
1429{
1430 return prov->dp_id;
1431}
1432
1433void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
1434{
1435 return prov->dp_data;
1436}
1437
1438int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
1439{
1440 return zdplane_info.dg_updates_per_cycle;
1441}
1442
ad6aad4d
MS
1443/* Lock/unlock a provider's mutex - iff the provider was registered with
1444 * the THREADED flag.
1445 */
1446void dplane_provider_lock(struct zebra_dplane_provider *prov)
1447{
1448 if (dplane_provider_is_threaded(prov))
1449 DPLANE_PROV_LOCK(prov);
1450}
1451
1452void dplane_provider_unlock(struct zebra_dplane_provider *prov)
1453{
1454 if (dplane_provider_is_threaded(prov))
1455 DPLANE_PROV_UNLOCK(prov);
1456}
1457
c831033f
MS
1458/*
1459 * Dequeue and maintain associated counter
1460 */
1461struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
1462 struct zebra_dplane_provider *prov)
1463{
1464 struct zebra_dplane_ctx *ctx = NULL;
1465
ad6aad4d 1466 dplane_provider_lock(prov);
c831033f
MS
1467
1468 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1469 if (ctx) {
1470 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
c9d17fe8
MS
1471
1472 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
1473 memory_order_relaxed);
c831033f
MS
1474 }
1475
ad6aad4d 1476 dplane_provider_unlock(prov);
c831033f
MS
1477
1478 return ctx;
1479}
1480
1481/*
1482 * Dequeue work to a list, return count
1483 */
1484int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
1485 struct dplane_ctx_q *listp)
1486{
1487 int limit, ret;
1488 struct zebra_dplane_ctx *ctx;
1489
1490 limit = zdplane_info.dg_updates_per_cycle;
1491
ad6aad4d 1492 dplane_provider_lock(prov);
c831033f
MS
1493
1494 for (ret = 0; ret < limit; ret++) {
1495 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1496 if (ctx) {
1497 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
1498
1499 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
1500 } else {
1501 break;
1502 }
1503 }
1504
c9d17fe8
MS
1505 if (ret > 0)
1506 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
1507 memory_order_relaxed);
1508
ad6aad4d 1509 dplane_provider_unlock(prov);
c831033f
MS
1510
1511 return ret;
1512}
1513
1514/*
1515 * Enqueue and maintain associated counter
1516 */
1517void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
1518 struct zebra_dplane_ctx *ctx)
1519{
ad6aad4d 1520 dplane_provider_lock(prov);
c831033f
MS
1521
1522 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
1523 zd_q_entries);
1524
ad6aad4d 1525 dplane_provider_unlock(prov);
c831033f
MS
1526
1527 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
1528 memory_order_relaxed);
1529}
1530
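/*
 * Illustrative sketch (not part of the original file): a provider
 * callback built on the list-based dequeue helper above, processing up
 * to one cycle's worth of work and returning contexts on its outbound
 * queue. "example_handle" stands in for the provider's own per-context
 * processing.
 *
 *	static int example_prov_func(struct zebra_dplane_provider *prov)
 *	{
 *		struct dplane_ctx_q work;
 *		struct zebra_dplane_ctx *ctx;
 *
 *		TAILQ_INIT(&work);
 *		dplane_provider_dequeue_in_list(prov, &work);
 *
 *		while ((ctx = dplane_ctx_dequeue(&work)) != NULL) {
 *			dplane_ctx_set_status(ctx, example_handle(ctx));
 *			dplane_provider_enqueue_out_ctx(prov, ctx);
 *		}
 *
 *		return 0;
 *	}
 */
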
62b8bb7a
MS
1531/*
1532 * Accessor for provider object
1533 */
c831033f
MS
1534bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
1535{
1536 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
1537}
1538
62b8bb7a
MS
1539/*
1540 * Internal helper that copies information from a zebra ns object; this is
1541 * called in the zebra main pthread context as part of dplane ctx init.
1542 */
1543static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
1544 struct zebra_ns *zns)
1545{
1546 ns_info->ns_id = zns->ns_id;
1547
1548#if defined(HAVE_NETLINK)
1549 ns_info->is_cmd = true;
1550 ns_info->nls = zns->netlink_dplane;
1551#endif /* NETLINK */
1552}
1553
c831033f
MS
1554/*
1555 * Provider api to signal that work/events are available
1556 * for the dataplane pthread.
1557 */
1558int dplane_provider_work_ready(void)
1559{
e5a60d82
MS
1560 /* Note that during zebra startup, we may be offered work before
1561 * the dataplane pthread (and thread-master) are ready. We want to
1562 * enqueue the work, but the event-scheduling machinery may not be
1563 * available.
1564 */
1565 if (zdplane_info.dg_run) {
1566 thread_add_event(zdplane_info.dg_master,
1567 dplane_thread_loop, NULL, 0,
1568 &zdplane_info.dg_t_update);
1569 }
c831033f
MS
1570
1571 return AOK;
1572}
1573
7cdb1a84 1574/*
c831033f 1575 * Kernel dataplane provider
7cdb1a84 1576 */
c831033f 1577
16c628de
MS
1578/*
1579 * Handler for kernel LSP updates
1580 */
1581static enum zebra_dplane_result
1582kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
1583{
1584 enum zebra_dplane_result res;
1585
1586 /* Call into the synchronous kernel-facing code here */
1587 res = kernel_lsp_update(ctx);
1588
1589 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1590 atomic_fetch_add_explicit(
1591 &zdplane_info.dg_lsp_errors, 1,
1592 memory_order_relaxed);
1593
1594 return res;
1595}
1596
1597/*
1598 * Handler for kernel route updates
1599 */
1600static enum zebra_dplane_result
1601kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
1602{
1603 enum zebra_dplane_result res;
1604
1605 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
1606 char dest_str[PREFIX_STRLEN];
1607
1608 prefix2str(dplane_ctx_get_dest(ctx),
1609 dest_str, sizeof(dest_str));
1610
1611 zlog_debug("%u:%s Dplane route update ctx %p op %s",
1612 dplane_ctx_get_vrf(ctx), dest_str,
1613 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
1614 }
1615
1616 /* Call into the synchronous kernel-facing code here */
1617 res = kernel_route_update(ctx);
1618
1619 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
1620 atomic_fetch_add_explicit(
1621 &zdplane_info.dg_route_errors, 1,
1622 memory_order_relaxed);
1623
1624 return res;
1625}
1626
c831033f
MS
1627/*
1628 * Kernel provider callback
1629 */
1630static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
7cdb1a84 1631{
c831033f
MS
1632 enum zebra_dplane_result res;
1633 struct zebra_dplane_ctx *ctx;
1634 int counter, limit;
7cdb1a84 1635
c831033f 1636 limit = dplane_provider_get_work_limit(prov);
7cdb1a84 1637
c831033f
MS
1638 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1639 zlog_debug("dplane provider '%s': processing",
1640 dplane_provider_get_name(prov));
7cdb1a84 1641
c831033f 1642 for (counter = 0; counter < limit; counter++) {
91f16812 1643
c831033f
MS
1644 ctx = dplane_provider_dequeue_in_ctx(prov);
1645 if (ctx == NULL)
1646 break;
d8c16a95 1647
16c628de
MS
1648 /* Dispatch to appropriate kernel-facing apis */
1649 switch (dplane_ctx_get_op(ctx)) {
1d11b21f 1650
16c628de
MS
1651 case DPLANE_OP_ROUTE_INSTALL:
1652 case DPLANE_OP_ROUTE_UPDATE:
1653 case DPLANE_OP_ROUTE_DELETE:
1654 res = kernel_dplane_route_update(ctx);
1655 break;
d8c16a95 1656
16c628de
MS
1657 case DPLANE_OP_LSP_INSTALL:
1658 case DPLANE_OP_LSP_UPDATE:
1659 case DPLANE_OP_LSP_DELETE:
1660 res = kernel_dplane_lsp_update(ctx);
1661 break;
d8c16a95 1662
16c628de 1663 default:
c831033f 1664 atomic_fetch_add_explicit(
16c628de 1665 &zdplane_info.dg_other_errors, 1,
c831033f 1666 memory_order_relaxed);
d8c16a95 1667
16c628de
MS
1668 res = ZEBRA_DPLANE_REQUEST_FAILURE;
1669 break;
1670 }
1671
c831033f
MS
1672 dplane_ctx_set_status(ctx, res);
1673
1674 dplane_provider_enqueue_out_ctx(prov, ctx);
1675 }
1676
1677 /* Ensure that we'll run the work loop again if there's still
1678 * more work to do.
1679 */
1680 if (counter >= limit) {
1681 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1682 zlog_debug("dplane provider '%s' reached max updates %d",
1683 dplane_provider_get_name(prov), counter);
1684
1685 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
1686 1, memory_order_relaxed);
1687
1688 dplane_provider_work_ready();
1689 }
1690
1691 return 0;
1692}
1693
e5a60d82
MS
1694#if DPLANE_TEST_PROVIDER
1695
c831033f
MS
1696/*
1697 * Test dataplane provider plugin
1698 */
1699
1700/*
1701 * Test provider process callback
1702 */
1703static int test_dplane_process_func(struct zebra_dplane_provider *prov)
1704{
1705 struct zebra_dplane_ctx *ctx;
1706 int counter, limit;
1707
1708 /* Just moving from 'in' queue to 'out' queue */
1709
1710 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1711 zlog_debug("dplane provider '%s': processing",
1712 dplane_provider_get_name(prov));
1713
1714 limit = dplane_provider_get_work_limit(prov);
1715
1716 for (counter = 0; counter < limit; counter++) {
1717
1718 ctx = dplane_provider_dequeue_in_ctx(prov);
1719 if (ctx == NULL)
1720 break;
1721
1722 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1723
1724 dplane_provider_enqueue_out_ctx(prov, ctx);
1725 }
1726
c9d17fe8
MS
1727 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
1728 zlog_debug("dplane provider '%s': processed %d",
1729 dplane_provider_get_name(prov), counter);
1730
c831033f
MS
1731 /* Ensure that we'll run the work loop again if there's still
1732 * more work to do.
1733 */
1734 if (counter >= limit)
1735 dplane_provider_work_ready();
1736
1737 return 0;
1738}
1739
1740/*
1741 * Test provider shutdown/fini callback
1742 */
1743static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
1744 bool early)
1745{
1746 if (IS_ZEBRA_DEBUG_DPLANE)
1747 zlog_debug("dplane provider '%s': %sshutdown",
1748 dplane_provider_get_name(prov),
1749 early ? "early " : "");
1750
1751 return 0;
1752}
e5a60d82 1753#endif /* DPLANE_TEST_PROVIDER */
c831033f
MS
1754
1755/*
1756 * Register default kernel provider
1757 */
1758static void dplane_provider_init(void)
1759{
1760 int ret;
1761
1762 ret = dplane_provider_register("Kernel",
1763 DPLANE_PRIO_KERNEL,
1764 DPLANE_PROV_FLAGS_DEFAULT,
1765 kernel_dplane_process_func,
1766 NULL,
1ff8a248 1767 NULL, NULL);
c831033f
MS
1768
1769 if (ret != AOK)
1770 zlog_err("Unable to register kernel dplane provider: %d",
1771 ret);
1772
e5a60d82
MS
1773#if DPLANE_TEST_PROVIDER
1774 /* Optional test provider ... */
c831033f
MS
1775 ret = dplane_provider_register("Test",
1776 DPLANE_PRIO_PRE_KERNEL,
1777 DPLANE_PROV_FLAGS_DEFAULT,
1778 test_dplane_process_func,
1779 test_dplane_shutdown_func,
1ff8a248 1780 NULL /* data */, NULL);
c831033f
MS
1781
1782 if (ret != AOK)
1783 zlog_err("Unable to register test dplane provider: %d",
1784 ret);
e5a60d82 1785#endif /* DPLANE_TEST_PROVIDER */
7cdb1a84
MS
1786}
1787
4dfd7a02
MS
1788/* Indicates zebra shutdown/exit is in progress. Some operations may be
1789 * simplified or skipped during shutdown processing.
1790 */
1791bool dplane_is_in_shutdown(void)
1792{
25779064 1793 return zdplane_info.dg_is_shutdown;
4dfd7a02
MS
1794}
1795
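/*
 * Illustrative sketch (not part of the original file): providers and
 * other dplane-side code can use this predicate to skip optional work
 * once zebra has begun shutting down.
 *
 *	if (dplane_is_in_shutdown())
 *		return 0;
 */
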
1796/*
1797 * Early or pre-shutdown, de-init notification api. This runs pretty
1798 * early during zebra shutdown, as a signal to stop new work and prepare
1799 * for updates generated by shutdown/cleanup activity, as zebra tries to
1800 * remove everything it's responsible for.
c9d17fe8 1801 * NB: This runs in the main zebra pthread context.
4dfd7a02
MS
1802 */
1803void zebra_dplane_pre_finish(void)
1804{
1805 if (IS_ZEBRA_DEBUG_DPLANE)
1806 zlog_debug("Zebra dataplane pre-fini called");
1807
25779064 1808 zdplane_info.dg_is_shutdown = true;
4dfd7a02 1809
c9d17fe8 1810 /* TODO -- Notify provider(s) of pending shutdown */
4dfd7a02
MS
1811}
1812
1813/*
1814 * Utility to determine whether work remains enqueued within the dplane;
1815 * used during system shutdown processing.
1816 */
1817static bool dplane_work_pending(void)
1818{
c9d17fe8 1819 bool ret = false;
25779064 1820 struct zebra_dplane_ctx *ctx;
c9d17fe8 1821 struct zebra_dplane_provider *prov;
4dfd7a02 1822
c831033f
MS
1823 /* TODO -- just checking incoming/pending work for now, must check
1824 * providers
1825 */
4dfd7a02
MS
1826 DPLANE_LOCK();
1827 {
25779064 1828 ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
c9d17fe8 1829 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4dfd7a02
MS
1830 }
1831 DPLANE_UNLOCK();
1832
c9d17fe8
MS
1833 if (ctx != NULL) {
1834 ret = true;
1835 goto done;
1836 }
1837
1838 while (prov) {
1839
ad6aad4d 1840 dplane_provider_lock(prov);
c9d17fe8
MS
1841
1842 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
1843 if (ctx == NULL)
1844 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
1845
ad6aad4d 1846 dplane_provider_unlock(prov);
c9d17fe8
MS
1847
1848 if (ctx != NULL)
1849 break;
1850
1851 DPLANE_LOCK();
1852 prov = TAILQ_NEXT(prov, dp_prov_link);
1853 DPLANE_UNLOCK();
1854 }
1855
1856 if (ctx != NULL)
1857 ret = true;
1858
1859done:
1860 return ret;
4dfd7a02
MS
1861}
1862
1863/*
1864 * Shutdown-time intermediate callback, used to determine when all pending
1865 * in-flight updates are done. If there's still work to do, reschedules itself.
1866 * If all work is done, schedules an event to the main zebra thread for
1867 * final zebra shutdown.
1868 * This runs in the dplane pthread context.
1869 */
1870static int dplane_check_shutdown_status(struct thread *event)
1871{
1872 if (IS_ZEBRA_DEBUG_DPLANE)
1873 zlog_debug("Zebra dataplane shutdown status check called");
1874
1875 if (dplane_work_pending()) {
1876 /* Reschedule dplane check on a short timer */
25779064 1877 thread_add_timer_msec(zdplane_info.dg_master,
4dfd7a02
MS
1878 dplane_check_shutdown_status,
1879 NULL, 100,
25779064 1880 &zdplane_info.dg_t_shutdown_check);
4dfd7a02
MS
1881
1882 /* TODO - give up and stop waiting after a short time? */
1883
1884 } else {
1885 /* We appear to be done - schedule a final callback event
1886 * for the zebra main pthread.
1887 */
1888 thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
1889 }
1890
1891 return 0;
1892}
1893
18c37974 1894/*
1d11b21f 1895 * Shutdown, de-init api. This runs pretty late during shutdown,
4dfd7a02
MS
1896 * after zebra has tried to free/remove/uninstall all routes during shutdown.
1897 * At this point, dplane work may still remain to be done, so we can't just
1898 * blindly terminate. If there's still work to do, we'll periodically check
1899 * and when done, we'll enqueue a task to the zebra main thread for final
1900 * termination processing.
1901 *
1d11b21f 1902 * NB: This runs in the main zebra thread context.
18c37974 1903 */
1d11b21f 1904void zebra_dplane_finish(void)
18c37974 1905{
4dfd7a02
MS
1906 if (IS_ZEBRA_DEBUG_DPLANE)
1907 zlog_debug("Zebra dataplane fini called");
1908
25779064 1909 thread_add_event(zdplane_info.dg_master,
4dfd7a02 1910 dplane_check_shutdown_status, NULL, 0,
25779064 1911 &zdplane_info.dg_t_shutdown_check);
4dfd7a02
MS
1912}
1913
c831033f
MS
1914/*
1915 * Main dataplane pthread event loop. The thread takes new incoming work
1916 * and offers it to the first provider. It then iterates through the
1917 * providers, taking complete work from each one and offering it
1918 * to the next in order. At each step, a limited number of updates are
1919 * processed during a cycle in order to provide some fairness.
14b0bc8e
MS
1920 *
1921 * This loop through the providers is only run once, so that the dataplane
1922 * pthread can look for other pending work - such as i/o work on behalf of
1923 * providers.
c831033f
MS
1924 */
static int dplane_thread_loop(struct thread *event)
{
	struct dplane_ctx_q work_list;
	struct dplane_ctx_q error_list;
	struct zebra_dplane_provider *prov;
	struct zebra_dplane_ctx *ctx, *tctx;
	int limit, counter, error_counter;
	uint64_t curr, high;

	/* Capture work limit per cycle */
	limit = zdplane_info.dg_updates_per_cycle;

	/* Init temporary lists used to move contexts among providers */
	TAILQ_INIT(&work_list);
	TAILQ_INIT(&error_list);
	error_counter = 0;

	/* Check for zebra shutdown */
	if (!zdplane_info.dg_run)
		goto done;

	/* Dequeue some incoming work from zebra (if any) onto the temporary
	 * working list.
	 */
	DPLANE_LOCK();

	/* Locate initial registered provider */
	prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);

	/* Move new work from incoming list to temp list */
	for (counter = 0; counter < limit; counter++) {
		ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
		if (ctx) {
			TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
				     zd_q_entries);

			ctx->zd_provider = prov->dp_id;

			TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
		} else {
			break;
		}
	}

	DPLANE_UNLOCK();

	atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
				  memory_order_relaxed);

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane: incoming new work counter: %d", counter);

	/* Iterate through the registered providers, offering new incoming
	 * work. If the provider has outgoing work in its queue, take that
	 * work for the next provider.
	 */
	while (prov) {

		/* At each iteration, the temporary work list has 'counter'
		 * items.
		 */
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane enqueues %d new work to provider '%s'",
				   counter, dplane_provider_get_name(prov));

		/* Capture current provider id in each context; check for
		 * error status.
		 */
		TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
			if (dplane_ctx_get_status(ctx) ==
			    ZEBRA_DPLANE_REQUEST_SUCCESS) {
				ctx->zd_provider = prov->dp_id;
			} else {
				/*
				 * TODO -- improve error-handling: recirc
				 * errors backwards so that providers can
				 * 'undo' their work (if they want to)
				 */

				/* Move to error list; will be returned
				 * to zebra main.
				 */
				TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
				TAILQ_INSERT_TAIL(&error_list,
						  ctx, zd_q_entries);
				error_counter++;
			}
		}

		/* Enqueue new work to the provider */
		dplane_provider_lock(prov);

		if (TAILQ_FIRST(&work_list))
			TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
				     zd_q_entries);

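		/* Account for the work just handed to this provider, and
		 * track the input queue's high-water mark.
		 */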
		atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
					  memory_order_relaxed);
		curr = atomic_load_explicit(&prov->dp_in_queued,
					    memory_order_relaxed);
		high = atomic_load_explicit(&prov->dp_in_max,
					    memory_order_relaxed);
		if (curr > high)
			atomic_store_explicit(&prov->dp_in_max, curr,
					      memory_order_relaxed);

		dplane_provider_unlock(prov);

		/* Reset the temp list (though the 'concat' may have done this
		 * already), and the counter
		 */
		TAILQ_INIT(&work_list);
		counter = 0;

		/* Call into the provider code. Note that this is
		 * unconditional: we offer to do work even if we don't enqueue
		 * any _new_ work.
		 */
		(*prov->dp_fp)(prov);

		/* Check for zebra shutdown */
		if (!zdplane_info.dg_run)
			break;

		/* Dequeue completed work from the provider */
		dplane_provider_lock(prov);

		while (counter < limit) {
			ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
			if (ctx) {
				TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
					     zd_q_entries);

				TAILQ_INSERT_TAIL(&work_list,
						  ctx, zd_q_entries);
				counter++;
			} else
				break;
		}

		dplane_provider_unlock(prov);

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane dequeues %d completed work from provider %s",
				   counter, dplane_provider_get_name(prov));

		/* Locate next provider */
		DPLANE_LOCK();
		prov = TAILQ_NEXT(prov, dp_prov_link);
		DPLANE_UNLOCK();
	}

	/* After all providers have been serviced, enqueue any completed
	 * work and any errors back to zebra so it can process the results.
	 */
	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane has %d completed, %d errors, for zebra main",
			   counter, error_counter);

	/*
	 * Hand lists through the api to zebra main,
	 * to reduce the number of lock/unlock cycles
	 */

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&error_list);

	TAILQ_INIT(&error_list);

	/* Call through to zebra main */
	(zdplane_info.dg_results_cb)(&work_list);

	TAILQ_INIT(&work_list);

done:
	return 0;
}
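
/*
 * Illustrative sketch, not part of zebra: a minimal provider work callback
 * of the shape dplane_thread_loop() invokes through prov->dp_fp (the int
 * return type is assumed here). It drains the provider's input queue and
 * moves each context to the output queue, where the loop above collects it
 * for the next provider or for zebra main. The function name is
 * hypothetical; a real provider would also perform its programming work
 * and set each context's result status before handing it back.
 */
static int example_provider_work(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;

	dplane_provider_lock(prov);

	while ((ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q))) != NULL) {
		TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);

		/* ... perform the provider's update work on 'ctx' here ... */

		TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx, zd_q_entries);
	}

	dplane_provider_unlock(prov);

	return 0;
}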

/*
 * Final phase of shutdown, after all work enqueued to dplane has been
 * processed. This is called from the zebra main pthread context.
 */
void zebra_dplane_shutdown(void)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("Zebra dataplane shutdown called");

	/* Stop dplane thread, if it's running */

	zdplane_info.dg_run = false;

	THREAD_OFF(zdplane_info.dg_t_update);

	frr_pthread_stop(zdplane_info.dg_pthread, NULL);

	/* Destroy pthread */
	frr_pthread_destroy(zdplane_info.dg_pthread);
	zdplane_info.dg_pthread = NULL;
	zdplane_info.dg_master = NULL;

	/* TODO -- Notify provider(s) of final shutdown */

	/* TODO -- Clean-up provider objects */

	/* TODO -- Clean queue(s), free memory */
}

/*
 * Initialize the dataplane module during startup, internal/private version
 */
static void zebra_dplane_init_internal(struct zebra_t *zebra)
{
	memset(&zdplane_info, 0, sizeof(zdplane_info));

	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);

	TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
	TAILQ_INIT(&zdplane_info.dg_providers_q);

	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;

	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;

	/* Register default kernel 'provider' during init */
	dplane_provider_init();
}

/*
 * Start the dataplane pthread. This step needs to be run later than the
 * 'init' step, in case zebra has forked.
 */
void zebra_dplane_start(void)
{
	/* Start dataplane pthread */

	struct frr_pthread_attr pattr = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};

	zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
						  "Zebra dplane");

	zdplane_info.dg_master = zdplane_info.dg_pthread->master;

	zdplane_info.dg_run = true;

	/* Enqueue an initial event for the dataplane pthread */
	thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
			 &zdplane_info.dg_t_update);

	frr_pthread_run(zdplane_info.dg_pthread, NULL);
}

/*
 * Initialize the dataplane module at startup; called by zebra rib_init()
 */
void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
	zebra_dplane_init_internal(&zebrad);
	zdplane_info.dg_results_cb = results_fp;
}
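
/*
 * Illustrative sketch, not part of zebra: how a caller on the zebra main
 * pthread might wire the dataplane up using the two-step init/start api
 * above. The function and callback names are hypothetical; zebra's real
 * results handler lives in the rib processing code.
 */
static int example_results_handler(struct dplane_ctx_q *ctxlist)
{
	struct zebra_dplane_ctx *ctx;

	while ((ctx = TAILQ_FIRST(ctxlist)) != NULL) {
		TAILQ_REMOVE(ctxlist, ctx, zd_q_entries);

		/* Examine dplane_ctx_get_status(ctx), update the rib as
		 * needed, then free or recycle the context via the ctx api.
		 */
	}

	return 0;
}

static void example_dplane_startup(void)
{
	/* Register the results callback before any updates are enqueued */
	zebra_dplane_init(example_results_handler);

	/* ... zebra may daemonize/fork here ... */

	/* Start the dataplane pthread only after any fork */
	zebra_dplane_start();
}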