1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_dplane.h"
33 #include "zebra/zebra_vxlan_private.h"
34 #include "zebra/zebra_mpls.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37 #include "zebra/zebra_pbr.h"
38 #include "printfrr.h"
39
40 /* Memory types */
41 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx");
42 DEFINE_MTYPE_STATIC(ZEBRA, DP_INTF, "Zebra DPlane Intf");
43 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider");
44 DEFINE_MTYPE_STATIC(ZEBRA, DP_NETFILTER, "Zebra Netfilter Internal Object");
45 DEFINE_MTYPE_STATIC(ZEBRA, DP_NS, "DPlane NSes");
46
47 #ifndef AOK
48 # define AOK 0
49 #endif
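/* AOK ("all ok", i.e. zero) is used throughout this file as the
 * internal "no error" return value.
 */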
50
51 /* Control for collection of extra interface info with route updates; a plugin
52 * can enable the extra info via a dplane API.
53 */
54 static bool dplane_collect_extra_intf_info;
55
56 /* Enable test dataplane provider */
57 /*#define DPLANE_TEST_PROVIDER 1 */
58
59 /* Default value for max queued incoming updates */
60 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
61
62 /* Default value for new work per cycle */
63 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
64
65 /* Validation check macro for context blocks */
66 /* #define DPLANE_DEBUG 1 */
67
68 #ifdef DPLANE_DEBUG
69
70 # define DPLANE_CTX_VALID(p) \
71 assert((p) != NULL)
72
73 #else
74
75 # define DPLANE_CTX_VALID(p)
76
77 #endif /* DPLANE_DEBUG */
78
79 /*
80 * Nexthop information captured for nexthop/nexthop group updates
81 */
82 struct dplane_nexthop_info {
83 uint32_t id;
84 uint32_t old_id;
85 afi_t afi;
86 vrf_id_t vrf_id;
87 int type;
88
89 struct nexthop_group ng;
90 struct nh_grp nh_grp[MULTIPATH_NUM];
91 uint8_t nh_grp_count;
92 };
93
94 /*
95 * Optional extra info about interfaces used in route updates' nexthops.
96 */
97 struct dplane_intf_extra {
98 vrf_id_t vrf_id;
99 uint32_t ifindex;
100 uint32_t flags;
101 uint32_t status;
102
103 TAILQ_ENTRY(dplane_intf_extra) link;
104 };
105
106 /*
107 * Route information captured for route updates.
108 */
109 struct dplane_route_info {
110
111 /* Dest and (optional) source prefixes */
112 struct prefix zd_dest;
113 struct prefix zd_src;
114
115 afi_t zd_afi;
116 safi_t zd_safi;
117
118 int zd_type;
119 int zd_old_type;
120
121 route_tag_t zd_tag;
122 route_tag_t zd_old_tag;
123 uint32_t zd_metric;
124 uint32_t zd_old_metric;
125
126 uint16_t zd_instance;
127 uint16_t zd_old_instance;
128
129 uint8_t zd_distance;
130 uint8_t zd_old_distance;
131
132 uint32_t zd_mtu;
133 uint32_t zd_nexthop_mtu;
134
135 /* Nexthop hash entry info */
136 struct dplane_nexthop_info nhe;
137
138 /* Nexthops */
139 uint32_t zd_nhg_id;
140 struct nexthop_group zd_ng;
141
142 /* Backup nexthops (if present) */
143 struct nexthop_group backup_ng;
144
145 /* "Previous" nexthops, used only in route updates without netlink */
146 struct nexthop_group zd_old_ng;
147 struct nexthop_group old_backup_ng;
148
149 /* Optional list of extra interface info */
150 TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
151 };
152
153 /*
154 * Pseudowire info for the dataplane
155 */
156 struct dplane_pw_info {
157 int type;
158 int af;
159 int status;
160 uint32_t flags;
161 uint32_t nhg_id;
162 union g_addr dest;
163 mpls_label_t local_label;
164 mpls_label_t remote_label;
165
166 /* Nexthops that are valid and installed */
167 struct nexthop_group fib_nhg;
168
169 /* Primary and backup nexthop sets, copied from the resolving route. */
170 struct nexthop_group primary_nhg;
171 struct nexthop_group backup_nhg;
172
173 union pw_protocol_fields fields;
174 };
175
176 /*
177 * Bridge port info for the dataplane
178 */
179 struct dplane_br_port_info {
180 uint32_t sph_filter_cnt;
181 struct in_addr sph_filters[ES_VTEP_MAX_CNT];
182 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h */
183 uint32_t flags;
184 uint32_t backup_nhg_id;
185 };
186
187 /*
188 * Interface/prefix info for the dataplane
189 */
190 struct dplane_intf_info {
191
192 uint32_t metric;
193 uint32_t flags;
194
195 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
196 #define DPLANE_INTF_SECONDARY (1 << 1)
197 #define DPLANE_INTF_BROADCAST (1 << 2)
198 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
199 #define DPLANE_INTF_HAS_LABEL (1 << 4)
200
201 /* Interface address/prefix */
202 struct prefix prefix;
203
204 /* Dest address, for p2p, or broadcast prefix */
205 struct prefix dest_prefix;
206
207 char *label;
208 char label_buf[32];
209 };
210
211 /*
212 * EVPN MAC address info for the dataplane.
213 */
214 struct dplane_mac_info {
215 vlanid_t vid;
216 ifindex_t br_ifindex;
217 struct ethaddr mac;
218 struct in_addr vtep_ip;
219 bool is_sticky;
220 uint32_t nhg_id;
221 uint32_t update_flags;
222 };
223
224 /*
225 * Neighbor info for the dataplane
226 */
227 struct dplane_neigh_info {
228 struct ipaddr ip_addr;
229 union {
230 struct ethaddr mac;
231 struct ipaddr ip_addr;
232 } link;
233 uint32_t flags;
234 uint16_t state;
235 uint32_t update_flags;
236 };
237
238 /*
239 * Neighbor Table
240 */
241 struct dplane_neigh_table {
242 uint8_t family;
243 uint32_t app_probes;
244 uint32_t ucast_probes;
245 uint32_t mcast_probes;
246 };
247
248 /*
249 * Policy based routing rule info for the dataplane
250 */
251 struct dplane_ctx_rule {
252 uint32_t priority;
253
254 /* The route table pointed by this rule */
255 uint32_t table;
256
257 /* Filter criteria */
258 uint32_t filter_bm;
259 uint32_t fwmark;
260 uint8_t dsfield;
261 struct prefix src_ip;
262 struct prefix dst_ip;
263 uint8_t ip_proto;
264
265 uint8_t action_pcp;
266 uint16_t action_vlan_id;
267 uint16_t action_vlan_flags;
268
269 uint32_t action_queue_id;
270
271 char ifname[INTERFACE_NAMSIZ + 1];
272 };
273
274 struct dplane_rule_info {
275 /*
276 * Originating zclient sock fd, so we can know who to send
277 * back to.
278 */
279 int sock;
280
281 int unique;
282 int seq;
283
284 struct dplane_ctx_rule new;
285 struct dplane_ctx_rule old;
286 };
287
288 struct dplane_gre_ctx {
289 uint32_t link_ifindex;
290 unsigned int mtu;
291 struct zebra_l2info_gre info;
292 };
293 /*
294 * The context block used to exchange info about route updates across
295 * the boundary between the zebra main context (and pthread) and the
296 * dataplane layer (and pthread).
297 */
298 struct zebra_dplane_ctx {
299
300 /* Operation code */
301 enum dplane_op_e zd_op;
302
303 /* Status on return */
304 enum zebra_dplane_result zd_status;
305
306 /* Dplane provider id */
307 uint32_t zd_provider;
308
310 /* Flags - used by providers, e.g. to mark an update to skip the kernel */
310 int zd_flags;
311
312 bool zd_is_update;
313
314 uint32_t zd_seq;
315 uint32_t zd_old_seq;
316
317 /* Some updates may be generated by notifications: allow the
318 * plugin to notice and ignore results from its own notifications.
319 */
320 uint32_t zd_notif_provider;
321
322 /* TODO -- internal/sub-operation status? */
323 enum zebra_dplane_result zd_remote_status;
324 enum zebra_dplane_result zd_kernel_status;
325
326 vrf_id_t zd_vrf_id;
327 uint32_t zd_table_id;
328
329 char zd_ifname[INTERFACE_NAMSIZ];
330 ifindex_t zd_ifindex;
331
332 /* Support info for different kinds of updates */
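	/* Only the union member that matches zd_op is meaningful;
	 * dplane_ctx_free_internal() switches on zd_op to release any
	 * per-type allocations held in the active member.
	 */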
333 union {
334 struct dplane_route_info rinfo;
335 struct zebra_lsp lsp;
336 struct dplane_pw_info pw;
337 struct dplane_br_port_info br_port;
338 struct dplane_intf_info intf;
339 struct dplane_mac_info macinfo;
340 struct dplane_neigh_info neigh;
341 struct dplane_rule_info rule;
342 struct zebra_pbr_iptable iptable;
343 struct zebra_pbr_ipset ipset;
344 struct {
345 struct zebra_pbr_ipset_entry entry;
346 struct zebra_pbr_ipset_info info;
347 } ipset_entry;
348 struct dplane_neigh_table neightable;
349 struct dplane_gre_ctx gre;
350 } u;
351
352 /* Namespace info, used especially for netlink kernel communication */
353 struct zebra_dplane_info zd_ns_info;
354
355 /* Embedded list linkage */
356 TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
357 };
358
359 /* Flag that can be set by a pre-kernel provider as a signal that an update
360 * should bypass the kernel.
361 */
362 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
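/* Rough usage sketch: a provider running before the kernel calls
 * dplane_ctx_set_skip_kernel(ctx); later processing checks
 * dplane_ctx_is_skip_kernel(ctx) and leaves the kernel untouched.
 */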
363
364
365 /*
366 * Registration block for one dataplane provider.
367 */
368 struct zebra_dplane_provider {
369 /* Name */
370 char dp_name[DPLANE_PROVIDER_NAMELEN + 1];
371
372 /* Priority, for ordering among providers */
373 uint8_t dp_priority;
374
375 /* Id value */
376 uint32_t dp_id;
377
378 /* Mutex */
379 pthread_mutex_t dp_mutex;
380
381 /* Plugin-provided extra data */
382 void *dp_data;
383
384 /* Flags */
385 int dp_flags;
386
387 int (*dp_start)(struct zebra_dplane_provider *prov);
388
389 int (*dp_fp)(struct zebra_dplane_provider *prov);
390
391 int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
392
393 _Atomic uint32_t dp_in_counter;
394 _Atomic uint32_t dp_in_queued;
395 _Atomic uint32_t dp_in_max;
396 _Atomic uint32_t dp_out_counter;
397 _Atomic uint32_t dp_out_queued;
398 _Atomic uint32_t dp_out_max;
399 _Atomic uint32_t dp_error_counter;
400
401 /* Queue of contexts inbound to the provider */
402 struct dplane_ctx_q dp_ctx_in_q;
403
404 /* Queue of completed contexts outbound from the provider back
405 * towards the dataplane module.
406 */
407 struct dplane_ctx_q dp_ctx_out_q;
408
409 /* Embedded list linkage for provider objects */
410 TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
411 };
412
413 /* Declare types for list of zns info objects */
414 PREDECL_DLIST(zns_info_list);
415
416 struct dplane_zns_info {
417 struct zebra_dplane_info info;
418
419 /* Read event */
420 struct thread *t_read;
421
422 /* List linkage */
423 struct zns_info_list_item link;
424 };
425
426 /*
427 * Globals
428 */
429 static struct zebra_dplane_globals {
430 /* Mutex to control access to dataplane components */
431 pthread_mutex_t dg_mutex;
432
433 /* Results callback registered by zebra 'core' */
434 int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
435
436 /* Sentinel for beginning of shutdown */
437 volatile bool dg_is_shutdown;
438
439 /* Sentinel for end of shutdown */
440 volatile bool dg_run;
441
442 /* Update context queue inbound to the dataplane */
443 TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
444
445 /* Ordered list of providers */
446 TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
447
448 /* List of info about each zns */
449 struct zns_info_list_head dg_zns_list;
450
451 /* Counter used to assign internal ids to providers */
452 uint32_t dg_provider_id;
453
454 /* Limit number of pending, unprocessed updates */
455 _Atomic uint32_t dg_max_queued_updates;
456
457 /* Control whether system route notifications should be produced. */
458 bool dg_sys_route_notifs;
459
460 /* Limit number of new updates dequeued at once, to pace an
461 * incoming burst.
462 */
463 uint32_t dg_updates_per_cycle;
464
465 _Atomic uint32_t dg_routes_in;
466 _Atomic uint32_t dg_routes_queued;
467 _Atomic uint32_t dg_routes_queued_max;
468 _Atomic uint32_t dg_route_errors;
469 _Atomic uint32_t dg_other_errors;
470
471 _Atomic uint32_t dg_nexthops_in;
472 _Atomic uint32_t dg_nexthop_errors;
473
474 _Atomic uint32_t dg_lsps_in;
475 _Atomic uint32_t dg_lsp_errors;
476
477 _Atomic uint32_t dg_pws_in;
478 _Atomic uint32_t dg_pw_errors;
479
480 _Atomic uint32_t dg_br_port_in;
481 _Atomic uint32_t dg_br_port_errors;
482
483 _Atomic uint32_t dg_intf_addrs_in;
484 _Atomic uint32_t dg_intf_addr_errors;
485
486 _Atomic uint32_t dg_macs_in;
487 _Atomic uint32_t dg_mac_errors;
488
489 _Atomic uint32_t dg_neighs_in;
490 _Atomic uint32_t dg_neigh_errors;
491
492 _Atomic uint32_t dg_rules_in;
493 _Atomic uint32_t dg_rule_errors;
494
495 _Atomic uint32_t dg_update_yields;
496
497 _Atomic uint32_t dg_iptable_in;
498 _Atomic uint32_t dg_iptable_errors;
499
500 _Atomic uint32_t dg_ipset_in;
501 _Atomic uint32_t dg_ipset_errors;
502 _Atomic uint32_t dg_ipset_entry_in;
503 _Atomic uint32_t dg_ipset_entry_errors;
504
505 _Atomic uint32_t dg_neightable_in;
506 _Atomic uint32_t dg_neightable_errors;
507
508 _Atomic uint32_t dg_gre_set_in;
509 _Atomic uint32_t dg_gre_set_errors;
510
511 /* Dataplane pthread */
512 struct frr_pthread *dg_pthread;
513
514 /* Event-delivery context 'master' for the dplane */
515 struct thread_master *dg_master;
516
517 /* Event/'thread' pointer for queued updates */
518 struct thread *dg_t_update;
519
520 /* Event pointer for pending shutdown check loop */
521 struct thread *dg_t_shutdown_check;
522
523 } zdplane_info;
524
525 /* Instantiate zns list type */
526 DECLARE_DLIST(zns_info_list, struct dplane_zns_info, link);
527
528 /*
529 * Lock and unlock for interactions with the zebra 'core' pthread
530 */
531 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
532 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
533
534
535 /*
536 * Lock and unlock for individual providers
537 */
538 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
539 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
540
541 /* Prototypes */
542 static int dplane_thread_loop(struct thread *event);
543 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
544 struct zebra_ns *zns);
545 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
546 enum dplane_op_e op);
547 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
548 enum dplane_op_e op);
549 static enum zebra_dplane_result intf_addr_update_internal(
550 const struct interface *ifp, const struct connected *ifc,
551 enum dplane_op_e op);
552 static enum zebra_dplane_result mac_update_common(
553 enum dplane_op_e op, const struct interface *ifp,
554 const struct interface *br_ifp,
555 vlanid_t vid, const struct ethaddr *mac,
556 struct in_addr vtep_ip, bool sticky, uint32_t nhg_id,
557 uint32_t update_flags);
558 static enum zebra_dplane_result
559 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
560 const void *link, int link_family,
561 const struct ipaddr *ip, uint32_t flags, uint16_t state,
562 uint32_t update_flags, int protocol);
563
564 /*
565 * Public APIs
566 */
567
568 /* Obtain thread_master for dataplane thread */
569 struct thread_master *dplane_get_thread_master(void)
570 {
571 return zdplane_info.dg_master;
572 }
573
574 /*
575 * Allocate a dataplane update context
576 */
577 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
578 {
579 struct zebra_dplane_ctx *p;
580
581 /* TODO -- just alloc'ing memory, but would like to maintain
582 * a pool
583 */
584 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
585
586 return p;
587 }
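/* Typical caller flow (sketch): allocate a ctx, initialize it for the
 * operation at hand (e.g. dplane_ctx_route_init()), submit it for
 * processing, and release it with dplane_ctx_fini() once the result
 * has been consumed.
 */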
588
589 /* Enable system route notifications */
590 void dplane_enable_sys_route_notifs(void)
591 {
592 zdplane_info.dg_sys_route_notifs = true;
593 }
594
595 /*
596 * Clean up dependent/internal allocations inside a context object
597 */
598 static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
599 {
600 struct dplane_intf_extra *if_extra, *if_tmp;
601
602 /*
603 * Some internal allocations may need to be freed, depending on
604 * the type of info captured in the ctx.
605 */
606 switch (ctx->zd_op) {
607 case DPLANE_OP_ROUTE_INSTALL:
608 case DPLANE_OP_ROUTE_UPDATE:
609 case DPLANE_OP_ROUTE_DELETE:
610 case DPLANE_OP_SYS_ROUTE_ADD:
611 case DPLANE_OP_SYS_ROUTE_DELETE:
612 case DPLANE_OP_ROUTE_NOTIFY:
613
614 /* Free allocated nexthops */
615 if (ctx->u.rinfo.zd_ng.nexthop) {
616 /* This deals with recursive nexthops too */
617 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
618
619 ctx->u.rinfo.zd_ng.nexthop = NULL;
620 }
621
622 /* Free backup info also (if present) */
623 if (ctx->u.rinfo.backup_ng.nexthop) {
624 /* This deals with recursive nexthops too */
625 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
626
627 ctx->u.rinfo.backup_ng.nexthop = NULL;
628 }
629
630 if (ctx->u.rinfo.zd_old_ng.nexthop) {
631 /* This deals with recursive nexthops too */
632 nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop);
633
634 ctx->u.rinfo.zd_old_ng.nexthop = NULL;
635 }
636
637 if (ctx->u.rinfo.old_backup_ng.nexthop) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop);
640
641 ctx->u.rinfo.old_backup_ng.nexthop = NULL;
642 }
643
644 /* Optional extra interface info */
645 TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
646 link, if_tmp) {
647 TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
648 link);
649 XFREE(MTYPE_DP_INTF, if_extra);
650 }
651
652 break;
653
654 case DPLANE_OP_NH_INSTALL:
655 case DPLANE_OP_NH_UPDATE:
656 case DPLANE_OP_NH_DELETE: {
657 if (ctx->u.rinfo.nhe.ng.nexthop) {
658 /* This deals with recursive nexthops too */
659 nexthops_free(ctx->u.rinfo.nhe.ng.nexthop);
660
661 ctx->u.rinfo.nhe.ng.nexthop = NULL;
662 }
663 break;
664 }
665
666 case DPLANE_OP_LSP_INSTALL:
667 case DPLANE_OP_LSP_UPDATE:
668 case DPLANE_OP_LSP_DELETE:
669 case DPLANE_OP_LSP_NOTIFY:
670 {
671 struct zebra_nhlfe *nhlfe;
672
673 /* Unlink and free allocated NHLFEs */
674 frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
675 nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
676 zebra_mpls_nhlfe_free(nhlfe);
677 }
678
679 /* Unlink and free allocated backup NHLFEs, if present */
680 frr_each_safe(nhlfe_list,
681 &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
682 nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
683 nhlfe);
684 zebra_mpls_nhlfe_free(nhlfe);
685 }
686
687 /* Clear pointers in the lsp struct, in case freed context
688 * structs are ever cached for reuse.
689 */
690 nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
691 ctx->u.lsp.best_nhlfe = NULL;
692 nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
693
694 break;
695 }
696
697 case DPLANE_OP_PW_INSTALL:
698 case DPLANE_OP_PW_UNINSTALL:
699 /* Free allocated nexthops */
700 if (ctx->u.pw.fib_nhg.nexthop) {
701 /* This deals with recursive nexthops too */
702 nexthops_free(ctx->u.pw.fib_nhg.nexthop);
703
704 ctx->u.pw.fib_nhg.nexthop = NULL;
705 }
706 if (ctx->u.pw.primary_nhg.nexthop) {
707 nexthops_free(ctx->u.pw.primary_nhg.nexthop);
708
709 ctx->u.pw.primary_nhg.nexthop = NULL;
710 }
711 if (ctx->u.pw.backup_nhg.nexthop) {
712 nexthops_free(ctx->u.pw.backup_nhg.nexthop);
713
714 ctx->u.pw.backup_nhg.nexthop = NULL;
715 }
716 break;
717
718 case DPLANE_OP_ADDR_INSTALL:
719 case DPLANE_OP_ADDR_UNINSTALL:
720 case DPLANE_OP_INTF_ADDR_ADD:
721 case DPLANE_OP_INTF_ADDR_DEL:
722 /* Maybe free label string, if allocated */
723 if (ctx->u.intf.label != NULL &&
724 ctx->u.intf.label != ctx->u.intf.label_buf) {
725 free(ctx->u.intf.label);
726 ctx->u.intf.label = NULL;
727 }
728 break;
729
730 case DPLANE_OP_MAC_INSTALL:
731 case DPLANE_OP_MAC_DELETE:
732 case DPLANE_OP_NEIGH_INSTALL:
733 case DPLANE_OP_NEIGH_UPDATE:
734 case DPLANE_OP_NEIGH_DELETE:
735 case DPLANE_OP_VTEP_ADD:
736 case DPLANE_OP_VTEP_DELETE:
737 case DPLANE_OP_RULE_ADD:
738 case DPLANE_OP_RULE_DELETE:
739 case DPLANE_OP_RULE_UPDATE:
740 case DPLANE_OP_NEIGH_DISCOVER:
741 case DPLANE_OP_BR_PORT_UPDATE:
742 case DPLANE_OP_NEIGH_IP_INSTALL:
743 case DPLANE_OP_NEIGH_IP_DELETE:
744 case DPLANE_OP_NONE:
745 case DPLANE_OP_IPSET_ADD:
746 case DPLANE_OP_IPSET_DELETE:
747 break;
748
749 case DPLANE_OP_IPSET_ENTRY_ADD:
750 case DPLANE_OP_IPSET_ENTRY_DELETE:
751 break;
752 case DPLANE_OP_NEIGH_TABLE_UPDATE:
753 break;
754 case DPLANE_OP_IPTABLE_ADD:
755 case DPLANE_OP_IPTABLE_DELETE:
756 if (ctx->u.iptable.interface_name_list) {
757 struct listnode *node, *nnode;
758 char *ifname;
759
760 for (ALL_LIST_ELEMENTS(
761 ctx->u.iptable.interface_name_list, node,
762 nnode, ifname)) {
763 LISTNODE_DETACH(
764 ctx->u.iptable.interface_name_list,
765 node);
766 XFREE(MTYPE_DP_NETFILTER, ifname);
767 }
768 list_delete(&ctx->u.iptable.interface_name_list);
769 }
770 break;
771 case DPLANE_OP_GRE_SET:
772 break;
773 }
774 }
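/* The switch above enumerates every dplane_op_e value without a
 * 'default' case; with -Wswitch enabled, a newly added operation will
 * be flagged here until its cleanup needs (if any) are considered.
 */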
775
776 /*
777 * Free a dataplane results context.
778 */
779 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
780 {
781 if (pctx == NULL)
782 return;
783
784 DPLANE_CTX_VALID(*pctx);
785
786 /* TODO -- just freeing memory, but would like to maintain
787 * a pool
788 */
789
790 /* Some internal allocations may need to be freed, depending on
791 * the type of info captured in the ctx.
792 */
793 dplane_ctx_free_internal(*pctx);
794
795 XFREE(MTYPE_DP_CTX, *pctx);
796 }
797
798 /*
799 * Reset an allocated context object for re-use. All internal allocations are
800 * freed and the context is memset.
801 */
802 void dplane_ctx_reset(struct zebra_dplane_ctx *ctx)
803 {
804 dplane_ctx_free_internal(ctx);
805 memset(ctx, 0, sizeof(*ctx));
806 }
807
808 /*
809 * Return a context block to the dplane module after processing
810 */
811 void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
812 {
813 /* TODO -- maintain pool; for now, just free */
814 dplane_ctx_free(pctx);
815 }
816
817 /* Enqueue a context block */
818 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
819 const struct zebra_dplane_ctx *ctx)
820 {
821 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
822 }
823
824 /* Append a list of context blocks to another list */
825 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
826 struct dplane_ctx_q *from_list)
827 {
828 if (TAILQ_FIRST(from_list)) {
829 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
830
831 /* And clear 'from' list */
832 TAILQ_INIT(from_list);
833 }
834 }
835
836 struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
837 {
838 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
839
840 return ctx;
841 }
842
843 /* Dequeue a context block from the head of a list */
844 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
845 {
846 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
847
848 if (ctx)
849 TAILQ_REMOVE(q, ctx, zd_q_entries);
850
851 return ctx;
852 }
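/* Draining a list of contexts typically looks like (sketch):
 *
 *	while ((ctx = dplane_ctx_dequeue(&list)) != NULL) {
 *		... handle the context ...
 *		dplane_ctx_fini(&ctx);
 *	}
 */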
853
854 /*
855 * Accessors for information from the context object
856 */
857 enum zebra_dplane_result dplane_ctx_get_status(
858 const struct zebra_dplane_ctx *ctx)
859 {
860 DPLANE_CTX_VALID(ctx);
861
862 return ctx->zd_status;
863 }
864
865 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
866 enum zebra_dplane_result status)
867 {
868 DPLANE_CTX_VALID(ctx);
869
870 ctx->zd_status = status;
871 }
872
873 /* Retrieve last/current provider id */
874 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
875 {
876 DPLANE_CTX_VALID(ctx);
877 return ctx->zd_provider;
878 }
879
880 /* Providers that run before the kernel can control whether a kernel
881 * update should be performed.
882 */
883 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
884 {
885 DPLANE_CTX_VALID(ctx);
886
887 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
888 }
889
890 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
891 {
892 DPLANE_CTX_VALID(ctx);
893
894 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
895 }
896
897 void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
898 {
899 DPLANE_CTX_VALID(ctx);
900 ctx->zd_op = op;
901 }
902
903 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
904 {
905 DPLANE_CTX_VALID(ctx);
906
907 return ctx->zd_op;
908 }
909
910 const char *dplane_op2str(enum dplane_op_e op)
911 {
912 const char *ret = "UNKNOWN";
913
914 switch (op) {
915 case DPLANE_OP_NONE:
916 ret = "NONE";
917 break;
918
919 /* Route update */
920 case DPLANE_OP_ROUTE_INSTALL:
921 ret = "ROUTE_INSTALL";
922 break;
923 case DPLANE_OP_ROUTE_UPDATE:
924 ret = "ROUTE_UPDATE";
925 break;
926 case DPLANE_OP_ROUTE_DELETE:
927 ret = "ROUTE_DELETE";
928 break;
929 case DPLANE_OP_ROUTE_NOTIFY:
930 ret = "ROUTE_NOTIFY";
931 break;
932
933 /* Nexthop update */
934 case DPLANE_OP_NH_INSTALL:
935 ret = "NH_INSTALL";
936 break;
937 case DPLANE_OP_NH_UPDATE:
938 ret = "NH_UPDATE";
939 break;
940 case DPLANE_OP_NH_DELETE:
941 ret = "NH_DELETE";
942 break;
943
944 case DPLANE_OP_LSP_INSTALL:
945 ret = "LSP_INSTALL";
946 break;
947 case DPLANE_OP_LSP_UPDATE:
948 ret = "LSP_UPDATE";
949 break;
950 case DPLANE_OP_LSP_DELETE:
951 ret = "LSP_DELETE";
952 break;
953 case DPLANE_OP_LSP_NOTIFY:
954 ret = "LSP_NOTIFY";
955 break;
956
957 case DPLANE_OP_PW_INSTALL:
958 ret = "PW_INSTALL";
959 break;
960 case DPLANE_OP_PW_UNINSTALL:
961 ret = "PW_UNINSTALL";
962 break;
963
964 case DPLANE_OP_SYS_ROUTE_ADD:
965 ret = "SYS_ROUTE_ADD";
966 break;
967 case DPLANE_OP_SYS_ROUTE_DELETE:
968 ret = "SYS_ROUTE_DEL";
969 break;
970
971 case DPLANE_OP_BR_PORT_UPDATE:
972 ret = "BR_PORT_UPDATE";
973 break;
974
975 case DPLANE_OP_ADDR_INSTALL:
976 ret = "ADDR_INSTALL";
977 break;
978 case DPLANE_OP_ADDR_UNINSTALL:
979 ret = "ADDR_UNINSTALL";
980 break;
981
982 case DPLANE_OP_MAC_INSTALL:
983 ret = "MAC_INSTALL";
984 break;
985 case DPLANE_OP_MAC_DELETE:
986 ret = "MAC_DELETE";
987 break;
988
989 case DPLANE_OP_NEIGH_INSTALL:
990 ret = "NEIGH_INSTALL";
991 break;
992 case DPLANE_OP_NEIGH_UPDATE:
993 ret = "NEIGH_UPDATE";
994 break;
995 case DPLANE_OP_NEIGH_DELETE:
996 ret = "NEIGH_DELETE";
997 break;
998 case DPLANE_OP_VTEP_ADD:
999 ret = "VTEP_ADD";
1000 break;
1001 case DPLANE_OP_VTEP_DELETE:
1002 ret = "VTEP_DELETE";
1003 break;
1004
1005 case DPLANE_OP_RULE_ADD:
1006 ret = "RULE_ADD";
1007 break;
1008 case DPLANE_OP_RULE_DELETE:
1009 ret = "RULE_DELETE";
1010 break;
1011 case DPLANE_OP_RULE_UPDATE:
1012 ret = "RULE_UPDATE";
1013 break;
1014
1015 case DPLANE_OP_NEIGH_DISCOVER:
1016 ret = "NEIGH_DISCOVER";
1017 break;
1018
1019 case DPLANE_OP_IPTABLE_ADD:
1020 ret = "IPTABLE_ADD";
1021 break;
1022 case DPLANE_OP_IPTABLE_DELETE:
1023 ret = "IPTABLE_DELETE";
1024 break;
1025 case DPLANE_OP_IPSET_ADD:
1026 ret = "IPSET_ADD";
1027 break;
1028 case DPLANE_OP_IPSET_DELETE:
1029 ret = "IPSET_DELETE";
1030 break;
1031 case DPLANE_OP_IPSET_ENTRY_ADD:
1032 ret = "IPSET_ENTRY_ADD";
1033 break;
1034 case DPLANE_OP_IPSET_ENTRY_DELETE:
1035 ret = "IPSET_ENTRY_DELETE";
1036 break;
1037 case DPLANE_OP_NEIGH_IP_INSTALL:
1038 ret = "NEIGH_IP_INSTALL";
1039 break;
1040 case DPLANE_OP_NEIGH_IP_DELETE:
1041 ret = "NEIGH_IP_DELETE";
1042 break;
1043 case DPLANE_OP_NEIGH_TABLE_UPDATE:
1044 ret = "NEIGH_TABLE_UPDATE";
1045 break;
1046
1047 case DPLANE_OP_GRE_SET:
1048 ret = "GRE_SET";
1049 break;
1050
1051 case DPLANE_OP_INTF_ADDR_ADD:
1052 return "INTF_ADDR_ADD";
1053
1054 case DPLANE_OP_INTF_ADDR_DEL:
1055 return "INTF_ADDR_DEL";
1056 }
1057
1058 return ret;
1059 }
1060
1061 const char *dplane_res2str(enum zebra_dplane_result res)
1062 {
1063 const char *ret = "<Unknown>";
1064
1065 switch (res) {
1066 case ZEBRA_DPLANE_REQUEST_FAILURE:
1067 ret = "FAILURE";
1068 break;
1069 case ZEBRA_DPLANE_REQUEST_QUEUED:
1070 ret = "QUEUED";
1071 break;
1072 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1073 ret = "SUCCESS";
1074 break;
1075 }
1076
1077 return ret;
1078 }
1079
1080 void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
1081 const struct prefix *dest)
1082 {
1083 DPLANE_CTX_VALID(ctx);
1084
1085 prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
1086 }
1087
1088 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
1089 {
1090 DPLANE_CTX_VALID(ctx);
1091
1092 return &(ctx->u.rinfo.zd_dest);
1093 }
1094
1095 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
1096 {
1097 DPLANE_CTX_VALID(ctx);
1098
1099 if (src)
1100 prefix_copy(&(ctx->u.rinfo.zd_src), src);
1101 else
1102 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
1103 }
1104
1105 /* Source prefix is a little special - return NULL for "no src prefix" */
1106 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
1107 {
1108 DPLANE_CTX_VALID(ctx);
1109
1110 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
1111 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
1112 return NULL;
1113 } else {
1114 return &(ctx->u.rinfo.zd_src);
1115 }
1116 }
1117
1118 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
1119 {
1120 DPLANE_CTX_VALID(ctx);
1121
1122 return ctx->zd_is_update;
1123 }
1124
1125 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
1126 {
1127 DPLANE_CTX_VALID(ctx);
1128
1129 return ctx->zd_seq;
1130 }
1131
1132 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
1133 {
1134 DPLANE_CTX_VALID(ctx);
1135
1136 return ctx->zd_old_seq;
1137 }
1138
1139 void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
1140 {
1141 DPLANE_CTX_VALID(ctx);
1142
1143 ctx->zd_vrf_id = vrf;
1144 }
1145
1146 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
1147 {
1148 DPLANE_CTX_VALID(ctx);
1149
1150 return ctx->zd_vrf_id;
1151 }
1152
1153 /* In some paths we have only a namespace id */
1154 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid)
1155 {
1156 DPLANE_CTX_VALID(ctx);
1157
1158 ctx->zd_ns_info.ns_id = nsid;
1159 }
1160
1161 ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx)
1162 {
1163 DPLANE_CTX_VALID(ctx);
1164
1165 return ctx->zd_ns_info.ns_id;
1166 }
1167
1168 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
1169 {
1170 DPLANE_CTX_VALID(ctx);
1171
1172 return (ctx->zd_notif_provider != 0);
1173 }
1174
1175 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
1176 {
1177 DPLANE_CTX_VALID(ctx);
1178
1179 return ctx->zd_notif_provider;
1180 }
1181
1182 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
1183 uint32_t id)
1184 {
1185 DPLANE_CTX_VALID(ctx);
1186
1187 ctx->zd_notif_provider = id;
1188 }
1189
1190 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
1191 {
1192 DPLANE_CTX_VALID(ctx);
1193
1194 return ctx->zd_ifname;
1195 }
1196
1197 void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname)
1198 {
1199 DPLANE_CTX_VALID(ctx);
1200
1201 if (!ifname)
1202 return;
1203
1204 strlcpy(ctx->zd_ifname, ifname, sizeof(ctx->zd_ifname));
1205 }
1206
1207 ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
1208 {
1209 DPLANE_CTX_VALID(ctx);
1210
1211 return ctx->zd_ifindex;
1212 }
1213
1214 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex)
1215 {
1216 DPLANE_CTX_VALID(ctx);
1217
1218 ctx->zd_ifindex = ifindex;
1219 }
1220
1221 void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
1222 {
1223 DPLANE_CTX_VALID(ctx);
1224
1225 ctx->u.rinfo.zd_type = type;
1226 }
1227
1228 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
1229 {
1230 DPLANE_CTX_VALID(ctx);
1231
1232 return ctx->u.rinfo.zd_type;
1233 }
1234
1235 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
1236 {
1237 DPLANE_CTX_VALID(ctx);
1238
1239 return ctx->u.rinfo.zd_old_type;
1240 }
1241
1242 void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
1243 {
1244 DPLANE_CTX_VALID(ctx);
1245
1246 ctx->u.rinfo.zd_afi = afi;
1247 }
1248
1249 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
1250 {
1251 DPLANE_CTX_VALID(ctx);
1252
1253 return ctx->u.rinfo.zd_afi;
1254 }
1255
1256 void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
1257 {
1258 DPLANE_CTX_VALID(ctx);
1259
1260 ctx->u.rinfo.zd_safi = safi;
1261 }
1262
1263 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
1264 {
1265 DPLANE_CTX_VALID(ctx);
1266
1267 return ctx->u.rinfo.zd_safi;
1268 }
1269
1270 void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
1271 {
1272 DPLANE_CTX_VALID(ctx);
1273
1274 ctx->zd_table_id = table;
1275 }
1276
1277 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
1278 {
1279 DPLANE_CTX_VALID(ctx);
1280
1281 return ctx->zd_table_id;
1282 }
1283
1284 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
1285 {
1286 DPLANE_CTX_VALID(ctx);
1287
1288 return ctx->u.rinfo.zd_tag;
1289 }
1290
1291 void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
1292 {
1293 DPLANE_CTX_VALID(ctx);
1294
1295 ctx->u.rinfo.zd_tag = tag;
1296 }
1297
1298 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
1299 {
1300 DPLANE_CTX_VALID(ctx);
1301
1302 return ctx->u.rinfo.zd_old_tag;
1303 }
1304
1305 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
1306 {
1307 DPLANE_CTX_VALID(ctx);
1308
1309 return ctx->u.rinfo.zd_instance;
1310 }
1311
1312 void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
1313 {
1314 DPLANE_CTX_VALID(ctx);
1315
1316 ctx->u.rinfo.zd_instance = instance;
1317 }
1318
1319 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
1320 {
1321 DPLANE_CTX_VALID(ctx);
1322
1323 return ctx->u.rinfo.zd_old_instance;
1324 }
1325
1326 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
1327 {
1328 DPLANE_CTX_VALID(ctx);
1329
1330 return ctx->u.rinfo.zd_metric;
1331 }
1332
1333 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
1334 {
1335 DPLANE_CTX_VALID(ctx);
1336
1337 return ctx->u.rinfo.zd_old_metric;
1338 }
1339
1340 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
1341 {
1342 DPLANE_CTX_VALID(ctx);
1343
1344 return ctx->u.rinfo.zd_mtu;
1345 }
1346
1347 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
1348 {
1349 DPLANE_CTX_VALID(ctx);
1350
1351 return ctx->u.rinfo.zd_nexthop_mtu;
1352 }
1353
1354 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
1355 {
1356 DPLANE_CTX_VALID(ctx);
1357
1358 return ctx->u.rinfo.zd_distance;
1359 }
1360
1361 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
1362 {
1363 DPLANE_CTX_VALID(ctx);
1364
1365 ctx->u.rinfo.zd_distance = distance;
1366 }
1367
1368 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
1369 {
1370 DPLANE_CTX_VALID(ctx);
1371
1372 return ctx->u.rinfo.zd_old_distance;
1373 }
1374
1375 /*
1376 * Set the nexthops associated with a context: note that processing code
1377 * may well expect that nexthops are in canonical (sorted) order, so we
1378 * will enforce that here.
1379 */
1380 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1381 {
1382 DPLANE_CTX_VALID(ctx);
1383
1384 if (ctx->u.rinfo.zd_ng.nexthop) {
1385 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1386 ctx->u.rinfo.zd_ng.nexthop = NULL;
1387 }
1388 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1389 }
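/* Note: the nexthops are copied into the context, so the caller retains
 * ownership of the list it passed in.
 */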
1390
1391 /*
1392 * Set the list of backup nexthops; their ordering is preserved (they're not
1393 * re-sorted).
1394 */
1395 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
1396 const struct nexthop_group *nhg)
1397 {
1398 struct nexthop *nh, *last_nh, *nexthop;
1399
1400 DPLANE_CTX_VALID(ctx);
1401
1402 if (ctx->u.rinfo.backup_ng.nexthop) {
1403 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
1404 ctx->u.rinfo.backup_ng.nexthop = NULL;
1405 }
1406
1407 last_nh = NULL;
1408
1409 /* Be careful to preserve the order of the backup list */
1410 for (nh = nhg->nexthop; nh; nh = nh->next) {
1411 nexthop = nexthop_dup(nh, NULL);
1412
1413 if (last_nh)
1414 NEXTHOP_APPEND(last_nh, nexthop);
1415 else
1416 ctx->u.rinfo.backup_ng.nexthop = nexthop;
1417
1418 last_nh = nexthop;
1419 }
1420 }
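/* The ordering matters because primary nexthops refer to their backups
 * by index into this list; re-sorting would break those references.
 */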
1421
1422 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1423 {
1424 DPLANE_CTX_VALID(ctx);
1425 return ctx->u.rinfo.zd_nhg_id;
1426 }
1427
1428 const struct nexthop_group *dplane_ctx_get_ng(
1429 const struct zebra_dplane_ctx *ctx)
1430 {
1431 DPLANE_CTX_VALID(ctx);
1432
1433 return &(ctx->u.rinfo.zd_ng);
1434 }
1435
1436 const struct nexthop_group *
1437 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
1438 {
1439 DPLANE_CTX_VALID(ctx);
1440
1441 return &(ctx->u.rinfo.backup_ng);
1442 }
1443
1444 const struct nexthop_group *
1445 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
1446 {
1447 DPLANE_CTX_VALID(ctx);
1448
1449 return &(ctx->u.rinfo.zd_old_ng);
1450 }
1451
1452 const struct nexthop_group *
1453 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
1454 {
1455 DPLANE_CTX_VALID(ctx);
1456
1457 return &(ctx->u.rinfo.old_backup_ng);
1458 }
1459
1460 const struct zebra_dplane_info *dplane_ctx_get_ns(
1461 const struct zebra_dplane_ctx *ctx)
1462 {
1463 DPLANE_CTX_VALID(ctx);
1464
1465 return &(ctx->zd_ns_info);
1466 }
1467
1468 /* Accessors for nexthop information */
1469 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
1470 {
1471 DPLANE_CTX_VALID(ctx);
1472 return ctx->u.rinfo.nhe.id;
1473 }
1474
1475 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
1476 {
1477 DPLANE_CTX_VALID(ctx);
1478 return ctx->u.rinfo.nhe.old_id;
1479 }
1480
1481 afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
1482 {
1483 DPLANE_CTX_VALID(ctx);
1484 return ctx->u.rinfo.nhe.afi;
1485 }
1486
1487 vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
1488 {
1489 DPLANE_CTX_VALID(ctx);
1490 return ctx->u.rinfo.nhe.vrf_id;
1491 }
1492
1493 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
1494 {
1495 DPLANE_CTX_VALID(ctx);
1496 return ctx->u.rinfo.nhe.type;
1497 }
1498
1499 const struct nexthop_group *
1500 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
1501 {
1502 DPLANE_CTX_VALID(ctx);
1503 return &(ctx->u.rinfo.nhe.ng);
1504 }
1505
1506 const struct nh_grp *
1507 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
1508 {
1509 DPLANE_CTX_VALID(ctx);
1510 return ctx->u.rinfo.nhe.nh_grp;
1511 }
1512
1513 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
1514 {
1515 DPLANE_CTX_VALID(ctx);
1516 return ctx->u.rinfo.nhe.nh_grp_count;
1517 }
1518
1519 /* Accessors for LSP information */
1520
1521 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
1522 {
1523 DPLANE_CTX_VALID(ctx);
1524
1525 return ctx->u.lsp.ile.in_label;
1526 }
1527
1528 void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
1529 {
1530 DPLANE_CTX_VALID(ctx);
1531
1532 ctx->u.lsp.ile.in_label = label;
1533 }
1534
1535 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
1536 {
1537 DPLANE_CTX_VALID(ctx);
1538
1539 return ctx->u.lsp.addr_family;
1540 }
1541
1542 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
1543 uint8_t family)
1544 {
1545 DPLANE_CTX_VALID(ctx);
1546
1547 ctx->u.lsp.addr_family = family;
1548 }
1549
1550 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
1551 {
1552 DPLANE_CTX_VALID(ctx);
1553
1554 return ctx->u.lsp.flags;
1555 }
1556
1557 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
1558 uint32_t flags)
1559 {
1560 DPLANE_CTX_VALID(ctx);
1561
1562 ctx->u.lsp.flags = flags;
1563 }
1564
1565 const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
1566 const struct zebra_dplane_ctx *ctx)
1567 {
1568 DPLANE_CTX_VALID(ctx);
1569 return &(ctx->u.lsp.nhlfe_list);
1570 }
1571
1572 const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
1573 const struct zebra_dplane_ctx *ctx)
1574 {
1575 DPLANE_CTX_VALID(ctx);
1576 return &(ctx->u.lsp.backup_nhlfe_list);
1577 }
1578
1579 struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1580 enum lsp_types_t lsp_type,
1581 enum nexthop_types_t nh_type,
1582 const union g_addr *gate,
1583 ifindex_t ifindex, uint8_t num_labels,
1584 mpls_label_t *out_labels)
1585 {
1586 struct zebra_nhlfe *nhlfe;
1587
1588 DPLANE_CTX_VALID(ctx);
1589
1590 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1591 lsp_type, nh_type, gate,
1592 ifindex, num_labels, out_labels);
1593
1594 return nhlfe;
1595 }
1596
1597 struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
1598 struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
1599 enum nexthop_types_t nh_type, const union g_addr *gate,
1600 ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
1601 {
1602 struct zebra_nhlfe *nhlfe;
1603
1604 DPLANE_CTX_VALID(ctx);
1605
1606 nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
1607 lsp_type, nh_type, gate,
1608 ifindex, num_labels,
1609 out_labels);
1610
1611 return nhlfe;
1612 }
1613
1614 const struct zebra_nhlfe *
1615 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
1616 {
1617 DPLANE_CTX_VALID(ctx);
1618
1619 return ctx->u.lsp.best_nhlfe;
1620 }
1621
1622 const struct zebra_nhlfe *
1623 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
1624 struct zebra_nhlfe *nhlfe)
1625 {
1626 DPLANE_CTX_VALID(ctx);
1627
1628 ctx->u.lsp.best_nhlfe = nhlfe;
1629 return ctx->u.lsp.best_nhlfe;
1630 }
1631
1632 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
1633 {
1634 DPLANE_CTX_VALID(ctx);
1635
1636 return ctx->u.lsp.num_ecmp;
1637 }
1638
1639 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
1640 {
1641 DPLANE_CTX_VALID(ctx);
1642
1643 return ctx->u.pw.local_label;
1644 }
1645
1646 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
1647 {
1648 DPLANE_CTX_VALID(ctx);
1649
1650 return ctx->u.pw.remote_label;
1651 }
1652
1653 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
1654 {
1655 DPLANE_CTX_VALID(ctx);
1656
1657 return ctx->u.pw.type;
1658 }
1659
1660 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
1661 {
1662 DPLANE_CTX_VALID(ctx);
1663
1664 return ctx->u.pw.af;
1665 }
1666
1667 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
1668 {
1669 DPLANE_CTX_VALID(ctx);
1670
1671 return ctx->u.pw.flags;
1672 }
1673
1674 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
1675 {
1676 DPLANE_CTX_VALID(ctx);
1677
1678 return ctx->u.pw.status;
1679 }
1680
1681 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
1682 {
1683 DPLANE_CTX_VALID(ctx);
1684
1685 ctx->u.pw.status = status;
1686 }
1687
1688 const union g_addr *dplane_ctx_get_pw_dest(
1689 const struct zebra_dplane_ctx *ctx)
1690 {
1691 DPLANE_CTX_VALID(ctx);
1692
1693 return &(ctx->u.pw.dest);
1694 }
1695
1696 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
1697 const struct zebra_dplane_ctx *ctx)
1698 {
1699 DPLANE_CTX_VALID(ctx);
1700
1701 return &(ctx->u.pw.fields);
1702 }
1703
1704 const struct nexthop_group *
1705 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
1706 {
1707 DPLANE_CTX_VALID(ctx);
1708
1709 return &(ctx->u.pw.fib_nhg);
1710 }
1711
1712 const struct nexthop_group *
1713 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
1714 {
1715 DPLANE_CTX_VALID(ctx);
1716
1717 return &(ctx->u.pw.primary_nhg);
1718 }
1719
1720 const struct nexthop_group *
1721 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
1722 {
1723 DPLANE_CTX_VALID(ctx);
1724
1725 return &(ctx->u.pw.backup_nhg);
1726 }
1727
1728 /* Accessors for interface information */
1729 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
1730 {
1731 DPLANE_CTX_VALID(ctx);
1732
1733 return ctx->u.intf.metric;
1734 }
1735
1736 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
1737 {
1738 DPLANE_CTX_VALID(ctx);
1739
1740 ctx->u.intf.metric = metric;
1741 }
1742
1743 /* Is interface addr p2p? */
1744 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
1745 {
1746 DPLANE_CTX_VALID(ctx);
1747
1748 return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
1749 }
1750
1751 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
1752 {
1753 DPLANE_CTX_VALID(ctx);
1754
1755 return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
1756 }
1757
1758 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
1759 {
1760 DPLANE_CTX_VALID(ctx);
1761
1762 return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
1763 }
1764
1765 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
1766 {
1767 DPLANE_CTX_VALID(ctx);
1768
1769 ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
1770 }
1771
1772 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
1773 {
1774 DPLANE_CTX_VALID(ctx);
1775
1776 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
1777 }
1778
1779 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
1780 {
1781 DPLANE_CTX_VALID(ctx);
1782
1783 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
1784 }
1785
1786 const struct prefix *dplane_ctx_get_intf_addr(
1787 const struct zebra_dplane_ctx *ctx)
1788 {
1789 DPLANE_CTX_VALID(ctx);
1790
1791 return &(ctx->u.intf.prefix);
1792 }
1793
1794 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
1795 const struct prefix *p)
1796 {
1797 DPLANE_CTX_VALID(ctx);
1798
1799 prefix_copy(&(ctx->u.intf.prefix), p);
1800 }
1801
1802 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
1803 {
1804 DPLANE_CTX_VALID(ctx);
1805
1806 return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
1807 }
1808
1809 const struct prefix *dplane_ctx_get_intf_dest(
1810 const struct zebra_dplane_ctx *ctx)
1811 {
1812 DPLANE_CTX_VALID(ctx);
1813
1814 return &(ctx->u.intf.dest_prefix);
1815 }
1816
1817 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
1818 const struct prefix *p)
1819 {
1820 DPLANE_CTX_VALID(ctx);
1821
1822 prefix_copy(&(ctx->u.intf.dest_prefix), p);
1823 }
1824
1825 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
1826 {
1827 DPLANE_CTX_VALID(ctx);
1828
1829 return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
1830 }
1831
1832 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
1833 {
1834 DPLANE_CTX_VALID(ctx);
1835
1836 return ctx->u.intf.label;
1837 }
1838
1839 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label)
1840 {
1841 size_t len;
1842
1843 DPLANE_CTX_VALID(ctx);
1844
1845 if (ctx->u.intf.label && ctx->u.intf.label != ctx->u.intf.label_buf)
1846 free(ctx->u.intf.label);
1847
1848 ctx->u.intf.label = NULL;
1849
1850 if (label) {
1851 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
1852
1853 /* Use embedded buffer if it's adequate; else allocate. */
1854 len = strlen(label);
1855
1856 if (len < sizeof(ctx->u.intf.label_buf)) {
1857 strlcpy(ctx->u.intf.label_buf, label,
1858 sizeof(ctx->u.intf.label_buf));
1859 ctx->u.intf.label = ctx->u.intf.label_buf;
1860 } else {
1861 ctx->u.intf.label = strdup(label);
1862 }
1863 } else {
1864 ctx->u.intf.flags &= ~DPLANE_INTF_HAS_LABEL;
1865 }
1866 }
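/* This pairs with dplane_ctx_free_internal(), which only calls free() on
 * the label when it does not point at the embedded label_buf.
 */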
1867
1868 /* Accessors for MAC information */
1869 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1870 {
1871 DPLANE_CTX_VALID(ctx);
1872 return ctx->u.macinfo.vid;
1873 }
1874
1875 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1876 {
1877 DPLANE_CTX_VALID(ctx);
1878 return ctx->u.macinfo.is_sticky;
1879 }
1880
1881 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1882 {
1883 DPLANE_CTX_VALID(ctx);
1884 return ctx->u.macinfo.nhg_id;
1885 }
1886
1887 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
1888 {
1889 DPLANE_CTX_VALID(ctx);
1890 return ctx->u.macinfo.update_flags;
1891 }
1892
1893 const struct ethaddr *dplane_ctx_mac_get_addr(
1894 const struct zebra_dplane_ctx *ctx)
1895 {
1896 DPLANE_CTX_VALID(ctx);
1897 return &(ctx->u.macinfo.mac);
1898 }
1899
1900 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1901 const struct zebra_dplane_ctx *ctx)
1902 {
1903 DPLANE_CTX_VALID(ctx);
1904 return &(ctx->u.macinfo.vtep_ip);
1905 }
1906
1907 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1908 {
1909 DPLANE_CTX_VALID(ctx);
1910 return ctx->u.macinfo.br_ifindex;
1911 }
1912
1913 /* Accessors for neighbor information */
1914 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1915 const struct zebra_dplane_ctx *ctx)
1916 {
1917 DPLANE_CTX_VALID(ctx);
1918 return &(ctx->u.neigh.ip_addr);
1919 }
1920
1921 const struct ipaddr *
1922 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
1923 {
1924 DPLANE_CTX_VALID(ctx);
1925 return &(ctx->u.neigh.link.ip_addr);
1926 }
1927
1928 const struct ethaddr *dplane_ctx_neigh_get_mac(
1929 const struct zebra_dplane_ctx *ctx)
1930 {
1931 DPLANE_CTX_VALID(ctx);
1932 return &(ctx->u.neigh.link.mac);
1933 }
1934
1935 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
1936 {
1937 DPLANE_CTX_VALID(ctx);
1938 return ctx->u.neigh.flags;
1939 }
1940
1941 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
1942 {
1943 DPLANE_CTX_VALID(ctx);
1944 return ctx->u.neigh.state;
1945 }
1946
1947 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
1948 {
1949 DPLANE_CTX_VALID(ctx);
1950 return ctx->u.neigh.update_flags;
1951 }
1952
1953 /* Accessor for GRE set */
1954 uint32_t
1955 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx)
1956 {
1957 DPLANE_CTX_VALID(ctx);
1958
1959 return ctx->u.gre.link_ifindex;
1960 }
1961
1962 unsigned int
1963 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx)
1964 {
1965 DPLANE_CTX_VALID(ctx);
1966
1967 return ctx->u.gre.mtu;
1968 }
1969
1970 const struct zebra_l2info_gre *
1971 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx)
1972 {
1973 DPLANE_CTX_VALID(ctx);
1974
1975 return &ctx->u.gre.info;
1976 }
1977
1978 /* Accessors for PBR rule information */
1979 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
1980 {
1981 DPLANE_CTX_VALID(ctx);
1982
1983 return ctx->u.rule.sock;
1984 }
1985
1986 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
1987 {
1988 DPLANE_CTX_VALID(ctx);
1989
1990 return ctx->u.rule.new.ifname;
1991 }
1992
1993 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
1994 {
1995 DPLANE_CTX_VALID(ctx);
1996
1997 return ctx->u.rule.unique;
1998 }
1999
2000 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
2001 {
2002 DPLANE_CTX_VALID(ctx);
2003
2004 return ctx->u.rule.seq;
2005 }
2006
2007 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
2008 {
2009 DPLANE_CTX_VALID(ctx);
2010
2011 return ctx->u.rule.new.priority;
2012 }
2013
2014 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
2015 {
2016 DPLANE_CTX_VALID(ctx);
2017
2018 return ctx->u.rule.old.priority;
2019 }
2020
2021 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
2022 {
2023 DPLANE_CTX_VALID(ctx);
2024
2025 return ctx->u.rule.new.table;
2026 }
2027
2028 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
2029 {
2030 DPLANE_CTX_VALID(ctx);
2031
2032 return ctx->u.rule.old.table;
2033 }
2034
2035 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
2036 {
2037 DPLANE_CTX_VALID(ctx);
2038
2039 return ctx->u.rule.new.filter_bm;
2040 }
2041
2042 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
2043 {
2044 DPLANE_CTX_VALID(ctx);
2045
2046 return ctx->u.rule.old.filter_bm;
2047 }
2048
2049 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
2050 {
2051 DPLANE_CTX_VALID(ctx);
2052
2053 return ctx->u.rule.new.fwmark;
2054 }
2055
2056 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
2057 {
2058 DPLANE_CTX_VALID(ctx);
2059
2060 return ctx->u.rule.old.fwmark;
2061 }
2062
2063 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
2064 {
2065 DPLANE_CTX_VALID(ctx);
2066
2067 return ctx->u.rule.new.ip_proto;
2068 }
2069
2070 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
2071 {
2072 DPLANE_CTX_VALID(ctx);
2073
2074 return ctx->u.rule.old.ip_proto;
2075 }
2076
2077 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
2078 {
2079 DPLANE_CTX_VALID(ctx);
2080
2081 return ctx->u.rule.new.dsfield;
2082 }
2083
2084 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
2085 {
2086 DPLANE_CTX_VALID(ctx);
2087
2088 return ctx->u.rule.old.dsfield;
2089 }
2090
2091 const struct prefix *
2092 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
2093 {
2094 DPLANE_CTX_VALID(ctx);
2095
2096 return &(ctx->u.rule.new.src_ip);
2097 }
2098
2099 const struct prefix *
2100 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
2101 {
2102 DPLANE_CTX_VALID(ctx);
2103
2104 return &(ctx->u.rule.old.src_ip);
2105 }
2106
2107 const struct prefix *
2108 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
2109 {
2110 DPLANE_CTX_VALID(ctx);
2111
2112 return &(ctx->u.rule.new.dst_ip);
2113 }
2114
2115 const struct prefix *
2116 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
2117 {
2118 DPLANE_CTX_VALID(ctx);
2119
2120 return &(ctx->u.rule.old.dst_ip);
2121 }
2122
2123 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx)
2124 {
2125 DPLANE_CTX_VALID(ctx);
2126
2127 return ctx->u.br_port.flags;
2128 }
2129
2130 uint32_t
2131 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx)
2132 {
2133 DPLANE_CTX_VALID(ctx);
2134
2135 return ctx->u.br_port.sph_filter_cnt;
2136 }
2137
2138 const struct in_addr *
2139 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx)
2140 {
2141 DPLANE_CTX_VALID(ctx);
2142
2143 return ctx->u.br_port.sph_filters;
2144 }
2145
2146 uint32_t
2147 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx)
2148 {
2149 DPLANE_CTX_VALID(ctx);
2150
2151 return ctx->u.br_port.backup_nhg_id;
2152 }
2153
2154 /* Accessors for PBR iptable information */
2155 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
2156 struct zebra_pbr_iptable *table)
2157 {
2158 DPLANE_CTX_VALID(ctx);
2159
2160 memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
2161 }
2162
2163 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
2164 struct zebra_pbr_ipset *ipset)
2165 {
2166 DPLANE_CTX_VALID(ctx);
2167
2168 assert(ipset);
2169
2170 if (ctx->zd_op == DPLANE_OP_IPSET_ENTRY_ADD ||
2171 ctx->zd_op == DPLANE_OP_IPSET_ENTRY_DELETE) {
2172 memset(ipset, 0, sizeof(struct zebra_pbr_ipset));
2173 ipset->type = ctx->u.ipset_entry.info.type;
2174 ipset->family = ctx->u.ipset_entry.info.family;
2175 memcpy(&ipset->ipset_name, &ctx->u.ipset_entry.info.ipset_name,
2176 ZEBRA_IPSET_NAME_SIZE);
2177 } else
2178 memcpy(ipset, &ctx->u.ipset, sizeof(struct zebra_pbr_ipset));
2179 }
2180
2181 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
2182 struct zebra_pbr_ipset_entry *entry)
2183 {
2184 DPLANE_CTX_VALID(ctx);
2185
2186 assert(entry);
2187
2188 memcpy(entry, &ctx->u.ipset_entry.entry, sizeof(struct zebra_pbr_ipset_entry));
2189 }
2190
2191 /*
2192 * End of dplane context accessors
2193 */
2194
2195 /* Optional extra info about interfaces in nexthops - a plugin must enable
2196 * this extra info.
2197 */
2198 const struct dplane_intf_extra *
2199 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
2200 {
2201 return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
2202 }
2203
2204 const struct dplane_intf_extra *
2205 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
2206 const struct dplane_intf_extra *ptr)
2207 {
2208 return TAILQ_NEXT(ptr, link);
2209 }
2210
2211 vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
2212 {
2213 return ptr->vrf_id;
2214 }
2215
2216 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr)
2217 {
2218 return ptr->ifindex;
2219 }
2220
2221 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr)
2222 {
2223 return ptr->flags;
2224 }
2225
2226 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr)
2227 {
2228 return ptr->status;
2229 }
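/* Iterating the optional extra interface info might look like (sketch):
 *
 *	const struct dplane_intf_extra *ptr;
 *
 *	for (ptr = dplane_ctx_get_intf_extra(ctx); ptr != NULL;
 *	     ptr = dplane_ctx_intf_extra_next(ctx, ptr))
 *		... use dplane_intf_extra_get_ifindex(ptr), etc. ...
 */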
2230
2231 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx)
2232 {
2233 DPLANE_CTX_VALID(ctx);
2234
2235 return ctx->u.neightable.family;
2236 }
2237
2238 uint32_t
2239 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx)
2240 {
2241 DPLANE_CTX_VALID(ctx);
2242
2243 return ctx->u.neightable.app_probes;
2244 }
2245
2246 uint32_t
2247 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx)
2248 {
2249 DPLANE_CTX_VALID(ctx);
2250
2251 return ctx->u.neightable.ucast_probes;
2252 }
2253
2254 uint32_t
2255 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
2256 {
2257 DPLANE_CTX_VALID(ctx);
2258
2259 return ctx->u.neightable.mcast_probes;
2260 }
2261
2262 /*
2263 * End of interface extra info and neighbor table accessors
2264 */
2265
2266 /*
2267 * Retrieve the limit on the number of pending, unprocessed updates.
2268 */
2269 uint32_t dplane_get_in_queue_limit(void)
2270 {
2271 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2272 memory_order_relaxed);
2273 }
2274
2275 /*
2276 * Configure limit on the number of pending, queued updates.
2277 */
2278 void dplane_set_in_queue_limit(uint32_t limit, bool set)
2279 {
2280 /* Reset to default on 'unset' */
2281 if (!set)
2282 limit = DPLANE_DEFAULT_MAX_QUEUED;
2283
2284 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
2285 memory_order_relaxed);
2286 }
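
/* Illustrative usage (a sketch, not a call made in this file): a configuration
 * handler could call dplane_set_in_queue_limit(1000, true) to raise the limit,
 * and dplane_set_in_queue_limit(0, false) to fall back to
 * DPLANE_DEFAULT_MAX_QUEUED.
 */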
2287
2288 /*
2289 * Retrieve the current queue depth of incoming, unprocessed updates
2290 */
2291 uint32_t dplane_get_in_queue_len(void)
2292 {
2293 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
2294 memory_order_seq_cst);
2295 }
2296
2297 /*
2298 * Common dataplane context init with zebra namespace info.
2299 */
2300 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
2301 struct zebra_ns *zns,
2302 bool is_update)
2303 {
2304 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
2305
2306 ctx->zd_is_update = is_update;
2307
2308 #if defined(HAVE_NETLINK)
2309 /* Increment message counter after copying to context struct - may need
2310 * two messages in some 'update' cases.
2311 */
2312 if (is_update)
2313 zns->netlink_dplane_out.seq += 2;
2314 else
2315 zns->netlink_dplane_out.seq++;
2316 #endif /* HAVE_NETLINK */
2317
2318 return AOK;
2319 }
2320
2321 /*
2322 * Initialize a context block for a route update from zebra data structs.
2323 */
2324 int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2325 struct route_node *rn, struct route_entry *re)
2326 {
2327 int ret = EINVAL;
2328 const struct route_table *table = NULL;
2329 const struct rib_table_info *info;
2330 const struct prefix *p, *src_p;
2331 struct zebra_ns *zns;
2332 struct zebra_vrf *zvrf;
2333 struct nexthop *nexthop;
2334 struct zebra_l3vni *zl3vni;
2335 const struct interface *ifp;
2336 struct dplane_intf_extra *if_extra;
2337
2338 if (!ctx || !rn || !re)
2339 goto done;
2340
2341 TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
2342
2343 ctx->zd_op = op;
2344 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2345
2346 ctx->u.rinfo.zd_type = re->type;
2347 ctx->u.rinfo.zd_old_type = re->type;
2348
2349 /* Prefixes: dest, and optional source */
2350 srcdest_rnode_prefixes(rn, &p, &src_p);
2351
2352 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
2353
2354 if (src_p)
2355 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
2356 else
2357 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
2358
2359 ctx->zd_table_id = re->table;
2360
2361 ctx->u.rinfo.zd_metric = re->metric;
2362 ctx->u.rinfo.zd_old_metric = re->metric;
2363 ctx->zd_vrf_id = re->vrf_id;
2364 ctx->u.rinfo.zd_mtu = re->mtu;
2365 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
2366 ctx->u.rinfo.zd_instance = re->instance;
2367 ctx->u.rinfo.zd_tag = re->tag;
2368 ctx->u.rinfo.zd_old_tag = re->tag;
2369 ctx->u.rinfo.zd_distance = re->distance;
2370
2371 table = srcdest_rnode_table(rn);
2372 info = table->info;
2373
2374 ctx->u.rinfo.zd_afi = info->afi;
2375 ctx->u.rinfo.zd_safi = info->safi;
2376
2377 /* Copy nexthops; recursive info is included too */
2378 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
2379 re->nhe->nhg.nexthop, NULL);
2380 ctx->u.rinfo.zd_nhg_id = re->nhe->id;
2381
2382 /* Copy backup nexthop info, if present */
2383 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2384 copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
2385 re->nhe->backup_info->nhe->nhg.nexthop, NULL);
2386 }
2387
2388 /*
2389 * Ensure that the dplane nexthops' flags are clear and copy
2390 * encapsulation information.
2391 */
2392 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) {
2393 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2394
2395 /* Optionally capture extra interface info while we're in the
2396 * main zebra pthread - a plugin has to ask for this info.
2397 */
2398 if (dplane_collect_extra_intf_info) {
2399 ifp = if_lookup_by_index(nexthop->ifindex,
2400 nexthop->vrf_id);
2401
2402 if (ifp) {
2403 if_extra = XCALLOC(
2404 MTYPE_DP_INTF,
2405 sizeof(struct dplane_intf_extra));
2406 if_extra->vrf_id = nexthop->vrf_id;
2407 if_extra->ifindex = nexthop->ifindex;
2408 if_extra->flags = ifp->flags;
2409 if_extra->status = ifp->status;
2410
2411 TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
2412 if_extra, link);
2413 }
2414 }
2415
2416 /* Check for available evpn encapsulations. */
2417 if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE))
2418 continue;
2419
2420 zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
2421 if (zl3vni && is_l3vni_oper_up(zl3vni)) {
2422 nexthop->nh_encap_type = NET_VXLAN;
2423 nexthop->nh_encap.vni = zl3vni->vni;
2424 }
2425 }
2426
2427 /* Don't need some info when capturing a system notification */
2428 if (op == DPLANE_OP_SYS_ROUTE_ADD ||
2429 op == DPLANE_OP_SYS_ROUTE_DELETE) {
2430 ret = AOK;
2431 goto done;
2432 }
2433
2434 /* Extract ns info - can't use pointers to 'core' structs */
2435 zvrf = vrf_info_lookup(re->vrf_id);
2436 zns = zvrf->zns;
2437 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
2438
2439 #ifdef HAVE_NETLINK
2440 {
2441 struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
2442
2443 ctx->u.rinfo.nhe.id = nhe->id;
2444 ctx->u.rinfo.nhe.old_id = 0;
2445 /*
2446 * Check if the nhe is installed/queued before doing anything
2447 * with this route.
2448 *
2449 * If it's a delete we only use the prefix anyway, so this only
2450 * matters for INSTALL/UPDATE.
2451 */
2452 if (zebra_nhg_kernel_nexthops_enabled()
2453 && (((op == DPLANE_OP_ROUTE_INSTALL)
2454 || (op == DPLANE_OP_ROUTE_UPDATE))
2455 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2456 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
2457 ret = ENOENT;
2458 goto done;
2459 }
2460
2461 re->nhe_installed_id = nhe->id;
2462 }
2463 #endif /* HAVE_NETLINK */
2464
2465 /* Trying out the sequence number idea, so we can try to detect
2466 * when a result is stale.
2467 */
2468 re->dplane_sequence = zebra_router_get_next_sequence();
2469 ctx->zd_seq = re->dplane_sequence;
2470
2471 ret = AOK;
2472
2473 done:
2474 return ret;
2475 }
2476
2477 /**
2478 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2479 *
2480 * @ctx: Dataplane context to init
2481 * @op: Operation being performed
2482 * @nhe: Nexthop group hash entry
2483 *
2484 * Return: Result status
2485 */
2486 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2487 struct nhg_hash_entry *nhe)
2488 {
2489 struct zebra_vrf *zvrf = NULL;
2490 struct zebra_ns *zns = NULL;
2491 int ret = EINVAL;
2492
2493 if (!ctx || !nhe)
2494 goto done;
2495
2496 ctx->zd_op = op;
2497 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2498
2499 /* Copy over nhe info */
2500 ctx->u.rinfo.nhe.id = nhe->id;
2501 ctx->u.rinfo.nhe.afi = nhe->afi;
2502 ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
2503 ctx->u.rinfo.nhe.type = nhe->type;
2504
2505 nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), &(nhe->nhg));
2506
2507 /* If this is a group, convert it to a grp array of ids */
2508 if (!zebra_nhg_depends_is_empty(nhe)
2509 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
2510 ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
2511 ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);
2512
2513 zvrf = vrf_info_lookup(nhe->vrf_id);
2514
2515 /*
2516 * Fallback to default namespace if the vrf got ripped out from under
2517 * us.
2518 */
2519 zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
2520
2521 /*
2522 * TODO: Might not need to mark this as an update, since
2523 * it probably won't require two messages
2524 */
2525 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));
2526
2527 ret = AOK;
2528
2529 done:
2530 return ret;
2531 }
2532
2533 /*
2534 * Capture information for an LSP update in a dplane context.
2535 */
2536 int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2537 struct zebra_lsp *lsp)
2538 {
2539 int ret = AOK;
2540 struct zebra_nhlfe *nhlfe, *new_nhlfe;
2541
2542 ctx->zd_op = op;
2543 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2544
2545 /* Capture namespace info */
2546 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2547 (op == DPLANE_OP_LSP_UPDATE));
2548
2549 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
2550
2551 nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
2552 nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
2553
2554 /* This may be called to create/init a dplane context, not necessarily
2555 * to copy an lsp object.
2556 */
2557 if (lsp == NULL) {
2558 ret = AOK;
2559 goto done;
2560 }
2561
2562 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2563 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2564 dplane_op2str(op), lsp->ile.in_label,
2565 lsp->num_ecmp);
2566
2567 ctx->u.lsp.ile = lsp->ile;
2568 ctx->u.lsp.addr_family = lsp->addr_family;
2569 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
2570 ctx->u.lsp.flags = lsp->flags;
2571
2572 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2573 frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
2574 /* Not sure if this is meaningful... */
2575 if (nhlfe->nexthop == NULL)
2576 continue;
2577
2578 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
2579 nhlfe->nexthop);
2580 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2581 ret = ENOMEM;
2582 break;
2583 }
2584
2585 /* Need to copy flags and backup info too */
2586 new_nhlfe->flags = nhlfe->flags;
2587 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2588
2589 if (CHECK_FLAG(new_nhlfe->nexthop->flags,
2590 NEXTHOP_FLAG_HAS_BACKUP)) {
2591 new_nhlfe->nexthop->backup_num =
2592 nhlfe->nexthop->backup_num;
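			/* backup_idx entries are one byte each, so backup_num
			 * also serves as the byte count for the copy below.
			 */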
2593 memcpy(new_nhlfe->nexthop->backup_idx,
2594 nhlfe->nexthop->backup_idx,
2595 new_nhlfe->nexthop->backup_num);
2596 }
2597
2598 if (nhlfe == lsp->best_nhlfe)
2599 ctx->u.lsp.best_nhlfe = new_nhlfe;
2600 }
2601
2602 if (ret != AOK)
2603 goto done;
2604
2605 /* Capture backup nhlfes/nexthops */
2606 frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
2607 /* Not sure if this is meaningful... */
2608 if (nhlfe->nexthop == NULL)
2609 continue;
2610
2611 new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
2612 nhlfe->type,
2613 nhlfe->nexthop);
2614 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2615 ret = ENOMEM;
2616 break;
2617 }
2618
2619 /* Need to copy flags too */
2620 new_nhlfe->flags = nhlfe->flags;
2621 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2622 }
2623
2624 /* On error the ctx will be cleaned-up, so we don't need to
2625 * deal with any allocated nhlfe or nexthop structs here.
2626 */
2627 done:
2628
2629 return ret;
2630 }
2631
2632 /*
2633 * Capture information for a pseudowire (PW) update in a dplane context.
2634 */
2635 static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
2636 enum dplane_op_e op,
2637 struct zebra_pw *pw)
2638 {
2639 int ret = EINVAL;
2640 struct prefix p;
2641 afi_t afi;
2642 struct route_table *table;
2643 struct route_node *rn;
2644 struct route_entry *re;
2645 const struct nexthop_group *nhg;
2646 struct nexthop *nh, *newnh, *last_nh;
2647
2648 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2649 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2650 dplane_op2str(op), pw->ifname, pw->local_label,
2651 pw->remote_label);
2652
2653 ctx->zd_op = op;
2654 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2655
2656 /* Capture namespace info: no netlink support as of 12/18,
2657 * but just in case...
2658 */
2659 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2660
2661 memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
2662
2663 /* This name appears to be a C string, so we use a string copy. */
2664 strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));
2665
2666 ctx->zd_vrf_id = pw->vrf_id;
2667 ctx->zd_ifindex = pw->ifindex;
2668 ctx->u.pw.type = pw->type;
2669 ctx->u.pw.af = pw->af;
2670 ctx->u.pw.local_label = pw->local_label;
2671 ctx->u.pw.remote_label = pw->remote_label;
2672 ctx->u.pw.flags = pw->flags;
2673
2674 ctx->u.pw.dest = pw->nexthop;
2675
2676 ctx->u.pw.fields = pw->data;
2677
2678 /* Capture nexthop info for the pw destination. We need to look
2679 * up and use zebra datastructs, but we're running in the zebra
2680 * pthread here so that should be ok.
2681 */
2682 memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
2683 p.family = pw->af;
2684 p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
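	/* 'p' is a full-length host prefix built from the pw nexthop address;
	 * it is used below for a longest-prefix match in the vrf's unicast
	 * table.
	 */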
2685
2686 afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
2687 table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
2688 if (table == NULL)
2689 goto done;
2690
2691 rn = route_node_match(table, &p);
2692 if (rn == NULL)
2693 goto done;
2694
2695 re = NULL;
2696 RNODE_FOREACH_RE(rn, re) {
2697 if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
2698 break;
2699 }
2700
2701 if (re) {
2702 /* We'll capture a 'fib' list of nexthops that meet our
2703 * criteria: installed, and labelled.
2704 */
2705 nhg = rib_get_fib_nhg(re);
2706 last_nh = NULL;
2707
2708 if (nhg && nhg->nexthop) {
2709 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2710 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2711 || CHECK_FLAG(nh->flags,
2712 NEXTHOP_FLAG_RECURSIVE)
2713 || nh->nh_label == NULL)
2714 continue;
2715
2716 newnh = nexthop_dup(nh, NULL);
2717
2718 if (last_nh)
2719 NEXTHOP_APPEND(last_nh, newnh);
2720 else
2721 ctx->u.pw.fib_nhg.nexthop = newnh;
2722 last_nh = newnh;
2723 }
2724 }
2725
2726 /* Include any installed backup nexthops also. */
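		/* Note: 'last_nh' carries over from the loop above, so backup
		 * nexthops are appended to the same fib_nhg list.
		 */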
2727 nhg = rib_get_fib_backup_nhg(re);
2728 if (nhg && nhg->nexthop) {
2729 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2730 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2731 || CHECK_FLAG(nh->flags,
2732 NEXTHOP_FLAG_RECURSIVE)
2733 || nh->nh_label == NULL)
2734 continue;
2735
2736 newnh = nexthop_dup(nh, NULL);
2737
2738 if (last_nh)
2739 NEXTHOP_APPEND(last_nh, newnh);
2740 else
2741 ctx->u.pw.fib_nhg.nexthop = newnh;
2742 last_nh = newnh;
2743 }
2744 }
2745
2746 /* Copy primary nexthops; recursive info is included too */
2747 assert(re->nhe != NULL); /* SA warning */
2748 copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
2749 re->nhe->nhg.nexthop, NULL);
2750 ctx->u.pw.nhg_id = re->nhe->id;
2751
2752 /* Copy backup nexthop info, if present */
2753 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2754 copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
2755 re->nhe->backup_info->nhe->nhg.nexthop,
2756 NULL);
2757 }
2758 }
2759 route_unlock_node(rn);
2760
2761 ret = AOK;
2762
2763 done:
2764 return ret;
2765 }
2766
2767 /**
2768 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2769 * PBR rule.
2770 *
2771 * @dplane_rule: Dataplane internal representation of a rule
2772 * @rule: PBR rule
2773 */
2774 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
2775 struct zebra_pbr_rule *rule)
2776 {
2777 dplane_rule->priority = rule->rule.priority;
2778 dplane_rule->table = rule->rule.action.table;
2779
2780 dplane_rule->filter_bm = rule->rule.filter.filter_bm;
2781 dplane_rule->fwmark = rule->rule.filter.fwmark;
2782 dplane_rule->dsfield = rule->rule.filter.dsfield;
2783 dplane_rule->ip_proto = rule->rule.filter.ip_proto;
2784 prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
2785 prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
2786
2787 dplane_rule->action_pcp = rule->rule.action.pcp;
2788 dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
2789 dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
2790 dplane_rule->action_queue_id = rule->rule.action.queue_id;
2791
2792 strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
2793 }
2794
2795 /**
2796 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
2797 *
2798 * @ctx: Dataplane context to init
2799 * @op: Operation being performed
2800 * @new_rule: PBR rule
 * @old_rule: Previous rule state, used when op is DPLANE_OP_RULE_UPDATE
2801 *
2802 * Return: Result status
2803 */
2804 static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
2805 enum dplane_op_e op,
2806 struct zebra_pbr_rule *new_rule,
2807 struct zebra_pbr_rule *old_rule)
2808 {
2809 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2810 zlog_debug(
2811 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
2812 dplane_op2str(op), new_rule->ifname,
2813 new_rule->rule.priority, new_rule->rule.filter.fwmark,
2814 &new_rule->rule.filter.src_ip,
2815 &new_rule->rule.filter.dst_ip,
2816 new_rule->rule.action.table);
2817
2818 ctx->zd_op = op;
2819 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2820
2821 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2822 op == DPLANE_OP_RULE_UPDATE);
2823
2824 ctx->zd_vrf_id = new_rule->vrf_id;
2825 strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
2826
2827 ctx->u.rule.sock = new_rule->sock;
2828 ctx->u.rule.unique = new_rule->rule.unique;
2829 ctx->u.rule.seq = new_rule->rule.seq;
2830
2831 dplane_ctx_rule_init_single(&ctx->u.rule.new, new_rule);
2832 if (op == DPLANE_OP_RULE_UPDATE)
2833 dplane_ctx_rule_init_single(&ctx->u.rule.old, old_rule);
2834
2835 return AOK;
2836 }
2837
2838 /**
2839 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
2840 * update.
2841 *
2842 * @ctx: Dataplane context to init
2843 * @op: Operation being performed
2844 * @iptable: PBR iptable
2845 *
2846 * Return: Result status
2847 */
2848 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx *ctx,
2849 enum dplane_op_e op,
2850 struct zebra_pbr_iptable *iptable)
2851 {
2852 char *ifname;
2853 struct listnode *node;
2854
2855 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2856 zlog_debug(
2857 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
2858 dplane_op2str(op), iptable->unique, iptable->fwmark,
2859 family2str(iptable->family),
2860 iptable->action == ZEBRA_IPTABLES_DROP ? "Drop"
2861 : "Forward");
2862 }
2863
2864 ctx->zd_op = op;
2865 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2866
2867 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2868
2869 ctx->zd_vrf_id = iptable->vrf_id;
2870 memcpy(&ctx->u.iptable, iptable, sizeof(struct zebra_pbr_iptable));
2871 ctx->u.iptable.interface_name_list = NULL;
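	/* Deep-copy the interface name list so the context owns its own
	 * MTYPE_DP_NETFILTER strings, independent of the caller's iptable.
	 */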
2872 if (iptable->nb_interface > 0) {
2873 ctx->u.iptable.interface_name_list = list_new();
2874 for (ALL_LIST_ELEMENTS_RO(iptable->interface_name_list, node,
2875 ifname)) {
2876 listnode_add(ctx->u.iptable.interface_name_list,
2877 XSTRDUP(MTYPE_DP_NETFILTER, ifname));
2878 }
2879 }
2880 return AOK;
2881 }
2882
2883 /**
2884 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
2885 *
2886 * @ctx: Dataplane context to init
2887 * @op: Operation being performed
2888 * @ipset: PBR ipset
2889 *
2890 * Return: Result status
2891 */
2892 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx *ctx,
2893 enum dplane_op_e op,
2894 struct zebra_pbr_ipset *ipset)
2895 {
2896 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2897 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
2898 dplane_op2str(op), ipset->ipset_name, ipset->unique,
2899 family2str(ipset->family),
2900 zebra_pbr_ipset_type2str(ipset->type));
2901 }
2902
2903 ctx->zd_op = op;
2904 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2905
2906 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2907
2908 ctx->zd_vrf_id = ipset->vrf_id;
2909
2910 memcpy(&ctx->u.ipset, ipset, sizeof(struct zebra_pbr_ipset));
2911 return AOK;
2912 }
2913
2914 /**
2915 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
2916 * entry update.
2917 *
2918 * @ctx: Dataplane context to init
2919 * @op: Operation being performed
2920 * @ipset_entry: PBR ipset entry
2921 *
2922 * Return: Result status
2923 */
2924 static int
2925 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2926 struct zebra_pbr_ipset_entry *ipset_entry)
2927 {
2928 struct zebra_pbr_ipset *ipset;
2929
2930 ipset = ipset_entry->backpointer;
2931 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2932 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
2933 dplane_op2str(op), ipset->ipset_name,
2934 ipset_entry->unique, ipset_entry->filter_bm);
2935 }
2936
2937 ctx->zd_op = op;
2938 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2939
2940 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2941
2942 ctx->zd_vrf_id = ipset->vrf_id;
2943
2944 memcpy(&ctx->u.ipset_entry.entry, ipset_entry,
2945 sizeof(struct zebra_pbr_ipset_entry));
2946 ctx->u.ipset_entry.entry.backpointer = NULL;
2947 ctx->u.ipset_entry.info.type = ipset->type;
2948 ctx->u.ipset_entry.info.family = ipset->family;
2949 memcpy(&ctx->u.ipset_entry.info.ipset_name, &ipset->ipset_name,
2950 ZEBRA_IPSET_NAME_SIZE);
2951
2952 return AOK;
2953 }
2954
2955
2956 /*
2957 * Enqueue a new update,
2958 * and ensure an event is active for the dataplane pthread.
2959 */
2960 static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
2961 {
2962 int ret = EINVAL;
2963 uint32_t high, curr;
2964
2965 /* Enqueue for processing by the dataplane pthread */
2966 DPLANE_LOCK();
2967 {
2968 TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
2969 zd_q_entries);
2970 }
2971 DPLANE_UNLOCK();
2972
2973 curr = atomic_fetch_add_explicit(
2974 &(zdplane_info.dg_routes_queued),
2975 1, memory_order_seq_cst);
2976
2977 curr++; /* fetch_add returns the value prior to our increment */
2978
2979 /* Maybe update high-water counter also */
2980 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
2981 memory_order_seq_cst);
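	/* compare_exchange_weak may fail spuriously or because another thread
	 * raced ahead; on failure 'high' is reloaded with the current maximum,
	 * so retry until our value is no longer larger or the exchange wins.
	 */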
2982 while (high < curr) {
2983 if (atomic_compare_exchange_weak_explicit(
2984 &zdplane_info.dg_routes_queued_max,
2985 &high, curr,
2986 memory_order_seq_cst,
2987 memory_order_seq_cst))
2988 break;
2989 }
2990
2991 /* Ensure that an event for the dataplane thread is active */
2992 ret = dplane_provider_work_ready();
2993
2994 return ret;
2995 }
2996
2997 /*
2998 * Utility that prepares a route update and enqueues it for processing
2999 */
3000 static enum zebra_dplane_result
3001 dplane_route_update_internal(struct route_node *rn,
3002 struct route_entry *re,
3003 struct route_entry *old_re,
3004 enum dplane_op_e op)
3005 {
3006 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3007 int ret = EINVAL;
3008 struct zebra_dplane_ctx *ctx = NULL;
3009
3010 /* Obtain context block */
3011 ctx = dplane_ctx_alloc();
3012
3013 /* Init context with info from zebra data structs */
3014 ret = dplane_ctx_route_init(ctx, op, rn, re);
3015 if (ret == AOK) {
3016 /* Capture some extra info for update case
3017 * where there's a different 'old' route.
3018 */
3019 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
3020 old_re && (old_re != re)) {
3021
3022 old_re->dplane_sequence =
3023 zebra_router_get_next_sequence();
3024 ctx->zd_old_seq = old_re->dplane_sequence;
3025
3026 ctx->u.rinfo.zd_old_tag = old_re->tag;
3027 ctx->u.rinfo.zd_old_type = old_re->type;
3028 ctx->u.rinfo.zd_old_instance = old_re->instance;
3029 ctx->u.rinfo.zd_old_distance = old_re->distance;
3030 ctx->u.rinfo.zd_old_metric = old_re->metric;
3031 ctx->u.rinfo.nhe.old_id = old_re->nhe->id;
3032
3033 #ifndef HAVE_NETLINK
3034 /* For bsd, capture previous re's nexthops too, sigh.
3035 * We'll need these to do per-nexthop deletes.
3036 */
3037 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
3038 old_re->nhe->nhg.nexthop, NULL);
3039
3040 if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
3041 struct nexthop_group *nhg;
3042 struct nexthop **nh;
3043
3044 nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
3045 nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
3046
3047 if (nhg->nexthop)
3048 copy_nexthops(nh, nhg->nexthop, NULL);
3049 }
3050 #endif /* !HAVE_NETLINK */
3051 }
3052
3053 /*
3054 * If the old and new context type and nexthop group id
3055 * are the same, there is no need to send down a route replace:
3056 * we know we have already sent a nexthop group replace,
3057 * or an upper-level protocol has sent us the exact
3058 * same route again.
3059 */
3060 if ((dplane_ctx_get_type(ctx) == dplane_ctx_get_old_type(ctx))
3061 && (dplane_ctx_get_nhe_id(ctx)
3062 == dplane_ctx_get_old_nhe_id(ctx))
3063 && (dplane_ctx_get_nhe_id(ctx) >= ZEBRA_NHG_PROTO_LOWER)) {
3064 struct nexthop *nexthop;
3065
3066 if (IS_ZEBRA_DEBUG_DPLANE)
3067 zlog_debug(
3068 "%s: Ignoring Route exactly the same",
3069 __func__);
3070
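			/* Treat this as an implicit success: mark the active,
			 * non-recursive nexthops as installed (FIB flag), since
			 * the dataplane state does not need to change.
			 */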
3071 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
3072 nexthop)) {
3073 if (CHECK_FLAG(nexthop->flags,
3074 NEXTHOP_FLAG_RECURSIVE))
3075 continue;
3076
3077 if (CHECK_FLAG(nexthop->flags,
3078 NEXTHOP_FLAG_ACTIVE))
3079 SET_FLAG(nexthop->flags,
3080 NEXTHOP_FLAG_FIB);
3081 }
3082
3083 dplane_ctx_free(&ctx);
3084 return ZEBRA_DPLANE_REQUEST_SUCCESS;
3085 }
3086
3087 /* Enqueue context for processing */
3088 ret = dplane_update_enqueue(ctx);
3089 }
3090
3091 /* Update counter */
3092 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
3093 memory_order_relaxed);
3094
3095 if (ret == AOK)
3096 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3097 else {
3098 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
3099 memory_order_relaxed);
3100 if (ctx)
3101 dplane_ctx_free(&ctx);
3102 }
3103
3104 return result;
3105 }
3106
3107 /**
3108 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3109 *
3110 * @nhe: Nexthop group hash entry where the change occurred
3111 * @op: The operation to be enqueued
3112 *
3113 * Return: Result of the change
3114 */
3115 static enum zebra_dplane_result
3116 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
3117 {
3118 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3119 int ret = EINVAL;
3120 struct zebra_dplane_ctx *ctx = NULL;
3121
3122 /* Obtain context block */
3123 ctx = dplane_ctx_alloc();
3124 if (!ctx) {
3125 ret = ENOMEM;
3126 goto done;
3127 }
3128
3129 ret = dplane_ctx_nexthop_init(ctx, op, nhe);
3130 if (ret == AOK)
3131 ret = dplane_update_enqueue(ctx);
3132
3133 done:
3134 /* Update counter */
3135 atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
3136 memory_order_relaxed);
3137
3138 if (ret == AOK)
3139 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3140 else {
3141 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3142 memory_order_relaxed);
3143 if (ctx)
3144 dplane_ctx_free(&ctx);
3145 }
3146
3147 return result;
3148 }
3149
3150 /*
3151 * Enqueue a route 'add' for the dataplane.
3152 */
3153 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
3154 struct route_entry *re)
3155 {
3156 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3157
3158 if (rn == NULL || re == NULL)
3159 goto done;
3160
3161 ret = dplane_route_update_internal(rn, re, NULL,
3162 DPLANE_OP_ROUTE_INSTALL);
3163
3164 done:
3165 return ret;
3166 }
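
/* Illustrative (simplified) caller pattern, not code from this file: the RIB
 * enqueues an install and treats ZEBRA_DPLANE_REQUEST_QUEUED as "in flight",
 * with the final result delivered asynchronously via the dplane results path:
 *
 *	if (dplane_route_add(rn, re) == ZEBRA_DPLANE_REQUEST_FAILURE)
 *		handle_enqueue_failure(rn, re);	(hypothetical error handler)
 */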
3167
3168 /*
3169 * Enqueue a route update for the dataplane.
3170 */
3171 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
3172 struct route_entry *re,
3173 struct route_entry *old_re)
3174 {
3175 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3176
3177 if (rn == NULL || re == NULL)
3178 goto done;
3179
3180 ret = dplane_route_update_internal(rn, re, old_re,
3181 DPLANE_OP_ROUTE_UPDATE);
3182 done:
3183 return ret;
3184 }
3185
3186 /*
3187 * Enqueue a route removal for the dataplane.
3188 */
3189 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
3190 struct route_entry *re)
3191 {
3192 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3193
3194 if (rn == NULL || re == NULL)
3195 goto done;
3196
3197 ret = dplane_route_update_internal(rn, re, NULL,
3198 DPLANE_OP_ROUTE_DELETE);
3199
3200 done:
3201 return ret;
3202 }
3203
3204 /*
3205 * Notify the dplane when system/connected routes change.
3206 */
3207 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
3208 struct route_entry *re)
3209 {
3210 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3211
3212 /* Ignore this event unless a provider plugin has requested it. */
3213 if (!zdplane_info.dg_sys_route_notifs) {
3214 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3215 goto done;
3216 }
3217
3218 if (rn == NULL || re == NULL)
3219 goto done;
3220
3221 ret = dplane_route_update_internal(rn, re, NULL,
3222 DPLANE_OP_SYS_ROUTE_ADD);
3223
3224 done:
3225 return ret;
3226 }
3227
3228 /*
3229 * Notify the dplane when system/connected routes are deleted.
3230 */
3231 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
3232 struct route_entry *re)
3233 {
3234 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3235
3236 /* Ignore this event unless a provider plugin has requested it. */
3237 if (!zdplane_info.dg_sys_route_notifs) {
3238 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3239 goto done;
3240 }
3241
3242 if (rn == NULL || re == NULL)
3243 goto done;
3244
3245 ret = dplane_route_update_internal(rn, re, NULL,
3246 DPLANE_OP_SYS_ROUTE_DELETE);
3247
3248 done:
3249 return ret;
3250 }
3251
3252 /*
3253 * Update from an async notification, to bring other fibs up-to-date.
3254 */
3255 enum zebra_dplane_result
3256 dplane_route_notif_update(struct route_node *rn,
3257 struct route_entry *re,
3258 enum dplane_op_e op,
3259 struct zebra_dplane_ctx *ctx)
3260 {
3261 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3262 int ret = EINVAL;
3263 struct zebra_dplane_ctx *new_ctx = NULL;
3264 struct nexthop *nexthop;
3265 struct nexthop_group *nhg;
3266
3267 if (rn == NULL || re == NULL)
3268 goto done;
3269
3270 new_ctx = dplane_ctx_alloc();
3271 if (new_ctx == NULL)
3272 goto done;
3273
3274 /* Init context with info from zebra data structs */
3275 dplane_ctx_route_init(new_ctx, op, rn, re);
3276
3277 /* For add/update, need to adjust the nexthops so that we match
3278 * the notification state, which may not be the route-entry/RIB
3279 * state.
3280 */
3281 if (op == DPLANE_OP_ROUTE_UPDATE ||
3282 op == DPLANE_OP_ROUTE_INSTALL) {
3283
3284 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
3285 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
3286
3287 nhg = rib_get_fib_nhg(re);
3288 if (nhg && nhg->nexthop)
3289 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3290 nhg->nexthop, NULL);
3291
3292 /* Check for installed backup nexthops also */
3293 nhg = rib_get_fib_backup_nhg(re);
3294 if (nhg && nhg->nexthop) {
3295 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3296 nhg->nexthop, NULL);
3297 }
3298
3299 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
3300 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
3301
3302 }
3303
3304 /* Capture info about the source of the notification, in 'ctx' */
3305 dplane_ctx_set_notif_provider(new_ctx,
3306 dplane_ctx_get_notif_provider(ctx));
3307
3308 ret = dplane_update_enqueue(new_ctx);
3309
3310 done:
3311 if (ret == AOK)
3312 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3313 else if (new_ctx)
3314 dplane_ctx_free(&new_ctx);
3315
3316 return result;
3317 }
3318
3319 /*
3320 * Enqueue a nexthop add for the dataplane.
3321 */
3322 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
3323 {
3324 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3325
3326 if (nhe)
3327 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
3328 return ret;
3329 }
3330
3331 /*
3332 * Enqueue a nexthop update for the dataplane.
3333 *
3334 * Might not need this func since zebra's nexthop objects should be immutable?
3335 */
3336 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
3337 {
3338 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3339
3340 if (nhe)
3341 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
3342 return ret;
3343 }
3344
3345 /*
3346 * Enqueue a nexthop removal for the dataplane.
3347 */
3348 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
3349 {
3350 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3351
3352 if (nhe)
3353 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
3354
3355 return ret;
3356 }
3357
3358 /*
3359 * Enqueue LSP add for the dataplane.
3360 */
3361 enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
3362 {
3363 enum zebra_dplane_result ret =
3364 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
3365
3366 return ret;
3367 }
3368
3369 /*
3370 * Enqueue LSP update for the dataplane.
3371 */
3372 enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
3373 {
3374 enum zebra_dplane_result ret =
3375 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
3376
3377 return ret;
3378 }
3379
3380 /*
3381 * Enqueue LSP delete for the dataplane.
3382 */
3383 enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
3384 {
3385 enum zebra_dplane_result ret =
3386 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
3387
3388 return ret;
3389 }
3390
3391 /* Update or un-install resulting from an async notification */
3392 enum zebra_dplane_result
3393 dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
3394 struct zebra_dplane_ctx *notif_ctx)
3395 {
3396 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3397 int ret = EINVAL;
3398 struct zebra_dplane_ctx *ctx = NULL;
3399 struct nhlfe_list_head *head;
3400 struct zebra_nhlfe *nhlfe, *new_nhlfe;
3401
3402 /* Obtain context block */
3403 ctx = dplane_ctx_alloc();
3404 if (ctx == NULL) {
3405 ret = ENOMEM;
3406 goto done;
3407 }
3408
3409 /* Copy info from zebra LSP */
3410 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3411 if (ret != AOK)
3412 goto done;
3413
3414 /* Add any installed backup nhlfes */
3415 head = &(ctx->u.lsp.backup_nhlfe_list);
3416 frr_each(nhlfe_list, head, nhlfe) {
3417
3418 if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) &&
3419 CHECK_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_FIB)) {
3420 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp),
3421 nhlfe->type,
3422 nhlfe->nexthop);
3423
3424 /* Need to copy flags too */
3425 new_nhlfe->flags = nhlfe->flags;
3426 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
3427 }
3428 }
3429
3430 /* Capture info about the source of the notification */
3431 dplane_ctx_set_notif_provider(
3432 ctx,
3433 dplane_ctx_get_notif_provider(notif_ctx));
3434
3435 ret = dplane_update_enqueue(ctx);
3436
3437 done:
3438 /* Update counter */
3439 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3440 memory_order_relaxed);
3441
3442 if (ret == AOK)
3443 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3444 else {
3445 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3446 memory_order_relaxed);
3447 if (ctx)
3448 dplane_ctx_free(&ctx);
3449 }
3450 return result;
3451 }
3452
3453 /*
3454 * Enqueue pseudowire install for the dataplane.
3455 */
3456 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
3457 {
3458 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
3459 }
3460
3461 /*
3462 * Enqueue pseudowire un-install for the dataplane.
3463 */
3464 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
3465 {
3466 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
3467 }
3468
3469 /*
3470 * Common internal LSP update utility
3471 */
3472 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
3473 enum dplane_op_e op)
3474 {
3475 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3476 int ret = EINVAL;
3477 struct zebra_dplane_ctx *ctx = NULL;
3478
3479 /* Obtain context block */
3480 ctx = dplane_ctx_alloc();
3481
3482 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3483 if (ret != AOK)
3484 goto done;
3485
3486 ret = dplane_update_enqueue(ctx);
3487
3488 done:
3489 /* Update counter */
3490 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3491 memory_order_relaxed);
3492
3493 if (ret == AOK)
3494 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3495 else {
3496 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3497 memory_order_relaxed);
3498 dplane_ctx_free(&ctx);
3499 }
3500
3501 return result;
3502 }
3503
3504 /*
3505 * Internal, common handler for pseudowire updates.
3506 */
3507 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
3508 enum dplane_op_e op)
3509 {
3510 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3511 int ret;
3512 struct zebra_dplane_ctx *ctx = NULL;
3513
3514 ctx = dplane_ctx_alloc();
3515
3516 ret = dplane_ctx_pw_init(ctx, op, pw);
3517 if (ret != AOK)
3518 goto done;
3519
3520 ret = dplane_update_enqueue(ctx);
3521
3522 done:
3523 /* Update counter */
3524 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
3525 memory_order_relaxed);
3526
3527 if (ret == AOK)
3528 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3529 else {
3530 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
3531 memory_order_relaxed);
3532 dplane_ctx_free(&ctx);
3533 }
3534
3535 return result;
3536 }
3537
3538 /*
3539 * Enqueue access br_port update.
3540 */
3541 enum zebra_dplane_result
3542 dplane_br_port_update(const struct interface *ifp, bool non_df,
3543 uint32_t sph_filter_cnt,
3544 const struct in_addr *sph_filters, uint32_t backup_nhg_id)
3545 {
3546 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3547 uint32_t flags = 0;
3548 int ret;
3549 struct zebra_dplane_ctx *ctx = NULL;
3550 struct zebra_ns *zns;
3551 enum dplane_op_e op = DPLANE_OP_BR_PORT_UPDATE;
3552
3553 if (non_df)
3554 flags |= DPLANE_BR_PORT_NON_DF;
3555
3556 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_EVPN_MH_ES) {
3557 uint32_t i;
3558 char vtep_str[ES_VTEP_LIST_STR_SZ];
3559
3560 vtep_str[0] = '\0';
3561 for (i = 0; i < sph_filter_cnt; ++i) {
3562 snprintfrr(vtep_str + strlen(vtep_str),
3563 sizeof(vtep_str) - strlen(vtep_str), "%pI4 ",
3564 &sph_filters[i]);
3565 }
3566 zlog_debug(
3567 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3568 dplane_op2str(op), ifp->name, flags, backup_nhg_id,
3569 vtep_str);
3570 }
3571
3572 ctx = dplane_ctx_alloc();
3573
3574 ctx->zd_op = op;
3575 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3576 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3577
3578 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3579 dplane_ctx_ns_init(ctx, zns, false);
3580
3581 ctx->zd_ifindex = ifp->ifindex;
3582 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3583
3584 /* Init the br-port-specific data area */
3585 memset(&ctx->u.br_port, 0, sizeof(ctx->u.br_port));
3586
3587 ctx->u.br_port.flags = flags;
3588 ctx->u.br_port.backup_nhg_id = backup_nhg_id;
3589 ctx->u.br_port.sph_filter_cnt = sph_filter_cnt;
3590 memcpy(ctx->u.br_port.sph_filters, sph_filters,
3591 sizeof(ctx->u.br_port.sph_filters[0]) * sph_filter_cnt);
3592
3593 /* Enqueue for processing on the dplane pthread */
3594 ret = dplane_update_enqueue(ctx);
3595
3596 /* Increment counter */
3597 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_in, 1,
3598 memory_order_relaxed);
3599
3600 if (ret == AOK) {
3601 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3602 } else {
3603 /* Error counter */
3604 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_errors, 1,
3605 memory_order_relaxed);
3606 dplane_ctx_free(&ctx);
3607 }
3608
3609 return result;
3610 }
3611
3612 /*
3613 * Enqueue interface address add for the dataplane.
3614 */
3615 enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
3616 const struct connected *ifc)
3617 {
3618 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3619 /* Extra checks for this OS path. */
3620
3621 /* Don't configure PtP addresses on broadcast ifs or reverse */
3622 if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
3623 if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
3624 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3625
3626 return ZEBRA_DPLANE_REQUEST_FAILURE;
3627 }
3628 #endif
3629
3630 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
3631 }
3632
3633 /*
3634 * Enqueue interface address remove/uninstall for the dataplane.
3635 */
3636 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
3637 const struct connected *ifc)
3638 {
3639 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
3640 }
3641
3642 static enum zebra_dplane_result intf_addr_update_internal(
3643 const struct interface *ifp, const struct connected *ifc,
3644 enum dplane_op_e op)
3645 {
3646 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3647 int ret = EINVAL;
3648 struct zebra_dplane_ctx *ctx = NULL;
3649 struct zebra_ns *zns;
3650
3651 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3652 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3653 dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
3654 ifc->address);
3655
3656 ctx = dplane_ctx_alloc();
3657
3658 ctx->zd_op = op;
3659 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3660 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3661
3662 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3663 dplane_ctx_ns_init(ctx, zns, false);
3664
3665 /* Init the interface-addr-specific area */
3666 memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
3667
3668 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3669 ctx->zd_ifindex = ifp->ifindex;
3670 ctx->u.intf.prefix = *(ifc->address);
3671
3672 if (if_is_broadcast(ifp))
3673 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
3674
3675 if (CONNECTED_PEER(ifc)) {
3676 ctx->u.intf.dest_prefix = *(ifc->destination);
3677 ctx->u.intf.flags |=
3678 (DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
3679 }
3680
3681 if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
3682 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
3683
3684 if (ifc->label) {
3685 size_t len;
3686
3687 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
3688
3689 /* Use embedded buffer if it's adequate; else allocate. */
3690 len = strlen(ifc->label);
3691
3692 if (len < sizeof(ctx->u.intf.label_buf)) {
3693 strlcpy(ctx->u.intf.label_buf, ifc->label,
3694 sizeof(ctx->u.intf.label_buf));
3695 ctx->u.intf.label = ctx->u.intf.label_buf;
3696 } else {
3697 ctx->u.intf.label = strdup(ifc->label);
3698 }
3699 }
3700
3701 ret = dplane_update_enqueue(ctx);
3702
3703 /* Increment counter */
3704 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
3705 memory_order_relaxed);
3706
3707 if (ret == AOK)
3708 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3709 else {
3710 /* Error counter */
3711 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3712 1, memory_order_relaxed);
3713 dplane_ctx_free(&ctx);
3714 }
3715
3716 return result;
3717 }
3718
3719 /*
3720 * Enqueue vxlan/evpn mac add (or update).
3721 */
3722 enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
3723 const struct interface *bridge_ifp,
3724 vlanid_t vid,
3725 const struct ethaddr *mac,
3726 struct in_addr vtep_ip,
3727 bool sticky,
3728 uint32_t nhg_id,
3729 bool was_static)
3730 {
3731 enum zebra_dplane_result result;
3732 uint32_t update_flags = 0;
3733
3734 update_flags |= DPLANE_MAC_REMOTE;
3735 if (was_static)
3736 update_flags |= DPLANE_MAC_WAS_STATIC;
3737
3738 /* Use common helper api */
3739 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
3740 vid, mac, vtep_ip, sticky, nhg_id, update_flags);
3741 return result;
3742 }
3743
3744 /*
3745 * Enqueue vxlan/evpn mac delete.
3746 */
3747 enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
3748 const struct interface *bridge_ifp,
3749 vlanid_t vid,
3750 const struct ethaddr *mac,
3751 struct in_addr vtep_ip)
3752 {
3753 enum zebra_dplane_result result;
3754 uint32_t update_flags = 0;
3755
3756 update_flags |= DPLANE_MAC_REMOTE;
3757
3758 /* Use common helper api */
3759 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
3760 vid, mac, vtep_ip, false, 0, update_flags);
3761 return result;
3762 }
3763
3764 /*
3765 * API to configure link local with either MAC address or IP information
3766 */
3767 enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
3768 const struct interface *ifp,
3769 struct ipaddr *link_ip,
3770 struct ipaddr *ip,
3771 uint32_t ndm_state, int protocol)
3772 {
3773 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3774 uint16_t state = 0;
3775 uint32_t update_flags;
3776
3777 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3778 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
3779 __func__, dplane_op2str(op), ifp->name, link_ip, ip);
3780
3781 if (ndm_state == ZEBRA_NEIGH_STATE_REACHABLE)
3782 state = DPLANE_NUD_REACHABLE;
3783 else if (ndm_state == ZEBRA_NEIGH_STATE_FAILED)
3784 state = DPLANE_NUD_FAILED;
3785
3786 update_flags = DPLANE_NEIGH_NO_EXTENSION;
3787
3788 result = neigh_update_internal(op, ifp, (const void *)link_ip,
3789 ipaddr_family(link_ip), ip, 0, state,
3790 update_flags, protocol);
3791
3792 return result;
3793 }
3794
3795 /*
3796 * Enqueue local mac add (or update).
3797 */
3798 enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
3799 const struct interface *bridge_ifp,
3800 vlanid_t vid,
3801 const struct ethaddr *mac,
3802 bool sticky,
3803 uint32_t set_static,
3804 uint32_t set_inactive)
3805 {
3806 enum zebra_dplane_result result;
3807 uint32_t update_flags = 0;
3808 struct in_addr vtep_ip;
3809
3810 if (set_static)
3811 update_flags |= DPLANE_MAC_SET_STATIC;
3812
3813 if (set_inactive)
3814 update_flags |= DPLANE_MAC_SET_INACTIVE;
3815
3816 vtep_ip.s_addr = 0;
3817
3818 /* Use common helper api */
3819 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
3820 vid, mac, vtep_ip, sticky, 0,
3821 update_flags);
3822 return result;
3823 }
3824
3825 /*
3826 * Enqueue local mac del
3827 */
3828 enum zebra_dplane_result
3829 dplane_local_mac_del(const struct interface *ifp,
3830 const struct interface *bridge_ifp, vlanid_t vid,
3831 const struct ethaddr *mac)
3832 {
3833 enum zebra_dplane_result result;
3834 struct in_addr vtep_ip;
3835
3836 vtep_ip.s_addr = 0;
3837
3838 /* Use common helper api */
3839 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, vid,
3840 mac, vtep_ip, false, 0, 0);
3841 return result;
3842 }
3843 /*
3844 * Public api to init an empty context - either newly-allocated or
3845 * reset/cleared - for a MAC update.
3846 */
3847 void dplane_mac_init(struct zebra_dplane_ctx *ctx,
3848 const struct interface *ifp,
3849 const struct interface *br_ifp,
3850 vlanid_t vid,
3851 const struct ethaddr *mac,
3852 struct in_addr vtep_ip,
3853 bool sticky,
3854 uint32_t nhg_id,
3855 uint32_t update_flags)
3856 {
3857 struct zebra_ns *zns;
3858
3859 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3860 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3861
3862 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3863 dplane_ctx_ns_init(ctx, zns, false);
3864
3865 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3866 ctx->zd_ifindex = ifp->ifindex;
3867
3868 /* Init the mac-specific data area */
3869 memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
3870
3871 ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
3872 ctx->u.macinfo.vtep_ip = vtep_ip;
3873 ctx->u.macinfo.mac = *mac;
3874 ctx->u.macinfo.vid = vid;
3875 ctx->u.macinfo.is_sticky = sticky;
3876 ctx->u.macinfo.nhg_id = nhg_id;
3877 ctx->u.macinfo.update_flags = update_flags;
3878 }
3879
3880 /*
3881 * Common helper api for MAC address/vxlan updates
3882 */
3883 static enum zebra_dplane_result
3884 mac_update_common(enum dplane_op_e op,
3885 const struct interface *ifp,
3886 const struct interface *br_ifp,
3887 vlanid_t vid,
3888 const struct ethaddr *mac,
3889 struct in_addr vtep_ip,
3890 bool sticky,
3891 uint32_t nhg_id,
3892 uint32_t update_flags)
3893 {
3894 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3895 int ret;
3896 struct zebra_dplane_ctx *ctx = NULL;
3897
3898 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3899 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
3900 dplane_op2str(op), mac, ifp->name, &vtep_ip);
3901
3902 ctx = dplane_ctx_alloc();
3903 ctx->zd_op = op;
3904
3905 /* Common init for the ctx */
3906 dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky,
3907 nhg_id, update_flags);
3908
3909 /* Enqueue for processing on the dplane pthread */
3910 ret = dplane_update_enqueue(ctx);
3911
3912 /* Increment counter */
3913 atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
3914 memory_order_relaxed);
3915
3916 if (ret == AOK)
3917 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3918 else {
3919 /* Error counter */
3920 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
3921 memory_order_relaxed);
3922 dplane_ctx_free(&ctx);
3923 }
3924
3925 return result;
3926 }
3927
3928 /*
3929 * Enqueue evpn neighbor add for the dataplane.
3930 */
3931 enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
3932 const struct ipaddr *ip,
3933 const struct ethaddr *mac,
3934 uint32_t flags, bool was_static)
3935 {
3936 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3937 uint32_t update_flags = 0;
3938
3939 update_flags |= DPLANE_NEIGH_REMOTE;
3940
3941 if (was_static)
3942 update_flags |= DPLANE_NEIGH_WAS_STATIC;
3943
3944 result = neigh_update_internal(
3945 DPLANE_OP_NEIGH_INSTALL, ifp, (const void *)mac, AF_ETHERNET,
3946 ip, flags, DPLANE_NUD_NOARP, update_flags, 0);
3947
3948 return result;
3949 }
3950
3951 /*
3952 * Enqueue local neighbor add for the dataplane.
3953 */
3954 enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
3955 const struct ipaddr *ip,
3956 const struct ethaddr *mac,
3957 bool set_router, bool set_static,
3958 bool set_inactive)
3959 {
3960 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3961 uint32_t update_flags = 0;
3962 uint32_t ntf = 0;
3963 uint16_t state;
3964
3965 if (set_static)
3966 update_flags |= DPLANE_NEIGH_SET_STATIC;
3967
3968 if (set_inactive) {
3969 update_flags |= DPLANE_NEIGH_SET_INACTIVE;
3970 state = DPLANE_NUD_STALE;
3971 } else {
3972 state = DPLANE_NUD_REACHABLE;
3973 }
3974
3975 if (set_router)
3976 ntf |= DPLANE_NTF_ROUTER;
3977
3978 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL, ifp,
3979 (const void *)mac, AF_ETHERNET, ip, ntf,
3980 state, update_flags, 0);
3981
3982 return result;
3983 }
3984
3985 /*
3986 * Enqueue evpn neighbor delete for the dataplane.
3987 */
3988 enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
3989 const struct ipaddr *ip)
3990 {
3991 enum zebra_dplane_result result;
3992 uint32_t update_flags = 0;
3993
3994 update_flags |= DPLANE_NEIGH_REMOTE;
3995
3996 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE, ifp, NULL,
3997 AF_ETHERNET, ip, 0, 0, update_flags, 0);
3998
3999 return result;
4000 }
4001
4002 /*
4003 * Enqueue evpn VTEP add for the dataplane.
4004 */
4005 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
4006 const struct in_addr *ip,
4007 vni_t vni)
4008 {
4009 enum zebra_dplane_result result;
4010 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4011 struct ipaddr addr;
4012
4013 if (IS_ZEBRA_DEBUG_VXLAN)
4014 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4015 ip, vni, ifp->name, ifp->ifindex);
4016
4017 SET_IPADDR_V4(&addr);
4018 addr.ipaddr_v4 = *ip;
4019
4020 result = neigh_update_internal(DPLANE_OP_VTEP_ADD, ifp, &mac,
4021 AF_ETHERNET, &addr, 0, 0, 0, 0);
4022
4023 return result;
4024 }
4025
4026 /*
4027 * Enqueue evpn VTEP delete for the dataplane.
4028 */
4029 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
4030 const struct in_addr *ip,
4031 vni_t vni)
4032 {
4033 enum zebra_dplane_result result;
4034 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4035 struct ipaddr addr;
4036
4037 if (IS_ZEBRA_DEBUG_VXLAN)
4038 zlog_debug(
4039 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4040 ip, vni, ifp->name, ifp->ifindex);
4041
4042 SET_IPADDR_V4(&addr);
4043 addr.ipaddr_v4 = *ip;
4044
4045 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE, ifp,
4046 (const void *)&mac, AF_ETHERNET, &addr,
4047 0, 0, 0, 0);
4048
4049 return result;
4050 }
4051
4052 enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
4053 const struct ipaddr *ip)
4054 {
4055 enum zebra_dplane_result result;
4056
4057 result = neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER, ifp, NULL,
4058 AF_ETHERNET, ip, DPLANE_NTF_USE,
4059 DPLANE_NUD_INCOMPLETE, 0, 0);
4060
4061 return result;
4062 }
4063
4064 enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
4065 const uint8_t family,
4066 const uint32_t app_probes,
4067 const uint32_t ucast_probes,
4068 const uint32_t mcast_probes)
4069 {
4070 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4071 int ret;
4072 struct zebra_dplane_ctx *ctx = NULL;
4073 struct zebra_ns *zns;
4074 enum dplane_op_e op = DPLANE_OP_NEIGH_TABLE_UPDATE;
4075
4076 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4077 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4078 dplane_op2str(op), ifp->name, family2str(family));
4079 }
4080
4081 ctx = dplane_ctx_alloc();
4082
4083 ctx->zd_op = op;
4084 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4085 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4086
4087 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4088 dplane_ctx_ns_init(ctx, zns, false);
4089
4090 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4091 ctx->zd_ifindex = ifp->ifindex;
4092
4093 /* Init the neighbor-specific data area */
4094 memset(&ctx->u.neightable, 0, sizeof(ctx->u.neightable));
4095
4096 ctx->u.neightable.family = family;
4097 ctx->u.neightable.app_probes = app_probes;
4098 ctx->u.neightable.ucast_probes = ucast_probes;
4099 ctx->u.neightable.mcast_probes = mcast_probes;
4100
4101 /* Enqueue for processing on the dplane pthread */
4102 ret = dplane_update_enqueue(ctx);
4103
4104 /* Increment counter */
4105 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_in, 1,
4106 memory_order_relaxed);
4107
4108 if (ret == AOK)
4109 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4110 else {
4111 /* Error counter */
4112 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_errors, 1,
4113 memory_order_relaxed);
4114 dplane_ctx_free(&ctx);
4115 }
4116
4117 return result;
4118 }
4119
4120 /*
4121 * Common helper api for neighbor updates
4122 */
4123 static enum zebra_dplane_result
4124 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
4125 const void *link, const int link_family,
4126 const struct ipaddr *ip, uint32_t flags, uint16_t state,
4127 uint32_t update_flags, int protocol)
4128 {
4129 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4130 int ret;
4131 struct zebra_dplane_ctx *ctx = NULL;
4132 struct zebra_ns *zns;
4133 const struct ethaddr *mac = NULL;
4134 const struct ipaddr *link_ip = NULL;
4135
4136 if (link_family == AF_ETHERNET)
4137 mac = (const struct ethaddr *)link;
4138 else
4139 link_ip = (const struct ipaddr *)link;
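	/* 'link' is either a MAC (AF_ETHERNET) or an IP address, depending on
	 * the caller; only the matching member of u.neigh.link is filled in
	 * below.
	 */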
4140
4141 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4142 char buf1[PREFIX_STRLEN];
4143
4144 buf1[0] = '\0';
4145 if (link_family == AF_ETHERNET)
4146 prefix_mac2str(mac, buf1, sizeof(buf1));
4147 else
4148 ipaddr2str(link_ip, buf1, sizeof(buf1));
4149 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4150 dplane_op2str(op), ifp->name,
4151 link_family == AF_ETHERNET ? "mac " : "link ",
4152 buf1, ip);
4153 }
4154
4155 ctx = dplane_ctx_alloc();
4156
4157 ctx->zd_op = op;
4158 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4159 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4160 dplane_ctx_set_type(ctx, protocol);
4161
4162 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4163 dplane_ctx_ns_init(ctx, zns, false);
4164
4165 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4166 ctx->zd_ifindex = ifp->ifindex;
4167
4168 /* Init the neighbor-specific data area */
4169 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
4170
4171 ctx->u.neigh.ip_addr = *ip;
4172 if (mac)
4173 ctx->u.neigh.link.mac = *mac;
4174 else if (link_ip)
4175 ctx->u.neigh.link.ip_addr = *link_ip;
4176
4177 ctx->u.neigh.flags = flags;
4178 ctx->u.neigh.state = state;
4179 ctx->u.neigh.update_flags = update_flags;
4180
4181 /* Enqueue for processing on the dplane pthread */
4182 ret = dplane_update_enqueue(ctx);
4183
4184 /* Increment counter */
4185 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
4186 memory_order_relaxed);
4187
4188 if (ret == AOK)
4189 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4190 else {
4191 /* Error counter */
4192 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
4193 memory_order_relaxed);
4194 dplane_ctx_free(&ctx);
4195 }
4196
4197 return result;
4198 }
4199
4200 /*
4201 * Common helper api for PBR rule updates
4202 */
4203 static enum zebra_dplane_result
4204 rule_update_internal(enum dplane_op_e op, struct zebra_pbr_rule *new_rule,
4205 struct zebra_pbr_rule *old_rule)
4206 {
4207 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4208 struct zebra_dplane_ctx *ctx;
4209 int ret;
4210
4211 ctx = dplane_ctx_alloc();
4212
4213 ret = dplane_ctx_rule_init(ctx, op, new_rule, old_rule);
4214 if (ret != AOK)
4215 goto done;
4216
4217 ret = dplane_update_enqueue(ctx);
4218
4219 done:
4220 atomic_fetch_add_explicit(&zdplane_info.dg_rules_in, 1,
4221 memory_order_relaxed);
4222
4223 if (ret == AOK)
4224 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4225 else {
4226 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors, 1,
4227 memory_order_relaxed);
4228 dplane_ctx_free(&ctx);
4229 }
4230
4231 return result;
4232 }
4233
4234 enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule)
4235 {
4236 return rule_update_internal(DPLANE_OP_RULE_ADD, rule, NULL);
4237 }
4238
4239 enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule)
4240 {
4241 return rule_update_internal(DPLANE_OP_RULE_DELETE, rule, NULL);
4242 }
4243
4244 enum zebra_dplane_result dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
4245 struct zebra_pbr_rule *new_rule)
4246 {
4247 return rule_update_internal(DPLANE_OP_RULE_UPDATE, new_rule, old_rule);
4248 }
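
/*
 * Usage note (editorial sketch, not upstream code): the three thin
 * wrappers above are the public entry points for PBR rule programming.
 * A hypothetical caller checks only whether the update was queued:
 *
 *     if (dplane_pbr_rule_add(rule) == ZEBRA_DPLANE_REQUEST_FAILURE)
 *         zlog_debug("PBR rule update could not be enqueued");
 *
 * A QUEUED result only means the context was handed to the dplane
 * pthread; the final status arrives later via the registered results
 * callback.
 */
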
4249 /*
4250 * Common helper api for iptable updates
4251 */
4252 static enum zebra_dplane_result
4253 iptable_update_internal(enum dplane_op_e op, struct zebra_pbr_iptable *iptable)
4254 {
4255 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4256 struct zebra_dplane_ctx *ctx;
4257 int ret;
4258
4259 ctx = dplane_ctx_alloc();
4260
4261 ret = dplane_ctx_iptable_init(ctx, op, iptable);
4262 if (ret != AOK)
4263 goto done;
4264
4265 ret = dplane_update_enqueue(ctx);
4266
4267 done:
4268 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_in, 1,
4269 memory_order_relaxed);
4270
4271 if (ret == AOK)
4272 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4273 else {
4274 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_errors, 1,
4275 memory_order_relaxed);
4276 dplane_ctx_free(&ctx);
4277 }
4278
4279 return result;
4280 }
4281
4282 enum zebra_dplane_result
4283 dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable)
4284 {
4285 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD, iptable);
4286 }
4287
4288 enum zebra_dplane_result
4289 dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable)
4290 {
4291 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE, iptable);
4292 }
4293
4294 /*
4295 * Common helper api for ipset updates
4296 */
4297 static enum zebra_dplane_result
4298 ipset_update_internal(enum dplane_op_e op, struct zebra_pbr_ipset *ipset)
4299 {
4300 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4301 struct zebra_dplane_ctx *ctx;
4302 int ret;
4303
4304 ctx = dplane_ctx_alloc();
4305
4306 ret = dplane_ctx_ipset_init(ctx, op, ipset);
4307 if (ret != AOK)
4308 goto done;
4309
4310 ret = dplane_update_enqueue(ctx);
4311
4312 done:
4313 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_in, 1,
4314 memory_order_relaxed);
4315
4316 if (ret == AOK)
4317 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4318 else {
4319 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors, 1,
4320 memory_order_relaxed);
4321 dplane_ctx_free(&ctx);
4322 }
4323
4324 return result;
4325 }
4326
4327 enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset)
4328 {
4329 return ipset_update_internal(DPLANE_OP_IPSET_ADD, ipset);
4330 }
4331
4332 enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset)
4333 {
4334 return ipset_update_internal(DPLANE_OP_IPSET_DELETE, ipset);
4335 }
4336
4337 /*
4338 * Common helper api for ipset entry updates
4339 */
4340 static enum zebra_dplane_result
4341 ipset_entry_update_internal(enum dplane_op_e op,
4342 struct zebra_pbr_ipset_entry *ipset_entry)
4343 {
4344 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4345 struct zebra_dplane_ctx *ctx;
4346 int ret;
4347
4348 ctx = dplane_ctx_alloc();
4349
4350 ret = dplane_ctx_ipset_entry_init(ctx, op, ipset_entry);
4351 if (ret != AOK)
4352 goto done;
4353
4354 ret = dplane_update_enqueue(ctx);
4355
4356 done:
4357 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_in, 1,
4358 memory_order_relaxed);
4359
4360 if (ret == AOK)
4361 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4362 else {
4363 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_errors,
4364 1, memory_order_relaxed);
4365 dplane_ctx_free(&ctx);
4366 }
4367
4368 return result;
4369 }
4370
4371 enum zebra_dplane_result
4372 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset)
4373 {
4374 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD, ipset);
4375 }
4376
4377 enum zebra_dplane_result
4378 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset)
4379 {
4380 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE, ipset);
4381 }
4382
4383 /*
4384 * Common helper api for GRE set
4385 */
4386 enum zebra_dplane_result
4387 dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
4388 unsigned int mtu, const struct zebra_l2info_gre *gre_info)
4389 {
4390 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4391 struct zebra_dplane_ctx *ctx;
4392 enum dplane_op_e op = DPLANE_OP_GRE_SET;
4393 int ret;
4394 struct zebra_ns *zns;
4395
4396 if (!ifp)
4397 return result;
4398
4399 ctx = dplane_ctx_alloc();
4400
4401 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4402 zlog_debug("init dplane ctx %s: if %s link %s%s",
4403 dplane_op2str(op), ifp->name,
4404 ifp_link ? "set" : "unset", ifp_link ?
4405 ifp_link->name : "");
4406 }
4407
4408 ctx->zd_op = op;
4409 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4410 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4411 if (!zns) {
4412 dplane_ctx_free(&ctx);
4413 return result;
4414 }
4415 dplane_ctx_ns_init(ctx, zns, false);
4416
4417 dplane_ctx_set_ifname(ctx, ifp->name);
4418 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4419 ctx->zd_ifindex = ifp->ifindex;
4420 if (ifp_link)
4421 ctx->u.gre.link_ifindex = ifp_link->ifindex;
4422 else
4423 ctx->u.gre.link_ifindex = 0;
4424 if (gre_info)
4425 memcpy(&ctx->u.gre.info, gre_info, sizeof(ctx->u.gre.info));
4426 ctx->u.gre.mtu = mtu;
4427
4428 /* Enqueue context for processing */
4429 ret = dplane_update_enqueue(ctx);
4430
4431 /* Update counter */
4432 atomic_fetch_add_explicit(&zdplane_info.dg_gre_set_in, 1,
4433 memory_order_relaxed);
4434
4435 if (ret == AOK)
4436 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4437 else {
4438 atomic_fetch_add_explicit(
4439 &zdplane_info.dg_gre_set_errors, 1,
4440 memory_order_relaxed);
4441 if (ctx)
4442 dplane_ctx_free(&ctx);
4443 result = ZEBRA_DPLANE_REQUEST_FAILURE;
4444 }
4445 return result;
4446 }
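
/*
 * Usage sketch (hypothetical caller, not upstream code): zebra's
 * interface handling can request a GRE tunnel update roughly as:
 *
 *     if (dplane_gre_set(ifp, link_ifp, mtu, &gre_info)
 *         != ZEBRA_DPLANE_REQUEST_QUEUED)
 *         zlog_debug("GRE set for %s not queued", ifp->name);
 *
 * Passing a NULL ifp_link clears the tunnel's link ifindex; a NULL
 * gre_info simply skips copying tunnel parameters into the context.
 */
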
4447
4448 /*
4449 * Handler for 'show dplane'
4450 */
4451 int dplane_show_helper(struct vty *vty, bool detailed)
4452 {
4453 uint64_t queued, queue_max, limit, errs, incoming, yields,
4454 other_errs;
4455
4456 /* Using atomics because counters are being changed in different
4457 * pthread contexts.
4458 */
4459 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
4460 memory_order_relaxed);
4461 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
4462 memory_order_relaxed);
4463 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
4464 memory_order_relaxed);
4465 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4466 memory_order_relaxed);
4467 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
4468 memory_order_relaxed);
4469 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
4470 memory_order_relaxed);
4471 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
4472 memory_order_relaxed);
4473
4474 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
4475 incoming);
4476 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
4477 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
4478 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
4479 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4480 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
4481 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
4482
4483 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
4484 memory_order_relaxed);
4485 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
4486 memory_order_relaxed);
4487 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
4488 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
4489
4490 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
4491 memory_order_relaxed);
4492 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
4493 memory_order_relaxed);
4494 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
4495 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
4496
4497 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
4498 memory_order_relaxed);
4499 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
4500 memory_order_relaxed);
4501 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
4502 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
4503
4504 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
4505 memory_order_relaxed);
4506 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
4507 memory_order_relaxed);
4508 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
4509 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
4510
4511 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
4512 memory_order_relaxed);
4513 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
4514 memory_order_relaxed);
4515 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
4516 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
4517
4518 incoming = atomic_load_explicit(&zdplane_info.dg_rules_in,
4519 memory_order_relaxed);
4520 errs = atomic_load_explicit(&zdplane_info.dg_rule_errors,
4521 memory_order_relaxed);
4522 vty_out(vty, "Rule updates: %" PRIu64 "\n", incoming);
4523 vty_out(vty, "Rule errors: %" PRIu64 "\n", errs);
4524
4525 incoming = atomic_load_explicit(&zdplane_info.dg_br_port_in,
4526 memory_order_relaxed);
4527 errs = atomic_load_explicit(&zdplane_info.dg_br_port_errors,
4528 memory_order_relaxed);
4529 vty_out(vty, "Bridge port updates: %" PRIu64 "\n", incoming);
4530 vty_out(vty, "Bridge port errors: %" PRIu64 "\n", errs);
4531
4532 incoming = atomic_load_explicit(&zdplane_info.dg_iptable_in,
4533 memory_order_relaxed);
4534 errs = atomic_load_explicit(&zdplane_info.dg_iptable_errors,
4535 memory_order_relaxed);
4536 vty_out(vty, "IPtable updates: %" PRIu64 "\n", incoming);
4537 vty_out(vty, "IPtable errors: %" PRIu64 "\n", errs);
4538 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_in,
4539 memory_order_relaxed);
4540 errs = atomic_load_explicit(&zdplane_info.dg_ipset_errors,
4541 memory_order_relaxed);
4542 vty_out(vty, "IPset updates: %" PRIu64 "\n", incoming);
4543 vty_out(vty, "IPset errors: %" PRIu64 "\n", errs);
4544 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_entry_in,
4545 memory_order_relaxed);
4546 errs = atomic_load_explicit(&zdplane_info.dg_ipset_entry_errors,
4547 memory_order_relaxed);
4548 vty_out(vty, "IPset entry updates: %" PRIu64 "\n", incoming);
4549 vty_out(vty, "IPset entry errors: %" PRIu64 "\n", errs);
4550
4551 incoming = atomic_load_explicit(&zdplane_info.dg_neightable_in,
4552 memory_order_relaxed);
4553 errs = atomic_load_explicit(&zdplane_info.dg_neightable_errors,
4554 memory_order_relaxed);
4555 vty_out(vty, "Neighbor Table updates: %"PRIu64"\n", incoming);
4556 vty_out(vty, "Neighbor Table errors: %"PRIu64"\n", errs);
4557
4558 incoming = atomic_load_explicit(&zdplane_info.dg_gre_set_in,
4559 memory_order_relaxed);
4560 errs = atomic_load_explicit(&zdplane_info.dg_gre_set_errors,
4561 memory_order_relaxed);
4562 vty_out(vty, "GRE set updates: %"PRIu64"\n", incoming);
4563 vty_out(vty, "GRE set errors: %"PRIu64"\n", errs);
4564 return CMD_SUCCESS;
4565 }
4566
4567 /*
4568 * Handler for 'show dplane providers'
4569 */
4570 int dplane_show_provs_helper(struct vty *vty, bool detailed)
4571 {
4572 struct zebra_dplane_provider *prov;
4573 uint64_t in, in_q, in_max, out, out_q, out_max;
4574
4575 vty_out(vty, "Zebra dataplane providers:\n");
4576
4577 DPLANE_LOCK();
4578 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4579 DPLANE_UNLOCK();
4580
4581 /* Show counters, useful info from each registered provider */
4582 while (prov) {
4583
4584 in = atomic_load_explicit(&prov->dp_in_counter,
4585 memory_order_relaxed);
4586 in_q = atomic_load_explicit(&prov->dp_in_queued,
4587 memory_order_relaxed);
4588 in_max = atomic_load_explicit(&prov->dp_in_max,
4589 memory_order_relaxed);
4590 out = atomic_load_explicit(&prov->dp_out_counter,
4591 memory_order_relaxed);
4592 out_q = atomic_load_explicit(&prov->dp_out_queued,
4593 memory_order_relaxed);
4594 out_max = atomic_load_explicit(&prov->dp_out_max,
4595 memory_order_relaxed);
4596
4597 vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n",
4598 prov->dp_name, prov->dp_id, in, in_q, in_max,
4599 out, out_q, out_max);
4600
4601 DPLANE_LOCK();
4602 prov = TAILQ_NEXT(prov, dp_prov_link);
4603 DPLANE_UNLOCK();
4604 }
4605
4606 return CMD_SUCCESS;
4607 }
4608
4609 /*
4610 * Helper for 'show run' etc.
4611 */
4612 int dplane_config_write_helper(struct vty *vty)
4613 {
4614 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
4615 vty_out(vty, "zebra dplane limit %u\n",
4616 zdplane_info.dg_max_queued_updates);
4617
4618 return 0;
4619 }
4620
4621 /*
4622 * Provider registration
4623 */
4624 int dplane_provider_register(const char *name,
4625 enum dplane_provider_prio prio,
4626 int flags,
4627 int (*start_fp)(struct zebra_dplane_provider *),
4628 int (*fp)(struct zebra_dplane_provider *),
4629 int (*fini_fp)(struct zebra_dplane_provider *,
4630 bool early),
4631 void *data,
4632 struct zebra_dplane_provider **prov_p)
4633 {
4634 int ret = 0;
4635 struct zebra_dplane_provider *p = NULL, *last;
4636
4637 /* Validate */
4638 if (fp == NULL) {
4639 ret = EINVAL;
4640 goto done;
4641 }
4642
4643 if (prio <= DPLANE_PRIO_NONE ||
4644 prio > DPLANE_PRIO_LAST) {
4645 ret = EINVAL;
4646 goto done;
4647 }
4648
4649 /* Allocate and init new provider struct */
4650 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
4651
4652 pthread_mutex_init(&(p->dp_mutex), NULL);
4653 TAILQ_INIT(&(p->dp_ctx_in_q));
4654 TAILQ_INIT(&(p->dp_ctx_out_q));
4655
4656 p->dp_flags = flags;
4657 p->dp_priority = prio;
4658 p->dp_fp = fp;
4659 p->dp_start = start_fp;
4660 p->dp_fini = fini_fp;
4661 p->dp_data = data;
4662
4663 /* Lock - the dplane pthread may be running */
4664 DPLANE_LOCK();
4665
4666 p->dp_id = ++zdplane_info.dg_provider_id;
4667
4668 if (name)
4669 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
4670 else
4671 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
4672 "provider-%u", p->dp_id);
4673
4674 /* Insert into list ordered by priority */
4675 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
4676 if (last->dp_priority > p->dp_priority)
4677 break;
4678 }
4679
4680 if (last)
4681 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
4682 else
4683 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
4684 dp_prov_link);
4685
4686 /* And unlock */
4687 DPLANE_UNLOCK();
4688
4689 if (IS_ZEBRA_DEBUG_DPLANE)
4690 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
4691 p->dp_name, p->dp_id, p->dp_priority);
4692
4693 done:
4694 if (prov_p)
4695 *prov_p = p;
4696
4697 return ret;
4698 }
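
/*
 * Registration sketch for an out-of-tree provider (the provider name
 * and callback here are hypothetical; only the API call itself is
 * real).  The kernel provider registration in dplane_provider_init()
 * below is the in-tree instance of this pattern.
 *
 *     struct zebra_dplane_provider *prov = NULL;
 *
 *     ret = dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
 *                                    DPLANE_PROV_FLAGS_DEFAULT,
 *                                    NULL, sample_process_func, NULL,
 *                                    NULL, &prov);
 */
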
4699
4700 /* Accessors for provider attributes */
4701 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
4702 {
4703 return prov->dp_name;
4704 }
4705
4706 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
4707 {
4708 return prov->dp_id;
4709 }
4710
4711 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
4712 {
4713 return prov->dp_data;
4714 }
4715
4716 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
4717 {
4718 return zdplane_info.dg_updates_per_cycle;
4719 }
4720
4721 /* Lock/unlock a provider's mutex - iff the provider was registered with
4722 * the THREADED flag.
4723 */
4724 void dplane_provider_lock(struct zebra_dplane_provider *prov)
4725 {
4726 if (dplane_provider_is_threaded(prov))
4727 DPLANE_PROV_LOCK(prov);
4728 }
4729
4730 void dplane_provider_unlock(struct zebra_dplane_provider *prov)
4731 {
4732 if (dplane_provider_is_threaded(prov))
4733 DPLANE_PROV_UNLOCK(prov);
4734 }
4735
4736 /*
4737 * Dequeue and maintain associated counter
4738 */
4739 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
4740 struct zebra_dplane_provider *prov)
4741 {
4742 struct zebra_dplane_ctx *ctx = NULL;
4743
4744 dplane_provider_lock(prov);
4745
4746 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
4747 if (ctx) {
4748 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
4749
4750 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
4751 memory_order_relaxed);
4752 }
4753
4754 dplane_provider_unlock(prov);
4755
4756 return ctx;
4757 }
4758
4759 /*
4760 * Dequeue work to a list, return count
4761 */
4762 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
4763 struct dplane_ctx_q *listp)
4764 {
4765 int limit, ret;
4766 struct zebra_dplane_ctx *ctx;
4767
4768 limit = zdplane_info.dg_updates_per_cycle;
4769
4770 dplane_provider_lock(prov);
4771
4772 for (ret = 0; ret < limit; ret++) {
4773 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
4774 if (ctx) {
4775 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
4776
4777 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
4778 } else {
4779 break;
4780 }
4781 }
4782
4783 if (ret > 0)
4784 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
4785 memory_order_relaxed);
4786
4787 dplane_provider_unlock(prov);
4788
4789 return ret;
4790 }
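
/*
 * A provider's work callback typically drains its input queue with one
 * of the dequeue helpers above and re-queues each context for the next
 * stage.  Minimal sketch (hypothetical provider, not upstream code):
 *
 *     static int sample_process_func(struct zebra_dplane_provider *prov)
 *     {
 *         struct zebra_dplane_ctx *ctx;
 *
 *         while ((ctx = dplane_provider_dequeue_in_ctx(prov)) != NULL) {
 *             dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *             dplane_provider_enqueue_out_ctx(prov, ctx);
 *         }
 *         return 0;
 *     }
 *
 * A real callback should also honor dplane_provider_get_work_limit()
 * and call dplane_provider_work_ready() when it stops early, as the
 * kernel and test providers below do.
 */
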
4791
4792 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov)
4793 {
4794 return atomic_load_explicit(&(prov->dp_out_counter),
4795 memory_order_relaxed);
4796 }
4797
4798 /*
4799 * Enqueue and maintain associated counter
4800 */
4801 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
4802 struct zebra_dplane_ctx *ctx)
4803 {
4804 uint64_t curr, high;
4805
4806 dplane_provider_lock(prov);
4807
4808 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
4809 zd_q_entries);
4810
4811 /* Maintain out-queue counters */
4812 atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
4813 memory_order_relaxed);
4814 curr = atomic_load_explicit(&prov->dp_out_queued,
4815 memory_order_relaxed);
4816 high = atomic_load_explicit(&prov->dp_out_max,
4817 memory_order_relaxed);
4818 if (curr > high)
4819 atomic_store_explicit(&prov->dp_out_max, curr,
4820 memory_order_relaxed);
4821
4822 dplane_provider_unlock(prov);
4823
4824 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
4825 memory_order_relaxed);
4826 }
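
/*
 * Note on context ownership: every context a provider dequeues should
 * eventually be handed back, either through
 * dplane_provider_enqueue_out_ctx() above (normal flow to the next
 * provider and then to zebra main) or through
 * dplane_provider_enqueue_to_zebra() below; a context that is simply
 * dropped never reports its result back to zebra.
 */
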
4827
4828 /*
4829 * Accessor for provider object
4830 */
4831 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
4832 {
4833 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
4834 }
4835
4836 /*
4837 * Internal helper that copies information from a zebra ns object; this is
4838 * called in the zebra main pthread context as part of dplane ctx init.
4839 */
4840 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
4841 struct zebra_ns *zns)
4842 {
4843 ns_info->ns_id = zns->ns_id;
4844
4845 #if defined(HAVE_NETLINK)
4846 ns_info->is_cmd = true;
4847 ns_info->nls = zns->netlink_dplane_out;
4848 #endif /* HAVE_NETLINK */
4849 }
4850
4851 #ifdef HAVE_NETLINK
4852 /*
4853 * Callback when an OS (netlink) incoming event read is ready. This runs
4854 * in the dplane pthread.
4855 */
4856 static int dplane_incoming_read(struct thread *event)
4857 {
4858 struct dplane_zns_info *zi = THREAD_ARG(event);
4859
4860 kernel_dplane_read(&zi->info);
4861
4862 /* Re-start read task */
4863 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
4864 zi->info.nls.sock, &zi->t_read);
4865
4866 return 0;
4867 }
4868 #endif /* HAVE_NETLINK */
4869
4870 /*
4871 * Notify dplane when namespaces are enabled and disabled. The dplane
4872 * needs to start and stop reading incoming events from the zns. In the
4873 * common case where vrfs are _not_ namespaces, there will only be one
4874 * of these.
4875 *
4876 * This is called in the main pthread.
4877 */
4878 void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
4879 {
4880 struct dplane_zns_info *zi;
4881
4882 if (IS_ZEBRA_DEBUG_DPLANE)
4883 zlog_debug("%s: %s for nsid %u", __func__,
4884 (enabled ? "ENABLED" : "DISABLED"), zns->ns_id);
4885
4886 /* Search for an existing zns info entry */
4887 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
4888 if (zi->info.ns_id == zns->ns_id)
4889 break;
4890 }
4891
4892 if (enabled) {
4893 /* Create a new entry if necessary; start reading. */
4894 if (zi == NULL) {
4895 zi = XCALLOC(MTYPE_DP_NS, sizeof(*zi));
4896
4897 zi->info.ns_id = zns->ns_id;
4898
4899 zns_info_list_add_tail(&zdplane_info.dg_zns_list, zi);
4900
4901 if (IS_ZEBRA_DEBUG_DPLANE)
4902 zlog_debug("%s: nsid %u, new zi %p", __func__,
4903 zns->ns_id, zi);
4904 }
4905
4906 /* Make sure we're up-to-date with the zns object */
4907 #if defined(HAVE_NETLINK)
4908 zi->info.is_cmd = false;
4909 zi->info.nls = zns->netlink_dplane_in;
4910
4911 /* Start read task for the dplane pthread. */
4912 if (zdplane_info.dg_master)
4913 thread_add_read(zdplane_info.dg_master,
4914 dplane_incoming_read, zi,
4915 zi->info.nls.sock, &zi->t_read);
4916 #endif
4917 } else if (zi) {
4918 if (IS_ZEBRA_DEBUG_DPLANE)
4919 zlog_debug("%s: nsid %u, deleting zi %p", __func__,
4920 zns->ns_id, zi);
4921
4922 /* Stop reading, free memory */
4923 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
4924
4925 if (zdplane_info.dg_master)
4926 thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
4927 NULL);
4928
4929 XFREE(MTYPE_DP_NS, zi);
4930 }
4931 }
4932
4933 /*
4934 * Provider api to signal that work/events are available
4935 * for the dataplane pthread.
4936 */
4937 int dplane_provider_work_ready(void)
4938 {
4939 /* Note that during zebra startup, we may be offered work before
4940 * the dataplane pthread (and thread-master) are ready. We want to
4941 * enqueue the work, but the event-scheduling machinery may not be
4942 * available.
4943 */
4944 if (zdplane_info.dg_run) {
4945 thread_add_event(zdplane_info.dg_master,
4946 dplane_thread_loop, NULL, 0,
4947 &zdplane_info.dg_t_update);
4948 }
4949
4950 return AOK;
4951 }
4952
4953 /*
4954 * Enqueue a context directly to zebra main.
4955 */
4956 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
4957 {
4958 struct dplane_ctx_q temp_list;
4959
4960 /* Zebra's api takes a list, so we need to use a temporary list */
4961 TAILQ_INIT(&temp_list);
4962
4963 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
4964 (zdplane_info.dg_results_cb)(&temp_list);
4965 }
4966
4967 /*
4968 * Kernel dataplane provider
4969 */
4970
4971 static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
4972 {
4973 char buf[PREFIX_STRLEN];
4974
4975 switch (dplane_ctx_get_op(ctx)) {
4976
4977 case DPLANE_OP_ROUTE_INSTALL:
4978 case DPLANE_OP_ROUTE_UPDATE:
4979 case DPLANE_OP_ROUTE_DELETE:
4980 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
4981 dplane_ctx_get_vrf(ctx), dplane_ctx_get_dest(ctx),
4982 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
4983 break;
4984
4985 case DPLANE_OP_NH_INSTALL:
4986 case DPLANE_OP_NH_UPDATE:
4987 case DPLANE_OP_NH_DELETE:
4988 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
4989 dplane_ctx_get_nhe_id(ctx), ctx,
4990 dplane_op2str(dplane_ctx_get_op(ctx)));
4991 break;
4992
4993 case DPLANE_OP_LSP_INSTALL:
4994 case DPLANE_OP_LSP_UPDATE:
4995 case DPLANE_OP_LSP_DELETE:
4996 break;
4997
4998 case DPLANE_OP_PW_INSTALL:
4999 case DPLANE_OP_PW_UNINSTALL:
5000 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5001 dplane_ctx_get_ifname(ctx),
5002 dplane_op2str(ctx->zd_op), dplane_ctx_get_pw_af(ctx),
5003 dplane_ctx_get_pw_local_label(ctx),
5004 dplane_ctx_get_pw_remote_label(ctx));
5005 break;
5006
5007 case DPLANE_OP_ADDR_INSTALL:
5008 case DPLANE_OP_ADDR_UNINSTALL:
5009 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5010 dplane_op2str(dplane_ctx_get_op(ctx)),
5011 dplane_ctx_get_ifindex(ctx),
5012 dplane_ctx_get_intf_addr(ctx));
5013 break;
5014
5015 case DPLANE_OP_MAC_INSTALL:
5016 case DPLANE_OP_MAC_DELETE:
5017 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
5018 sizeof(buf));
5019
5020 zlog_debug("Dplane %s, mac %s, ifindex %u",
5021 dplane_op2str(dplane_ctx_get_op(ctx)),
5022 buf, dplane_ctx_get_ifindex(ctx));
5023 break;
5024
5025 case DPLANE_OP_NEIGH_INSTALL:
5026 case DPLANE_OP_NEIGH_UPDATE:
5027 case DPLANE_OP_NEIGH_DELETE:
5028 case DPLANE_OP_VTEP_ADD:
5029 case DPLANE_OP_VTEP_DELETE:
5030 case DPLANE_OP_NEIGH_DISCOVER:
5031 case DPLANE_OP_NEIGH_IP_INSTALL:
5032 case DPLANE_OP_NEIGH_IP_DELETE:
5033 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
5034 sizeof(buf));
5035
5036 zlog_debug("Dplane %s, ip %s, ifindex %u",
5037 dplane_op2str(dplane_ctx_get_op(ctx)),
5038 buf, dplane_ctx_get_ifindex(ctx));
5039 break;
5040
5041 case DPLANE_OP_RULE_ADD:
5042 case DPLANE_OP_RULE_DELETE:
5043 case DPLANE_OP_RULE_UPDATE:
5044 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5045 dplane_op2str(dplane_ctx_get_op(ctx)),
5046 dplane_ctx_get_ifname(ctx),
5047 dplane_ctx_get_ifindex(ctx), ctx);
5048 break;
5049
5050 case DPLANE_OP_SYS_ROUTE_ADD:
5051 case DPLANE_OP_SYS_ROUTE_DELETE:
5052 case DPLANE_OP_ROUTE_NOTIFY:
5053 case DPLANE_OP_LSP_NOTIFY:
5054 case DPLANE_OP_BR_PORT_UPDATE:
5055
5056 case DPLANE_OP_NONE:
5057 break;
5058
5059 case DPLANE_OP_IPTABLE_ADD:
5060 case DPLANE_OP_IPTABLE_DELETE: {
5061 struct zebra_pbr_iptable ipt;
5062
5063 dplane_ctx_get_pbr_iptable(ctx, &ipt);
5064 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5065 dplane_op2str(dplane_ctx_get_op(ctx)), ipt.unique,
5066 ctx);
5067 } break;
5068 case DPLANE_OP_IPSET_ADD:
5069 case DPLANE_OP_IPSET_DELETE: {
5070 struct zebra_pbr_ipset ipset;
5071
5072 dplane_ctx_get_pbr_ipset(ctx, &ipset);
5073 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5074 dplane_op2str(dplane_ctx_get_op(ctx)), ipset.unique,
5075 ctx);
5076 } break;
5077 case DPLANE_OP_IPSET_ENTRY_ADD:
5078 case DPLANE_OP_IPSET_ENTRY_DELETE: {
5079 struct zebra_pbr_ipset_entry ipent;
5080
5081 dplane_ctx_get_pbr_ipset_entry(ctx, &ipent);
5082 zlog_debug(
5083 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5084 dplane_op2str(dplane_ctx_get_op(ctx)), ipent.unique,
5085 ctx);
5086 } break;
5087 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5088 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5089 dplane_op2str(dplane_ctx_get_op(ctx)),
5090 dplane_ctx_get_ifname(ctx),
5091 family2str(dplane_ctx_neightable_get_family(ctx)));
5092 break;
5093 case DPLANE_OP_GRE_SET:
5094 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5095 dplane_op2str(dplane_ctx_get_op(ctx)),
5096 dplane_ctx_get_ifname(ctx),
5097 ctx->u.gre.link_ifindex);
5098 break;
5099
5100 case DPLANE_OP_INTF_ADDR_ADD:
5101 case DPLANE_OP_INTF_ADDR_DEL:
5102 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5103 dplane_op2str(dplane_ctx_get_op(ctx)),
5104 dplane_ctx_get_ifname(ctx),
5105 dplane_ctx_get_intf_addr(ctx));
5106 break;
5107 }
5108 }
5109
5110 static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
5111 {
5112 enum zebra_dplane_result res = dplane_ctx_get_status(ctx);
5113
5114 switch (dplane_ctx_get_op(ctx)) {
5115
5116 case DPLANE_OP_ROUTE_INSTALL:
5117 case DPLANE_OP_ROUTE_UPDATE:
5118 case DPLANE_OP_ROUTE_DELETE:
5119 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5120 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
5121 1, memory_order_relaxed);
5122
5123 if ((dplane_ctx_get_op(ctx) != DPLANE_OP_ROUTE_DELETE)
5124 && (res == ZEBRA_DPLANE_REQUEST_SUCCESS)) {
5125 struct nexthop *nexthop;
5126
5127 /* Update installed nexthops to signal which have been
5128 * installed.
5129 */
5130 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
5131 nexthop)) {
5132 if (CHECK_FLAG(nexthop->flags,
5133 NEXTHOP_FLAG_RECURSIVE))
5134 continue;
5135
5136 if (CHECK_FLAG(nexthop->flags,
5137 NEXTHOP_FLAG_ACTIVE)) {
5138 SET_FLAG(nexthop->flags,
5139 NEXTHOP_FLAG_FIB);
5140 }
5141 }
5142 }
5143 break;
5144
5145 case DPLANE_OP_NH_INSTALL:
5146 case DPLANE_OP_NH_UPDATE:
5147 case DPLANE_OP_NH_DELETE:
5148 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5149 atomic_fetch_add_explicit(
5150 &zdplane_info.dg_nexthop_errors, 1,
5151 memory_order_relaxed);
5152 break;
5153
5154 case DPLANE_OP_LSP_INSTALL:
5155 case DPLANE_OP_LSP_UPDATE:
5156 case DPLANE_OP_LSP_DELETE:
5157 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5158 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors,
5159 1, memory_order_relaxed);
5160 break;
5161
5162 case DPLANE_OP_PW_INSTALL:
5163 case DPLANE_OP_PW_UNINSTALL:
5164 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5165 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
5166 memory_order_relaxed);
5167 break;
5168
5169 case DPLANE_OP_ADDR_INSTALL:
5170 case DPLANE_OP_ADDR_UNINSTALL:
5171 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5172 atomic_fetch_add_explicit(
5173 &zdplane_info.dg_intf_addr_errors, 1,
5174 memory_order_relaxed);
5175 break;
5176
5177 case DPLANE_OP_MAC_INSTALL:
5178 case DPLANE_OP_MAC_DELETE:
5179 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5180 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
5181 1, memory_order_relaxed);
5182 break;
5183
5184 case DPLANE_OP_NEIGH_INSTALL:
5185 case DPLANE_OP_NEIGH_UPDATE:
5186 case DPLANE_OP_NEIGH_DELETE:
5187 case DPLANE_OP_VTEP_ADD:
5188 case DPLANE_OP_VTEP_DELETE:
5189 case DPLANE_OP_NEIGH_DISCOVER:
5190 case DPLANE_OP_NEIGH_IP_INSTALL:
5191 case DPLANE_OP_NEIGH_IP_DELETE:
5192 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5193 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
5194 1, memory_order_relaxed);
5195 break;
5196
5197 case DPLANE_OP_RULE_ADD:
5198 case DPLANE_OP_RULE_DELETE:
5199 case DPLANE_OP_RULE_UPDATE:
5200 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5201 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors,
5202 1, memory_order_relaxed);
5203 break;
5204
5205 case DPLANE_OP_IPTABLE_ADD:
5206 case DPLANE_OP_IPTABLE_DELETE:
5207 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5208 atomic_fetch_add_explicit(
5209 &zdplane_info.dg_iptable_errors, 1,
5210 memory_order_relaxed);
5211 break;
5212
5213 case DPLANE_OP_IPSET_ADD:
5214 case DPLANE_OP_IPSET_DELETE:
5215 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5216 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors,
5217 1, memory_order_relaxed);
5218 break;
5219
5220 case DPLANE_OP_IPSET_ENTRY_ADD:
5221 case DPLANE_OP_IPSET_ENTRY_DELETE:
5222 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5223 atomic_fetch_add_explicit(
5224 &zdplane_info.dg_ipset_entry_errors, 1,
5225 memory_order_relaxed);
5226 break;
5227
5228 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5229 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5230 atomic_fetch_add_explicit(
5231 &zdplane_info.dg_neightable_errors, 1,
5232 memory_order_relaxed);
5233 break;
5234
5235 case DPLANE_OP_GRE_SET:
5236 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5237 atomic_fetch_add_explicit(
5238 &zdplane_info.dg_gre_set_errors, 1,
5239 memory_order_relaxed);
5240 break;
5241 /* Ignore 'notifications' - no-op */
5242 case DPLANE_OP_SYS_ROUTE_ADD:
5243 case DPLANE_OP_SYS_ROUTE_DELETE:
5244 case DPLANE_OP_ROUTE_NOTIFY:
5245 case DPLANE_OP_LSP_NOTIFY:
5246 case DPLANE_OP_BR_PORT_UPDATE:
5247 break;
5248
5249 /* TODO -- error counters for incoming events? */
5250 case DPLANE_OP_INTF_ADDR_ADD:
5251 case DPLANE_OP_INTF_ADDR_DEL:
5252 break;
5253
5254 case DPLANE_OP_NONE:
5255 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5256 atomic_fetch_add_explicit(&zdplane_info.dg_other_errors,
5257 1, memory_order_relaxed);
5258 break;
5259 }
5260 }
5261
5262 static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
5263 struct zebra_dplane_ctx *ctx)
5264 {
5265 zebra_pbr_process_iptable(ctx);
5266 dplane_provider_enqueue_out_ctx(prov, ctx);
5267 }
5268
5269 static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
5270 struct zebra_dplane_ctx *ctx)
5271 {
5272 zebra_pbr_process_ipset(ctx);
5273 dplane_provider_enqueue_out_ctx(prov, ctx);
5274 }
5275
5276 static void
5277 kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
5278 struct zebra_dplane_ctx *ctx)
5279 {
5280 zebra_pbr_process_ipset_entry(ctx);
5281 dplane_provider_enqueue_out_ctx(prov, ctx);
5282 }
5283
5284 /*
5285 * Kernel provider callback
5286 */
5287 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
5288 {
5289 struct zebra_dplane_ctx *ctx, *tctx;
5290 struct dplane_ctx_q work_list;
5291 int counter, limit;
5292
5293 TAILQ_INIT(&work_list);
5294
5295 limit = dplane_provider_get_work_limit(prov);
5296
5297 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5298 zlog_debug("dplane provider '%s': processing",
5299 dplane_provider_get_name(prov));
5300
5301 for (counter = 0; counter < limit; counter++) {
5302 ctx = dplane_provider_dequeue_in_ctx(prov);
5303 if (ctx == NULL)
5304 break;
5305 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5306 kernel_dplane_log_detail(ctx);
5307
5308 if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
5309 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
5310 kernel_dplane_process_iptable(prov, ctx);
5311 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
5312 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
5313 kernel_dplane_process_ipset(prov, ctx);
5314 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
5315 || dplane_ctx_get_op(ctx)
5316 == DPLANE_OP_IPSET_ENTRY_DELETE))
5317 kernel_dplane_process_ipset_entry(prov, ctx);
5318 else
5319 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5320 }
5321
5322 kernel_update_multi(&work_list);
5323
5324 TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
5325 kernel_dplane_handle_result(ctx);
5326
5327 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5328 dplane_provider_enqueue_out_ctx(prov, ctx);
5329 }
5330
5331 /* Ensure that we'll run the work loop again if there's still
5332 * more work to do.
5333 */
5334 if (counter >= limit) {
5335 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5336 zlog_debug("dplane provider '%s' reached max updates %d",
5337 dplane_provider_get_name(prov), counter);
5338
5339 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
5340 1, memory_order_relaxed);
5341
5342 dplane_provider_work_ready();
5343 }
5344
5345 return 0;
5346 }
5347
5348 #ifdef DPLANE_TEST_PROVIDER
5349
5350 /*
5351 * Test dataplane provider plugin
5352 */
5353
5354 /*
5355 * Test provider process callback
5356 */
5357 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
5358 {
5359 struct zebra_dplane_ctx *ctx;
5360 int counter, limit;
5361
5362 /* Just moving from 'in' queue to 'out' queue */
5363
5364 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5365 zlog_debug("dplane provider '%s': processing",
5366 dplane_provider_get_name(prov));
5367
5368 limit = dplane_provider_get_work_limit(prov);
5369
5370 for (counter = 0; counter < limit; counter++) {
5371 ctx = dplane_provider_dequeue_in_ctx(prov);
5372 if (ctx == NULL)
5373 break;
5374
5375 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5376 zlog_debug("dplane provider '%s': op %s",
5377 dplane_provider_get_name(prov),
5378 dplane_op2str(dplane_ctx_get_op(ctx)));
5379
5380 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
5381
5382 dplane_provider_enqueue_out_ctx(prov, ctx);
5383 }
5384
5385 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5386 zlog_debug("dplane provider '%s': processed %d",
5387 dplane_provider_get_name(prov), counter);
5388
5389 /* Ensure that we'll run the work loop again if there's still
5390 * more work to do.
5391 */
5392 if (counter >= limit)
5393 dplane_provider_work_ready();
5394
5395 return 0;
5396 }
5397
5398 /*
5399 * Test provider shutdown/fini callback
5400 */
5401 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
5402 bool early)
5403 {
5404 if (IS_ZEBRA_DEBUG_DPLANE)
5405 zlog_debug("dplane provider '%s': %sshutdown",
5406 dplane_provider_get_name(prov),
5407 early ? "early " : "");
5408
5409 return 0;
5410 }
5411 #endif /* DPLANE_TEST_PROVIDER */
5412
5413 /*
5414 * Register default kernel provider
5415 */
5416 static void dplane_provider_init(void)
5417 {
5418 int ret;
5419
5420 ret = dplane_provider_register("Kernel",
5421 DPLANE_PRIO_KERNEL,
5422 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5423 kernel_dplane_process_func,
5424 NULL,
5425 NULL, NULL);
5426
5427 if (ret != AOK)
5428 zlog_err("Unable to register kernel dplane provider: %d",
5429 ret);
5430
5431 #ifdef DPLANE_TEST_PROVIDER
5432 /* Optional test provider ... */
5433 ret = dplane_provider_register("Test",
5434 DPLANE_PRIO_PRE_KERNEL,
5435 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5436 test_dplane_process_func,
5437 test_dplane_shutdown_func,
5438 NULL /* data */, NULL);
5439
5440 if (ret != AOK)
5441 zlog_err("Unable to register test dplane provider: %d",
5442 ret);
5443 #endif /* DPLANE_TEST_PROVIDER */
5444 }
5445
5446 /*
5447 * Allow zebra code to walk the queue of pending contexts and evaluate each one
5448 * using a callback function. If the function returns 'true', the context
5449 * will be dequeued and freed without being processed.
5450 */
5451 int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
5452 void *arg), void *val)
5453 {
5454 struct zebra_dplane_ctx *ctx, *temp;
5455 struct dplane_ctx_q work_list;
5456
5457 TAILQ_INIT(&work_list);
5458
5459 if (context_cb == NULL)
5460 goto done;
5461
5462 /* Walk the pending context queue under the dplane lock. */
5463 DPLANE_LOCK();
5464
5465 TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
5466 temp) {
5467 if (context_cb(ctx, val)) {
5468 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5469 zd_q_entries);
5470 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5471 }
5472 }
5473
5474 DPLANE_UNLOCK();
5475
5476 /* Now free any contexts selected by the caller, without holding
5477 * the lock.
5478 */
5479 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
5480 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5481 dplane_ctx_fini(&ctx);
5482 }
5483
5484 done:
5485
5486 return 0;
5487 }
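
/*
 * Example predicate for dplane_clean_ctx_queue() (editorial sketch;
 * the callback name and vrf argument are hypothetical):
 *
 *     static bool drop_vrf_ctx_cb(struct zebra_dplane_ctx *ctx, void *arg)
 *     {
 *         vrf_id_t *vrf_id = arg;
 *
 *         return dplane_ctx_get_vrf(ctx) == *vrf_id;
 *     }
 *
 * Returning 'true' causes the context to be unlinked from the pending
 * queue and released via dplane_ctx_fini() without being processed.
 */
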
5488
5489 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5490 * simplified or skipped during shutdown processing.
5491 */
5492 bool dplane_is_in_shutdown(void)
5493 {
5494 return zdplane_info.dg_is_shutdown;
5495 }
5496
5497 /*
5498 * Enable collection of extra info about interfaces in route updates.
5499 */
5500 void dplane_enable_intf_extra_info(void)
5501 {
5502 dplane_collect_extra_intf_info = true;
5503 }
5504
5505 /*
5506 * Early or pre-shutdown, de-init notification api. This runs pretty
5507 * early during zebra shutdown, as a signal to stop new work and prepare
5508 * for updates generated by shutdown/cleanup activity, as zebra tries to
5509 * remove everything it's responsible for.
5510 * NB: This runs in the main zebra pthread context.
5511 */
5512 void zebra_dplane_pre_finish(void)
5513 {
5514 struct zebra_dplane_provider *prov;
5515
5516 if (IS_ZEBRA_DEBUG_DPLANE)
5517 zlog_debug("Zebra dataplane pre-finish called");
5518
5519 zdplane_info.dg_is_shutdown = true;
5520
5521 /* Notify provider(s) of pending shutdown. */
5522 TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
5523 if (prov->dp_fini == NULL)
5524 continue;
5525
5526 prov->dp_fini(prov, true /* early */);
5527 }
5528 }
5529
5530 /*
5531 * Utility to determine whether work remains enqueued within the dplane;
5532 * used during system shutdown processing.
5533 */
5534 static bool dplane_work_pending(void)
5535 {
5536 bool ret = false;
5537 struct zebra_dplane_ctx *ctx;
5538 struct zebra_dplane_provider *prov;
5539
5540 /* TODO -- just checking incoming/pending work for now, must check
5541 * providers
5542 */
5543 DPLANE_LOCK();
5544 {
5545 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5546 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5547 }
5548 DPLANE_UNLOCK();
5549
5550 if (ctx != NULL) {
5551 ret = true;
5552 goto done;
5553 }
5554
5555 while (prov) {
5556
5557 dplane_provider_lock(prov);
5558
5559 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5560 if (ctx == NULL)
5561 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5562
5563 dplane_provider_unlock(prov);
5564
5565 if (ctx != NULL)
5566 break;
5567
5568 DPLANE_LOCK();
5569 prov = TAILQ_NEXT(prov, dp_prov_link);
5570 DPLANE_UNLOCK();
5571 }
5572
5573 if (ctx != NULL)
5574 ret = true;
5575
5576 done:
5577 return ret;
5578 }
5579
5580 /*
5581 * Shutdown-time intermediate callback, used to determine when all pending
5582 * in-flight updates are done. If there's still work to do, reschedules itself.
5583 * If all work is done, schedules an event to the main zebra thread for
5584 * final zebra shutdown.
5585 * This runs in the dplane pthread context.
5586 */
5587 static int dplane_check_shutdown_status(struct thread *event)
5588 {
5589 struct dplane_zns_info *zi;
5590
5591 if (IS_ZEBRA_DEBUG_DPLANE)
5592 zlog_debug("Zebra dataplane shutdown status check called");
5593
5594 /* Remove any zns info entries as we stop the dplane pthread. */
5595 frr_each_safe (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5596 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5597
5598 if (zdplane_info.dg_master)
5599 thread_cancel(&zi->t_read);
5600
5601 XFREE(MTYPE_DP_NS, zi);
5602 }
5603
5604 if (dplane_work_pending()) {
5605 /* Reschedule dplane check on a short timer */
5606 thread_add_timer_msec(zdplane_info.dg_master,
5607 dplane_check_shutdown_status,
5608 NULL, 100,
5609 &zdplane_info.dg_t_shutdown_check);
5610
5611 /* TODO - give up and stop waiting after a short time? */
5612
5613 } else {
5614 /* We appear to be done - schedule a final callback event
5615 * for the zebra main pthread.
5616 */
5617 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
5618 }
5619
5620 return 0;
5621 }
5622
5623 /*
5624 * Shutdown, de-init api. This runs pretty late during shutdown,
5625 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5626 * At this point, dplane work may still remain to be done, so we can't just
5627 * blindly terminate. If there's still work to do, we'll periodically check
5628 * and when done, we'll enqueue a task to the zebra main thread for final
5629 * termination processing.
5630 *
5631 * NB: This runs in the main zebra thread context.
5632 */
5633 void zebra_dplane_finish(void)
5634 {
5635 if (IS_ZEBRA_DEBUG_DPLANE)
5636 zlog_debug("Zebra dataplane fini called");
5637
5638 thread_add_event(zdplane_info.dg_master,
5639 dplane_check_shutdown_status, NULL, 0,
5640 &zdplane_info.dg_t_shutdown_check);
5641 }
5642
5643 /*
5644 * Main dataplane pthread event loop. The thread takes new incoming work
5645 * and offers it to the first provider. It then iterates through the
5646 * providers, taking complete work from each one and offering it
5647 * to the next in order. At each step, a limited number of updates are
5648 * processed during a cycle in order to provide some fairness.
5649 *
5650 * This loop through the providers is only run once, so that the dataplane
5651 * pthread can look for other pending work - such as i/o work on behalf of
5652 * providers.
5653 */
5654 static int dplane_thread_loop(struct thread *event)
5655 {
5656 struct dplane_ctx_q work_list;
5657 struct dplane_ctx_q error_list;
5658 struct zebra_dplane_provider *prov;
5659 struct zebra_dplane_ctx *ctx, *tctx;
5660 int limit, counter, error_counter;
5661 uint64_t curr, high;
5662 bool reschedule = false;
5663
5664 /* Capture work limit per cycle */
5665 limit = zdplane_info.dg_updates_per_cycle;
5666
5667 /* Init temporary lists used to move contexts among providers */
5668 TAILQ_INIT(&work_list);
5669 TAILQ_INIT(&error_list);
5670 error_counter = 0;
5671
5672 /* Check for zebra shutdown */
5673 if (!zdplane_info.dg_run)
5674 goto done;
5675
5676 /* Dequeue some incoming work from zebra (if any) onto the temporary
5677 * working list.
5678 */
5679 DPLANE_LOCK();
5680
5681 /* Locate initial registered provider */
5682 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5683
5684 /* Move new work from incoming list to temp list */
5685 for (counter = 0; counter < limit; counter++) {
5686 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5687 if (ctx) {
5688 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5689 zd_q_entries);
5690
5691 ctx->zd_provider = prov->dp_id;
5692
5693 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5694 } else {
5695 break;
5696 }
5697 }
5698
5699 DPLANE_UNLOCK();
5700
5701 atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
5702 memory_order_relaxed);
5703
5704 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5705 zlog_debug("dplane: incoming new work counter: %d", counter);
5706
5707 /* Iterate through the registered providers, offering new incoming
5708 * work. If the provider has outgoing work in its queue, take that
5709 * work for the next provider.
5710 */
5711 while (prov) {
5712
5713 /* At each iteration, the temporary work list has 'counter'
5714 * items.
5715 */
5716 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5717 zlog_debug("dplane enqueues %d new work to provider '%s'",
5718 counter, dplane_provider_get_name(prov));
5719
5720 /* Capture current provider id in each context; check for
5721 * error status.
5722 */
5723 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
5724 if (dplane_ctx_get_status(ctx) ==
5725 ZEBRA_DPLANE_REQUEST_SUCCESS) {
5726 ctx->zd_provider = prov->dp_id;
5727 } else {
5728 /*
5729 * TODO -- improve error-handling: recirc
5730 * errors backwards so that providers can
5731 * 'undo' their work (if they want to)
5732 */
5733
5734 /* Move to error list; will be returned
5735 * to zebra main.
5736 */
5737 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5738 TAILQ_INSERT_TAIL(&error_list,
5739 ctx, zd_q_entries);
5740 error_counter++;
5741 }
5742 }
5743
5744 /* Enqueue new work to the provider */
5745 dplane_provider_lock(prov);
5746
5747 if (TAILQ_FIRST(&work_list))
5748 TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
5749 zd_q_entries);
5750
5751 atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
5752 memory_order_relaxed);
5753 atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
5754 memory_order_relaxed);
5755 curr = atomic_load_explicit(&prov->dp_in_queued,
5756 memory_order_relaxed);
5757 high = atomic_load_explicit(&prov->dp_in_max,
5758 memory_order_relaxed);
5759 if (curr > high)
5760 atomic_store_explicit(&prov->dp_in_max, curr,
5761 memory_order_relaxed);
5762
5763 dplane_provider_unlock(prov);
5764
5765 /* Reset the temp list (though the 'concat' may have done this
5766 * already), and the counter
5767 */
5768 TAILQ_INIT(&work_list);
5769 counter = 0;
5770
5771 /* Call into the provider code. Note that this is
5772 * unconditional: we offer to do work even if we don't enqueue
5773 * any _new_ work.
5774 */
5775 (*prov->dp_fp)(prov);
5776
5777 /* Check for zebra shutdown */
5778 if (!zdplane_info.dg_run)
5779 break;
5780
5781 /* Dequeue completed work from the provider */
5782 dplane_provider_lock(prov);
5783
5784 while (counter < limit) {
5785 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5786 if (ctx) {
5787 TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
5788 zd_q_entries);
5789
5790 TAILQ_INSERT_TAIL(&work_list,
5791 ctx, zd_q_entries);
5792 counter++;
5793 } else
5794 break;
5795 }
5796
5797 dplane_provider_unlock(prov);
5798
5799 if (counter >= limit)
5800 reschedule = true;
5801
5802 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5803 zlog_debug("dplane dequeues %d completed work from provider %s",
5804 counter, dplane_provider_get_name(prov));
5805
5806 /* Locate next provider */
5807 DPLANE_LOCK();
5808 prov = TAILQ_NEXT(prov, dp_prov_link);
5809 DPLANE_UNLOCK();
5810 }
5811
5812 /*
5813 * We hit the work limit while processing at least one provider's
5814 * output queue - ensure we come back and finish it.
5815 */
5816 if (reschedule)
5817 dplane_provider_work_ready();
5818
5819 /* After all providers have been serviced, enqueue any completed
5820 * work and any errors back to zebra so it can process the results.
5821 */
5822 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5823 zlog_debug("dplane has %d completed, %d errors, for zebra main",
5824 counter, error_counter);
5825
5826 /*
5827 * Hand lists through the api to zebra main,
5828 * to reduce the number of lock/unlock cycles
5829 */
5830
5831 /* Call through to zebra main */
5832 (zdplane_info.dg_results_cb)(&error_list);
5833
5834 TAILQ_INIT(&error_list);
5835
5836 /* Call through to zebra main */
5837 (zdplane_info.dg_results_cb)(&work_list);
5838
5839 TAILQ_INIT(&work_list);
5840
5841 done:
5842 return 0;
5843 }
5844
5845 /*
5846 * Final phase of shutdown, after all work enqueued to dplane has been
5847 * processed. This is called from the zebra main pthread context.
5848 */
5849 void zebra_dplane_shutdown(void)
5850 {
5851 struct zebra_dplane_provider *dp;
5852
5853 if (IS_ZEBRA_DEBUG_DPLANE)
5854 zlog_debug("Zebra dataplane shutdown called");
5855
5856 /* Stop dplane thread, if it's running */
5857
5858 zdplane_info.dg_run = false;
5859
5860 if (zdplane_info.dg_t_update)
5861 thread_cancel_async(zdplane_info.dg_t_update->master,
5862 &zdplane_info.dg_t_update, NULL);
5863
5864 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
5865
5866 /* Destroy pthread */
5867 frr_pthread_destroy(zdplane_info.dg_pthread);
5868 zdplane_info.dg_pthread = NULL;
5869 zdplane_info.dg_master = NULL;
5870
5871 /* Notify provider(s) of final shutdown.
5872 * Note that this call is in the main pthread, so providers must
5873 * be prepared for that.
5874 */
5875 TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
5876 if (dp->dp_fini == NULL)
5877 continue;
5878
5879 dp->dp_fini(dp, false);
5880 }
5881
5882 /* TODO -- Clean-up provider objects */
5883
5884 /* TODO -- Clean queue(s), free memory */
5885 }
5886
5887 /*
5888 * Initialize the dataplane module during startup, internal/private version
5889 */
5890 static void zebra_dplane_init_internal(void)
5891 {
5892 memset(&zdplane_info, 0, sizeof(zdplane_info));
5893
5894 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
5895
5896 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
5897 TAILQ_INIT(&zdplane_info.dg_providers_q);
5898 zns_info_list_init(&zdplane_info.dg_zns_list);
5899
5900 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
5901
5902 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
5903
5904 /* Register default kernel 'provider' during init */
5905 dplane_provider_init();
5906 }
5907
5908 /*
5909 * Start the dataplane pthread. This step needs to be run later than the
5910 * 'init' step, in case zebra has fork-ed.
5911 */
5912 void zebra_dplane_start(void)
5913 {
5914 struct dplane_zns_info *zi;
5915 struct zebra_dplane_provider *prov;
5916 struct frr_pthread_attr pattr = {
5917 .start = frr_pthread_attr_default.start,
5918 .stop = frr_pthread_attr_default.stop
5919 };
5920
5921 /* Start dataplane pthread */
5922
5923 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
5924 "zebra_dplane");
5925
5926 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
5927
5928 zdplane_info.dg_run = true;
5929
5930 /* Enqueue an initial event for the dataplane pthread */
5931 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
5932 &zdplane_info.dg_t_update);
5933
5934 /* Enqueue reads if necessary */
5935 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5936 #if defined(HAVE_NETLINK)
5937 thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
5938 zi, zi->info.nls.sock, &zi->t_read);
5939 #endif
5940 }
5941
5942 /* Call start callbacks for registered providers */
5943
5944 DPLANE_LOCK();
5945 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5946 DPLANE_UNLOCK();
5947
5948 while (prov) {
5949
5950 if (prov->dp_start)
5951 (prov->dp_start)(prov);
5952
5953 /* Locate next provider */
5954 DPLANE_LOCK();
5955 prov = TAILQ_NEXT(prov, dp_prov_link);
5956 DPLANE_UNLOCK();
5957 }
5958
5959 frr_pthread_run(zdplane_info.dg_pthread, NULL);
5960 }
5961
5962 /*
5963 * Initialize the dataplane module at startup; called by zebra rib_init()
5964 */
5965 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
5966 {
5967 zebra_dplane_init_internal();
5968 zdplane_info.dg_results_cb = results_fp;
5969 }
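
/*
 * Startup ordering summary: zebra_dplane_init() runs first, registers
 * the kernel provider and records the results callback, and
 * zebra_dplane_start() runs later (after zebra may have forked) to
 * create the dplane pthread and schedule its first event.  Rough
 * caller-side sketch (the callback name is illustrative only):
 *
 *     zebra_dplane_init(results_cb);
 *     ...
 *     zebra_dplane_start();
 */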