zebra/zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_dplane.h"
33 #include "zebra/zebra_vxlan_private.h"
34 #include "zebra/zebra_mpls.h"
35 #include "zebra/rt.h"
36 #include "zebra/debug.h"
37 #include "zebra/zebra_pbr.h"
38 #include "printfrr.h"
39
40 /* Memory types */
41 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx");
42 DEFINE_MTYPE_STATIC(ZEBRA, DP_INTF, "Zebra DPlane Intf");
43 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider");
44 DEFINE_MTYPE_STATIC(ZEBRA, DP_NETFILTER, "Zebra Netfilter Internal Object");
45 DEFINE_MTYPE_STATIC(ZEBRA, DP_NS, "DPlane NSes");
46
47 #ifndef AOK
48 # define AOK 0
49 #endif
50
51 /* Control for collection of extra interface info with route updates; a plugin
52 * can enable the extra info via a dplane api.
53 */
54 static bool dplane_collect_extra_intf_info;
55
56 /* Enable test dataplane provider */
57 /* #define DPLANE_TEST_PROVIDER 1 */
58
59 /* Default value for max queued incoming updates */
60 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
61
62 /* Default value for new work per cycle */
63 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
64
65 /* Validation check macro for context blocks */
66 /* #define DPLANE_DEBUG 1 */
67
68 #ifdef DPLANE_DEBUG
69
70 # define DPLANE_CTX_VALID(p) \
71 assert((p) != NULL)
72
73 #else
74
75 # define DPLANE_CTX_VALID(p)
76
77 #endif /* DPLANE_DEBUG */
78
79 /*
80 * Nexthop information captured for nexthop/nexthop group updates
81 */
82 struct dplane_nexthop_info {
83 uint32_t id;
84 uint32_t old_id;
85 afi_t afi;
86 vrf_id_t vrf_id;
87 int type;
88
89 struct nexthop_group ng;
90 struct nh_grp nh_grp[MULTIPATH_NUM];
91 uint8_t nh_grp_count;
92 };
93
94 /*
95 * Optional extra info about interfaces used in route updates' nexthops.
96 */
97 struct dplane_intf_extra {
98 vrf_id_t vrf_id;
99 uint32_t ifindex;
100 uint32_t flags;
101 uint32_t status;
102
103 TAILQ_ENTRY(dplane_intf_extra) link;
104 };
105
106 /*
107 * Route information captured for route updates.
108 */
109 struct dplane_route_info {
110
111 /* Dest and (optional) source prefixes */
112 struct prefix zd_dest;
113 struct prefix zd_src;
114
115 afi_t zd_afi;
116 safi_t zd_safi;
117
118 int zd_type;
119 int zd_old_type;
120
121 route_tag_t zd_tag;
122 route_tag_t zd_old_tag;
123 uint32_t zd_metric;
124 uint32_t zd_old_metric;
125
126 uint16_t zd_instance;
127 uint16_t zd_old_instance;
128
129 uint8_t zd_distance;
130 uint8_t zd_old_distance;
131
132 uint32_t zd_mtu;
133 uint32_t zd_nexthop_mtu;
134
135 /* Nexthop hash entry info */
136 struct dplane_nexthop_info nhe;
137
138 /* Nexthops */
139 uint32_t zd_nhg_id;
140 struct nexthop_group zd_ng;
141
142 /* Backup nexthops (if present) */
143 struct nexthop_group backup_ng;
144
145 /* "Previous" nexthops, used only in route updates without netlink */
146 struct nexthop_group zd_old_ng;
147 struct nexthop_group old_backup_ng;
148
149 /* Optional list of extra interface info */
150 TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
151 };
152
153 /*
154 * Pseudowire info for the dataplane
155 */
156 struct dplane_pw_info {
157 int type;
158 int af;
159 int status;
160 uint32_t flags;
161 uint32_t nhg_id;
162 union g_addr dest;
163 mpls_label_t local_label;
164 mpls_label_t remote_label;
165
166 /* Nexthops that are valid and installed */
167 struct nexthop_group fib_nhg;
168
169 /* Primary and backup nexthop sets, copied from the resolving route. */
170 struct nexthop_group primary_nhg;
171 struct nexthop_group backup_nhg;
172
173 union pw_protocol_fields fields;
174 };
175
176 /*
177 * Bridge port info for the dataplane
178 */
179 struct dplane_br_port_info {
180 uint32_t sph_filter_cnt;
181 struct in_addr sph_filters[ES_VTEP_MAX_CNT];
182 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h */
183 uint32_t flags;
184 uint32_t backup_nhg_id;
185 };
186
187 /*
188 * Interface/prefix info for the dataplane
189 */
190 struct dplane_intf_info {
191
192 uint32_t metric;
193 uint32_t flags;
194
195 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
196 #define DPLANE_INTF_SECONDARY (1 << 1)
197 #define DPLANE_INTF_BROADCAST (1 << 2)
198 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
199 #define DPLANE_INTF_HAS_LABEL (1 << 4)
200
201 /* Interface address/prefix */
202 struct prefix prefix;
203
204 /* Dest address, for p2p, or broadcast prefix */
205 struct prefix dest_prefix;
206
207 char *label;
208 char label_buf[32];
209 };
210
211 /*
212 * EVPN MAC address info for the dataplane.
213 */
214 struct dplane_mac_info {
215 vlanid_t vid;
216 ifindex_t br_ifindex;
217 struct ethaddr mac;
218 struct in_addr vtep_ip;
219 bool is_sticky;
220 uint32_t nhg_id;
221 uint32_t update_flags;
222 };
223
224 /*
225 * Neighbor info for the dataplane
226 */
227 struct dplane_neigh_info {
228 struct ipaddr ip_addr;
229 union {
230 struct ethaddr mac;
231 struct ipaddr ip_addr;
232 } link;
233 uint32_t flags;
234 uint16_t state;
235 uint32_t update_flags;
236 };
237
238 /*
239 * Neighbor Table
240 */
241 struct dplane_neigh_table {
242 uint8_t family;
243 uint32_t app_probes;
244 uint32_t ucast_probes;
245 uint32_t mcast_probes;
246 };
247
248 /*
249 * Policy based routing rule info for the dataplane
250 */
251 struct dplane_ctx_rule {
252 uint32_t priority;
253
254 /* The route table pointed by this rule */
255 uint32_t table;
256
257 /* Filter criteria */
258 uint32_t filter_bm;
259 uint32_t fwmark;
260 uint8_t dsfield;
261 struct prefix src_ip;
262 struct prefix dst_ip;
263 uint8_t ip_proto;
264
265 uint8_t action_pcp;
266 uint16_t action_vlan_id;
267 uint16_t action_vlan_flags;
268
269 uint32_t action_queue_id;
270
271 char ifname[INTERFACE_NAMSIZ + 1];
272 };
273
274 struct dplane_rule_info {
275 /*
276 * Originating zclient sock fd, so we can know who to send
277 * back to.
278 */
279 int sock;
280
281 int unique;
282 int seq;
283
284 struct dplane_ctx_rule new;
285 struct dplane_ctx_rule old;
286 };
287
288 struct dplane_gre_ctx {
289 uint32_t link_ifindex;
290 unsigned int mtu;
291 struct zebra_l2info_gre info;
292 };
293
294
295 /*
296 * Network interface configuration info - aligned with netlink's NETCONF
297 * info. The flag values are public; see zebra_dplane.h.
298 */
299 struct dplane_netconf_info {
300 ns_id_t ns_id;
301 ifindex_t ifindex;
302 enum dplane_netconf_status_e mpls_val;
303 enum dplane_netconf_status_e mcast_val;
304 };
305
306 /*
307 * The context block used to exchange info about route updates across
308 * the boundary between the zebra main context (and pthread) and the
309 * dataplane layer (and pthread).
310 */
311 struct zebra_dplane_ctx {
312
313 /* Operation code */
314 enum dplane_op_e zd_op;
315
316 /* Status on return */
317 enum zebra_dplane_result zd_status;
318
319 /* Dplane provider id */
320 uint32_t zd_provider;
321
322 /* Flags - used by providers, e.g. */
323 int zd_flags;
324
325 bool zd_is_update;
326
327 uint32_t zd_seq;
328 uint32_t zd_old_seq;
329
330 /* Some updates may be generated by notifications: allow the
331 * plugin to notice and ignore results from its own notifications.
332 */
333 uint32_t zd_notif_provider;
334
335 /* TODO -- internal/sub-operation status? */
336 enum zebra_dplane_result zd_remote_status;
337 enum zebra_dplane_result zd_kernel_status;
338
339 vrf_id_t zd_vrf_id;
340 uint32_t zd_table_id;
341
342 char zd_ifname[INTERFACE_NAMSIZ];
343 ifindex_t zd_ifindex;
344
345 /* Support info for different kinds of updates */
346 union {
347 struct dplane_route_info rinfo;
348 struct zebra_lsp lsp;
349 struct dplane_pw_info pw;
350 struct dplane_br_port_info br_port;
351 struct dplane_intf_info intf;
352 struct dplane_mac_info macinfo;
353 struct dplane_neigh_info neigh;
354 struct dplane_rule_info rule;
355 struct zebra_pbr_iptable iptable;
356 struct zebra_pbr_ipset ipset;
357 struct {
358 struct zebra_pbr_ipset_entry entry;
359 struct zebra_pbr_ipset_info info;
360 } ipset_entry;
361 struct dplane_neigh_table neightable;
362 struct dplane_gre_ctx gre;
363 struct dplane_netconf_info netconf;
364 } u;
365
366 /* Namespace info, used especially for netlink kernel communication */
367 struct zebra_dplane_info zd_ns_info;
368
369 /* Embedded list linkage */
370 TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
371 };
372
373 /* Flag that can be set by a pre-kernel provider as a signal that an update
374 * should bypass the kernel.
375 */
376 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
377
378
379 /*
380 * Registration block for one dataplane provider.
381 */
382 struct zebra_dplane_provider {
383 /* Name */
384 char dp_name[DPLANE_PROVIDER_NAMELEN + 1];
385
386 /* Priority, for ordering among providers */
387 uint8_t dp_priority;
388
389 /* Id value */
390 uint32_t dp_id;
391
392 /* Mutex */
393 pthread_mutex_t dp_mutex;
394
395 /* Plugin-provided extra data */
396 void *dp_data;
397
398 /* Flags */
399 int dp_flags;
400
401 int (*dp_start)(struct zebra_dplane_provider *prov);
402
403 int (*dp_fp)(struct zebra_dplane_provider *prov);
404
405 int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
406
407 _Atomic uint32_t dp_in_counter;
408 _Atomic uint32_t dp_in_queued;
409 _Atomic uint32_t dp_in_max;
410 _Atomic uint32_t dp_out_counter;
411 _Atomic uint32_t dp_out_queued;
412 _Atomic uint32_t dp_out_max;
413 _Atomic uint32_t dp_error_counter;
414
415 /* Queue of contexts inbound to the provider */
416 struct dplane_ctx_q dp_ctx_in_q;
417
418 /* Queue of completed contexts outbound from the provider back
419 * towards the dataplane module.
420 */
421 struct dplane_ctx_q dp_ctx_out_q;
422
423 /* Embedded list linkage for provider objects */
424 TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
425 };
426
427 /* Declare types for list of zns info objects */
428 PREDECL_DLIST(zns_info_list);
429
430 struct dplane_zns_info {
431 struct zebra_dplane_info info;
432
433 /* Read event */
434 struct thread *t_read;
435
436 /* List linkage */
437 struct zns_info_list_item link;
438 };
439
440 /*
441 * Globals
442 */
443 static struct zebra_dplane_globals {
444 /* Mutex to control access to dataplane components */
445 pthread_mutex_t dg_mutex;
446
447 /* Results callback registered by zebra 'core' */
448 int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
449
450 /* Sentinel for beginning of shutdown */
451 volatile bool dg_is_shutdown;
452
453 /* Sentinel for end of shutdown */
454 volatile bool dg_run;
455
456 /* Update context queue inbound to the dataplane */
457 TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
458
459 /* Ordered list of providers */
460 TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
461
462 /* List of info about each zns */
463 struct zns_info_list_head dg_zns_list;
464
465 /* Counter used to assign internal ids to providers */
466 uint32_t dg_provider_id;
467
468 /* Limit number of pending, unprocessed updates */
469 _Atomic uint32_t dg_max_queued_updates;
470
471 /* Control whether system route notifications should be produced. */
472 bool dg_sys_route_notifs;
473
474 /* Limit number of new updates dequeued at once, to pace an
475 * incoming burst.
476 */
477 uint32_t dg_updates_per_cycle;
478
479 _Atomic uint32_t dg_routes_in;
480 _Atomic uint32_t dg_routes_queued;
481 _Atomic uint32_t dg_routes_queued_max;
482 _Atomic uint32_t dg_route_errors;
483 _Atomic uint32_t dg_other_errors;
484
485 _Atomic uint32_t dg_nexthops_in;
486 _Atomic uint32_t dg_nexthop_errors;
487
488 _Atomic uint32_t dg_lsps_in;
489 _Atomic uint32_t dg_lsp_errors;
490
491 _Atomic uint32_t dg_pws_in;
492 _Atomic uint32_t dg_pw_errors;
493
494 _Atomic uint32_t dg_br_port_in;
495 _Atomic uint32_t dg_br_port_errors;
496
497 _Atomic uint32_t dg_intf_addrs_in;
498 _Atomic uint32_t dg_intf_addr_errors;
499
500 _Atomic uint32_t dg_macs_in;
501 _Atomic uint32_t dg_mac_errors;
502
503 _Atomic uint32_t dg_neighs_in;
504 _Atomic uint32_t dg_neigh_errors;
505
506 _Atomic uint32_t dg_rules_in;
507 _Atomic uint32_t dg_rule_errors;
508
509 _Atomic uint32_t dg_update_yields;
510
511 _Atomic uint32_t dg_iptable_in;
512 _Atomic uint32_t dg_iptable_errors;
513
514 _Atomic uint32_t dg_ipset_in;
515 _Atomic uint32_t dg_ipset_errors;
516 _Atomic uint32_t dg_ipset_entry_in;
517 _Atomic uint32_t dg_ipset_entry_errors;
518
519 _Atomic uint32_t dg_neightable_in;
520 _Atomic uint32_t dg_neightable_errors;
521
522 _Atomic uint32_t dg_gre_set_in;
523 _Atomic uint32_t dg_gre_set_errors;
524
525 /* Dataplane pthread */
526 struct frr_pthread *dg_pthread;
527
528 /* Event-delivery context 'master' for the dplane */
529 struct thread_master *dg_master;
530
531 /* Event/'thread' pointer for queued updates */
532 struct thread *dg_t_update;
533
534 /* Event pointer for pending shutdown check loop */
535 struct thread *dg_t_shutdown_check;
536
537 } zdplane_info;
538
539 /* Instantiate zns list type */
540 DECLARE_DLIST(zns_info_list, struct dplane_zns_info, link);
541
542 /*
543 * Lock and unlock for interactions with the zebra 'core' pthread
544 */
545 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
546 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
547
548
549 /*
550 * Lock and unlock for individual providers
551 */
552 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
553 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
554
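/*
 * Illustrative sketch (not a definitive implementation): the per-provider
 * queues are shared between the dplane pthread and its callers, so accesses
 * are wrapped with the provider lock macros above. The "example_" helper
 * name is hypothetical.
 */
#if 0
static void example_prov_enqueue(struct zebra_dplane_provider *prov,
				 struct zebra_dplane_ctx *ctx)
{
	DPLANE_PROV_LOCK(prov);
	TAILQ_INSERT_TAIL(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
	DPLANE_PROV_UNLOCK(prov);
}
#endif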
555 /* Prototypes */
556 static void dplane_thread_loop(struct thread *event);
557 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
558 struct zebra_ns *zns);
559 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
560 enum dplane_op_e op);
561 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
562 enum dplane_op_e op);
563 static enum zebra_dplane_result intf_addr_update_internal(
564 const struct interface *ifp, const struct connected *ifc,
565 enum dplane_op_e op);
566 static enum zebra_dplane_result mac_update_common(
567 enum dplane_op_e op, const struct interface *ifp,
568 const struct interface *br_ifp,
569 vlanid_t vid, const struct ethaddr *mac,
570 struct in_addr vtep_ip, bool sticky, uint32_t nhg_id,
571 uint32_t update_flags);
572 static enum zebra_dplane_result
573 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
574 const void *link, int link_family,
575 const struct ipaddr *ip, uint32_t flags, uint16_t state,
576 uint32_t update_flags, int protocol);
577
578 /*
579 * Public APIs
580 */
581
582 /* Obtain thread_master for dataplane thread */
583 struct thread_master *dplane_get_thread_master(void)
584 {
585 return zdplane_info.dg_master;
586 }
587
588 /*
589 * Allocate a dataplane update context
590 */
591 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
592 {
593 struct zebra_dplane_ctx *p;
594
595 /* TODO -- just alloc'ing memory, but would like to maintain
596 * a pool
597 */
598 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
599
600 return p;
601 }
602
603 /* Enable system route notifications */
604 void dplane_enable_sys_route_notifs(void)
605 {
606 zdplane_info.dg_sys_route_notifs = true;
607 }
608
609 /*
610 * Clean up dependent/internal allocations inside a context object
611 */
612 static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
613 {
614 struct dplane_intf_extra *if_extra, *if_tmp;
615
616 /*
617 * Some internal allocations may need to be freed, depending on
618 * the type of info captured in the ctx.
619 */
620 switch (ctx->zd_op) {
621 case DPLANE_OP_ROUTE_INSTALL:
622 case DPLANE_OP_ROUTE_UPDATE:
623 case DPLANE_OP_ROUTE_DELETE:
624 case DPLANE_OP_SYS_ROUTE_ADD:
625 case DPLANE_OP_SYS_ROUTE_DELETE:
626 case DPLANE_OP_ROUTE_NOTIFY:
627
628 /* Free allocated nexthops */
629 if (ctx->u.rinfo.zd_ng.nexthop) {
630 /* This deals with recursive nexthops too */
631 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
632
633 ctx->u.rinfo.zd_ng.nexthop = NULL;
634 }
635
636 /* Free backup info also (if present) */
637 if (ctx->u.rinfo.backup_ng.nexthop) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
640
641 ctx->u.rinfo.backup_ng.nexthop = NULL;
642 }
643
644 if (ctx->u.rinfo.zd_old_ng.nexthop) {
645 /* This deals with recursive nexthops too */
646 nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop);
647
648 ctx->u.rinfo.zd_old_ng.nexthop = NULL;
649 }
650
651 if (ctx->u.rinfo.old_backup_ng.nexthop) {
652 /* This deals with recursive nexthops too */
653 nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop);
654
655 ctx->u.rinfo.old_backup_ng.nexthop = NULL;
656 }
657
658 /* Optional extra interface info */
659 TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
660 link, if_tmp) {
661 TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
662 link);
663 XFREE(MTYPE_DP_INTF, if_extra);
664 }
665
666 break;
667
668 case DPLANE_OP_NH_INSTALL:
669 case DPLANE_OP_NH_UPDATE:
670 case DPLANE_OP_NH_DELETE: {
671 if (ctx->u.rinfo.nhe.ng.nexthop) {
672 /* This deals with recursive nexthops too */
673 nexthops_free(ctx->u.rinfo.nhe.ng.nexthop);
674
675 ctx->u.rinfo.nhe.ng.nexthop = NULL;
676 }
677 break;
678 }
679
680 case DPLANE_OP_LSP_INSTALL:
681 case DPLANE_OP_LSP_UPDATE:
682 case DPLANE_OP_LSP_DELETE:
683 case DPLANE_OP_LSP_NOTIFY:
684 {
685 struct zebra_nhlfe *nhlfe;
686
687 /* Unlink and free allocated NHLFEs */
688 frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
689 nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
690 zebra_mpls_nhlfe_free(nhlfe);
691 }
692
693 /* Unlink and free allocated backup NHLFEs, if present */
694 frr_each_safe(nhlfe_list,
695 &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
696 nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
697 nhlfe);
698 zebra_mpls_nhlfe_free(nhlfe);
699 }
700
701 /* Clear pointers in lsp struct, in case we're caching
702 * free context structs.
703 */
704 nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
705 ctx->u.lsp.best_nhlfe = NULL;
706 nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
707
708 break;
709 }
710
711 case DPLANE_OP_PW_INSTALL:
712 case DPLANE_OP_PW_UNINSTALL:
713 /* Free allocated nexthops */
714 if (ctx->u.pw.fib_nhg.nexthop) {
715 /* This deals with recursive nexthops too */
716 nexthops_free(ctx->u.pw.fib_nhg.nexthop);
717
718 ctx->u.pw.fib_nhg.nexthop = NULL;
719 }
720 if (ctx->u.pw.primary_nhg.nexthop) {
721 nexthops_free(ctx->u.pw.primary_nhg.nexthop);
722
723 ctx->u.pw.primary_nhg.nexthop = NULL;
724 }
725 if (ctx->u.pw.backup_nhg.nexthop) {
726 nexthops_free(ctx->u.pw.backup_nhg.nexthop);
727
728 ctx->u.pw.backup_nhg.nexthop = NULL;
729 }
730 break;
731
732 case DPLANE_OP_ADDR_INSTALL:
733 case DPLANE_OP_ADDR_UNINSTALL:
734 case DPLANE_OP_INTF_ADDR_ADD:
735 case DPLANE_OP_INTF_ADDR_DEL:
736 /* Maybe free label string, if allocated */
737 if (ctx->u.intf.label != NULL &&
738 ctx->u.intf.label != ctx->u.intf.label_buf) {
739 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
740 ctx->u.intf.label = NULL;
741 }
742 break;
743
744 case DPLANE_OP_MAC_INSTALL:
745 case DPLANE_OP_MAC_DELETE:
746 case DPLANE_OP_NEIGH_INSTALL:
747 case DPLANE_OP_NEIGH_UPDATE:
748 case DPLANE_OP_NEIGH_DELETE:
749 case DPLANE_OP_VTEP_ADD:
750 case DPLANE_OP_VTEP_DELETE:
751 case DPLANE_OP_RULE_ADD:
752 case DPLANE_OP_RULE_DELETE:
753 case DPLANE_OP_RULE_UPDATE:
754 case DPLANE_OP_NEIGH_DISCOVER:
755 case DPLANE_OP_BR_PORT_UPDATE:
756 case DPLANE_OP_NEIGH_IP_INSTALL:
757 case DPLANE_OP_NEIGH_IP_DELETE:
758 case DPLANE_OP_NONE:
759 case DPLANE_OP_IPSET_ADD:
760 case DPLANE_OP_IPSET_DELETE:
761 break;
762
763 case DPLANE_OP_IPSET_ENTRY_ADD:
764 case DPLANE_OP_IPSET_ENTRY_DELETE:
765 break;
766 case DPLANE_OP_NEIGH_TABLE_UPDATE:
767 break;
768 case DPLANE_OP_IPTABLE_ADD:
769 case DPLANE_OP_IPTABLE_DELETE:
770 if (ctx->u.iptable.interface_name_list) {
771 struct listnode *node, *nnode;
772 char *ifname;
773
774 for (ALL_LIST_ELEMENTS(
775 ctx->u.iptable.interface_name_list, node,
776 nnode, ifname)) {
777 LISTNODE_DETACH(
778 ctx->u.iptable.interface_name_list,
779 node);
780 XFREE(MTYPE_DP_NETFILTER, ifname);
781 }
782 list_delete(&ctx->u.iptable.interface_name_list);
783 }
784 break;
785 case DPLANE_OP_GRE_SET:
786 case DPLANE_OP_INTF_NETCONFIG:
787 break;
788 }
789 }
790
791 /*
792 * Free a dataplane results context.
793 */
794 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
795 {
796 if (pctx == NULL)
797 return;
798
799 DPLANE_CTX_VALID(*pctx);
800
801 /* TODO -- just freeing memory, but would like to maintain
802 * a pool
803 */
804
805 /* Some internal allocations may need to be freed, depending on
806 * the type of info captured in the ctx.
807 */
808 dplane_ctx_free_internal(*pctx);
809
810 XFREE(MTYPE_DP_CTX, *pctx);
811 }
812
813 /*
814 * Reset an allocated context object for re-use. All internal allocations are
815 * freed and the context is memset.
816 */
817 void dplane_ctx_reset(struct zebra_dplane_ctx *ctx)
818 {
819 dplane_ctx_free_internal(ctx);
820 memset(ctx, 0, sizeof(*ctx));
821 }
822
823 /*
824 * Return a context block to the dplane module after processing
825 */
826 void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
827 {
828 /* TODO -- maintain pool; for now, just free */
829 dplane_ctx_free(pctx);
830 }
831
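/*
 * Illustrative sketch of the typical context lifecycle on the zebra side:
 * allocate, set the op and keys, and hand the ctx back via dplane_ctx_fini()
 * after the result has been consumed. The "example_" helper is hypothetical.
 */
#if 0
static void example_ctx_lifecycle(void)
{
	struct zebra_dplane_ctx *ctx = dplane_ctx_alloc();

	dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_INSTALL);
	dplane_ctx_set_vrf(ctx, VRF_DEFAULT);

	/* ... enqueue to the dplane, later process the returned result ... */

	dplane_ctx_fini(&ctx);
}
#endif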
832 /* Enqueue a context block */
833 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
834 const struct zebra_dplane_ctx *ctx)
835 {
836 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
837 }
838
839 /* Append a list of context blocks to another list */
840 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
841 struct dplane_ctx_q *from_list)
842 {
843 if (TAILQ_FIRST(from_list)) {
844 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
845
846 /* And clear 'from' list */
847 TAILQ_INIT(from_list);
848 }
849 }
850
851 struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
852 {
853 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
854
855 return ctx;
856 }
857
858 /* Dequeue a context block from the head of a list */
859 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
860 {
861 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
862
863 if (ctx)
864 TAILQ_REMOVE(q, ctx, zd_q_entries);
865
866 return ctx;
867 }
868
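/*
 * Illustrative sketch: draining a list of completed contexts, e.g. inside a
 * results callback, returning each ctx to the dplane module when done. The
 * "example_" helper name is hypothetical.
 */
#if 0
static void example_drain_list(struct dplane_ctx_q *ctxlist)
{
	struct zebra_dplane_ctx *ctx;

	while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
		/* ... inspect op, status, etc. via the accessors below ... */
		dplane_ctx_fini(&ctx);
	}
}
#endif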
869 /*
870 * Accessors for information from the context object
871 */
872 enum zebra_dplane_result dplane_ctx_get_status(
873 const struct zebra_dplane_ctx *ctx)
874 {
875 DPLANE_CTX_VALID(ctx);
876
877 return ctx->zd_status;
878 }
879
880 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
881 enum zebra_dplane_result status)
882 {
883 DPLANE_CTX_VALID(ctx);
884
885 ctx->zd_status = status;
886 }
887
888 /* Retrieve last/current provider id */
889 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
890 {
891 DPLANE_CTX_VALID(ctx);
892 return ctx->zd_provider;
893 }
894
895 /* Providers that run before the kernel can control whether a kernel
896 * update should be done.
897 */
898 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
899 {
900 DPLANE_CTX_VALID(ctx);
901
902 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
903 }
904
905 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
906 {
907 DPLANE_CTX_VALID(ctx);
908
909 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
910 }
911
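/*
 * Illustrative sketch: a provider running ahead of the kernel provider can
 * claim an update (e.g. after programming it in hardware) and ask that the
 * kernel stage be bypassed. The "example_" helper name is hypothetical.
 */
#if 0
static void example_claim_update(struct zebra_dplane_ctx *ctx)
{
	dplane_ctx_set_skip_kernel(ctx);
	dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
}
#endif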
912 void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
913 {
914 DPLANE_CTX_VALID(ctx);
915 ctx->zd_op = op;
916 }
917
918 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
919 {
920 DPLANE_CTX_VALID(ctx);
921
922 return ctx->zd_op;
923 }
924
925 const char *dplane_op2str(enum dplane_op_e op)
926 {
927 const char *ret = "UNKNOWN";
928
929 switch (op) {
930 case DPLANE_OP_NONE:
931 ret = "NONE";
932 break;
933
934 /* Route update */
935 case DPLANE_OP_ROUTE_INSTALL:
936 ret = "ROUTE_INSTALL";
937 break;
938 case DPLANE_OP_ROUTE_UPDATE:
939 ret = "ROUTE_UPDATE";
940 break;
941 case DPLANE_OP_ROUTE_DELETE:
942 ret = "ROUTE_DELETE";
943 break;
944 case DPLANE_OP_ROUTE_NOTIFY:
945 ret = "ROUTE_NOTIFY";
946 break;
947
948 /* Nexthop update */
949 case DPLANE_OP_NH_INSTALL:
950 ret = "NH_INSTALL";
951 break;
952 case DPLANE_OP_NH_UPDATE:
953 ret = "NH_UPDATE";
954 break;
955 case DPLANE_OP_NH_DELETE:
956 ret = "NH_DELETE";
957 break;
958
959 case DPLANE_OP_LSP_INSTALL:
960 ret = "LSP_INSTALL";
961 break;
962 case DPLANE_OP_LSP_UPDATE:
963 ret = "LSP_UPDATE";
964 break;
965 case DPLANE_OP_LSP_DELETE:
966 ret = "LSP_DELETE";
967 break;
968 case DPLANE_OP_LSP_NOTIFY:
969 ret = "LSP_NOTIFY";
970 break;
971
972 case DPLANE_OP_PW_INSTALL:
973 ret = "PW_INSTALL";
974 break;
975 case DPLANE_OP_PW_UNINSTALL:
976 ret = "PW_UNINSTALL";
977 break;
978
979 case DPLANE_OP_SYS_ROUTE_ADD:
980 ret = "SYS_ROUTE_ADD";
981 break;
982 case DPLANE_OP_SYS_ROUTE_DELETE:
983 ret = "SYS_ROUTE_DEL";
984 break;
985
986 case DPLANE_OP_BR_PORT_UPDATE:
987 ret = "BR_PORT_UPDATE";
988 break;
989
990 case DPLANE_OP_ADDR_INSTALL:
991 ret = "ADDR_INSTALL";
992 break;
993 case DPLANE_OP_ADDR_UNINSTALL:
994 ret = "ADDR_UNINSTALL";
995 break;
996
997 case DPLANE_OP_MAC_INSTALL:
998 ret = "MAC_INSTALL";
999 break;
1000 case DPLANE_OP_MAC_DELETE:
1001 ret = "MAC_DELETE";
1002 break;
1003
1004 case DPLANE_OP_NEIGH_INSTALL:
1005 ret = "NEIGH_INSTALL";
1006 break;
1007 case DPLANE_OP_NEIGH_UPDATE:
1008 ret = "NEIGH_UPDATE";
1009 break;
1010 case DPLANE_OP_NEIGH_DELETE:
1011 ret = "NEIGH_DELETE";
1012 break;
1013 case DPLANE_OP_VTEP_ADD:
1014 ret = "VTEP_ADD";
1015 break;
1016 case DPLANE_OP_VTEP_DELETE:
1017 ret = "VTEP_DELETE";
1018 break;
1019
1020 case DPLANE_OP_RULE_ADD:
1021 ret = "RULE_ADD";
1022 break;
1023 case DPLANE_OP_RULE_DELETE:
1024 ret = "RULE_DELETE";
1025 break;
1026 case DPLANE_OP_RULE_UPDATE:
1027 ret = "RULE_UPDATE";
1028 break;
1029
1030 case DPLANE_OP_NEIGH_DISCOVER:
1031 ret = "NEIGH_DISCOVER";
1032 break;
1033
1034 case DPLANE_OP_IPTABLE_ADD:
1035 ret = "IPTABLE_ADD";
1036 break;
1037 case DPLANE_OP_IPTABLE_DELETE:
1038 ret = "IPTABLE_DELETE";
1039 break;
1040 case DPLANE_OP_IPSET_ADD:
1041 ret = "IPSET_ADD";
1042 break;
1043 case DPLANE_OP_IPSET_DELETE:
1044 ret = "IPSET_DELETE";
1045 break;
1046 case DPLANE_OP_IPSET_ENTRY_ADD:
1047 ret = "IPSET_ENTRY_ADD";
1048 break;
1049 case DPLANE_OP_IPSET_ENTRY_DELETE:
1050 ret = "IPSET_ENTRY_DELETE";
1051 break;
1052 case DPLANE_OP_NEIGH_IP_INSTALL:
1053 ret = "NEIGH_IP_INSTALL";
1054 break;
1055 case DPLANE_OP_NEIGH_IP_DELETE:
1056 ret = "NEIGH_IP_DELETE";
1057 break;
1058 case DPLANE_OP_NEIGH_TABLE_UPDATE:
1059 ret = "NEIGH_TABLE_UPDATE";
1060 break;
1061
1062 case DPLANE_OP_GRE_SET:
1063 ret = "GRE_SET";
1064 break;
1065
1066 case DPLANE_OP_INTF_ADDR_ADD:
1067 return "INTF_ADDR_ADD";
1068
1069 case DPLANE_OP_INTF_ADDR_DEL:
1070 return "INTF_ADDR_DEL";
1071
1072 case DPLANE_OP_INTF_NETCONFIG:
1073 return "INTF_NETCONFIG";
1074 }
1075
1076 return ret;
1077 }
1078
1079 const char *dplane_res2str(enum zebra_dplane_result res)
1080 {
1081 const char *ret = "<Unknown>";
1082
1083 switch (res) {
1084 case ZEBRA_DPLANE_REQUEST_FAILURE:
1085 ret = "FAILURE";
1086 break;
1087 case ZEBRA_DPLANE_REQUEST_QUEUED:
1088 ret = "QUEUED";
1089 break;
1090 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1091 ret = "SUCCESS";
1092 break;
1093 }
1094
1095 return ret;
1096 }
1097
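/*
 * Illustrative sketch: the string helpers above are convenient for debug
 * logging of a completed context. The "example_" helper name is hypothetical.
 */
#if 0
static void example_log_result(const struct zebra_dplane_ctx *ctx)
{
	zlog_debug("dplane result: op %s, status %s",
		   dplane_op2str(dplane_ctx_get_op(ctx)),
		   dplane_res2str(dplane_ctx_get_status(ctx)));
}
#endif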
1098 void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
1099 const struct prefix *dest)
1100 {
1101 DPLANE_CTX_VALID(ctx);
1102
1103 prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
1104 }
1105
1106 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
1107 {
1108 DPLANE_CTX_VALID(ctx);
1109
1110 return &(ctx->u.rinfo.zd_dest);
1111 }
1112
1113 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
1114 {
1115 DPLANE_CTX_VALID(ctx);
1116
1117 if (src)
1118 prefix_copy(&(ctx->u.rinfo.zd_src), src);
1119 else
1120 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
1121 }
1122
1123 /* Source prefix is a little special - return NULL for "no src prefix" */
1124 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
1125 {
1126 DPLANE_CTX_VALID(ctx);
1127
1128 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
1129 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
1130 return NULL;
1131 } else {
1132 return &(ctx->u.rinfo.zd_src);
1133 }
1134 }
1135
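/*
 * Illustrative sketch: callers treat a NULL return from dplane_ctx_get_src()
 * as "no source prefix". The "example_" helper name is hypothetical.
 */
#if 0
static bool example_has_src(const struct zebra_dplane_ctx *ctx)
{
	/* Non-NULL only for routes carrying a source prefix */
	return dplane_ctx_get_src(ctx) != NULL;
}
#endif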
1136 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
1137 {
1138 DPLANE_CTX_VALID(ctx);
1139
1140 return ctx->zd_is_update;
1141 }
1142
1143 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
1144 {
1145 DPLANE_CTX_VALID(ctx);
1146
1147 return ctx->zd_seq;
1148 }
1149
1150 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
1151 {
1152 DPLANE_CTX_VALID(ctx);
1153
1154 return ctx->zd_old_seq;
1155 }
1156
1157 void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
1158 {
1159 DPLANE_CTX_VALID(ctx);
1160
1161 ctx->zd_vrf_id = vrf;
1162 }
1163
1164 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
1165 {
1166 DPLANE_CTX_VALID(ctx);
1167
1168 return ctx->zd_vrf_id;
1169 }
1170
1171 /* In some paths we have only a namespace id */
1172 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid)
1173 {
1174 DPLANE_CTX_VALID(ctx);
1175
1176 ctx->zd_ns_info.ns_id = nsid;
1177 }
1178
1179 ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx)
1180 {
1181 DPLANE_CTX_VALID(ctx);
1182
1183 return ctx->zd_ns_info.ns_id;
1184 }
1185
1186 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
1187 {
1188 DPLANE_CTX_VALID(ctx);
1189
1190 return (ctx->zd_notif_provider != 0);
1191 }
1192
1193 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
1194 {
1195 DPLANE_CTX_VALID(ctx);
1196
1197 return ctx->zd_notif_provider;
1198 }
1199
1200 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
1201 uint32_t id)
1202 {
1203 DPLANE_CTX_VALID(ctx);
1204
1205 ctx->zd_notif_provider = id;
1206 }
1207
1208 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
1209 {
1210 DPLANE_CTX_VALID(ctx);
1211
1212 return ctx->zd_ifname;
1213 }
1214
1215 void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname)
1216 {
1217 DPLANE_CTX_VALID(ctx);
1218
1219 if (!ifname)
1220 return;
1221
1222 strlcpy(ctx->zd_ifname, ifname, sizeof(ctx->zd_ifname));
1223 }
1224
1225 ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
1226 {
1227 DPLANE_CTX_VALID(ctx);
1228
1229 return ctx->zd_ifindex;
1230 }
1231
1232 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex)
1233 {
1234 DPLANE_CTX_VALID(ctx);
1235
1236 ctx->zd_ifindex = ifindex;
1237 }
1238
1239 void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
1240 {
1241 DPLANE_CTX_VALID(ctx);
1242
1243 ctx->u.rinfo.zd_type = type;
1244 }
1245
1246 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
1247 {
1248 DPLANE_CTX_VALID(ctx);
1249
1250 return ctx->u.rinfo.zd_type;
1251 }
1252
1253 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
1254 {
1255 DPLANE_CTX_VALID(ctx);
1256
1257 return ctx->u.rinfo.zd_old_type;
1258 }
1259
1260 void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
1261 {
1262 DPLANE_CTX_VALID(ctx);
1263
1264 ctx->u.rinfo.zd_afi = afi;
1265 }
1266
1267 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
1268 {
1269 DPLANE_CTX_VALID(ctx);
1270
1271 return ctx->u.rinfo.zd_afi;
1272 }
1273
1274 void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
1275 {
1276 DPLANE_CTX_VALID(ctx);
1277
1278 ctx->u.rinfo.zd_safi = safi;
1279 }
1280
1281 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
1282 {
1283 DPLANE_CTX_VALID(ctx);
1284
1285 return ctx->u.rinfo.zd_safi;
1286 }
1287
1288 void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
1289 {
1290 DPLANE_CTX_VALID(ctx);
1291
1292 ctx->zd_table_id = table;
1293 }
1294
1295 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
1296 {
1297 DPLANE_CTX_VALID(ctx);
1298
1299 return ctx->zd_table_id;
1300 }
1301
1302 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
1303 {
1304 DPLANE_CTX_VALID(ctx);
1305
1306 return ctx->u.rinfo.zd_tag;
1307 }
1308
1309 void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
1310 {
1311 DPLANE_CTX_VALID(ctx);
1312
1313 ctx->u.rinfo.zd_tag = tag;
1314 }
1315
1316 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
1317 {
1318 DPLANE_CTX_VALID(ctx);
1319
1320 return ctx->u.rinfo.zd_old_tag;
1321 }
1322
1323 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
1324 {
1325 DPLANE_CTX_VALID(ctx);
1326
1327 return ctx->u.rinfo.zd_instance;
1328 }
1329
1330 void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
1331 {
1332 DPLANE_CTX_VALID(ctx);
1333
1334 ctx->u.rinfo.zd_instance = instance;
1335 }
1336
1337 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
1338 {
1339 DPLANE_CTX_VALID(ctx);
1340
1341 return ctx->u.rinfo.zd_old_instance;
1342 }
1343
1344 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
1345 {
1346 DPLANE_CTX_VALID(ctx);
1347
1348 return ctx->u.rinfo.zd_metric;
1349 }
1350
1351 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
1352 {
1353 DPLANE_CTX_VALID(ctx);
1354
1355 return ctx->u.rinfo.zd_old_metric;
1356 }
1357
1358 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
1359 {
1360 DPLANE_CTX_VALID(ctx);
1361
1362 return ctx->u.rinfo.zd_mtu;
1363 }
1364
1365 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
1366 {
1367 DPLANE_CTX_VALID(ctx);
1368
1369 return ctx->u.rinfo.zd_nexthop_mtu;
1370 }
1371
1372 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
1373 {
1374 DPLANE_CTX_VALID(ctx);
1375
1376 return ctx->u.rinfo.zd_distance;
1377 }
1378
1379 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
1380 {
1381 DPLANE_CTX_VALID(ctx);
1382
1383 ctx->u.rinfo.zd_distance = distance;
1384 }
1385
1386 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
1387 {
1388 DPLANE_CTX_VALID(ctx);
1389
1390 return ctx->u.rinfo.zd_old_distance;
1391 }
1392
1393 /*
1394 * Set the nexthops associated with a context: note that processing code
1395 * may well expect that nexthops are in canonical (sorted) order, so we
1396 * will enforce that here.
1397 */
1398 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1399 {
1400 DPLANE_CTX_VALID(ctx);
1401
1402 if (ctx->u.rinfo.zd_ng.nexthop) {
1403 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1404 ctx->u.rinfo.zd_ng.nexthop = NULL;
1405 }
1406 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1407 }
1408
1409 /*
1410 * Set the list of backup nexthops; their ordering is preserved (they're not
1411 * re-sorted).
1412 */
1413 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
1414 const struct nexthop_group *nhg)
1415 {
1416 struct nexthop *nh, *last_nh, *nexthop;
1417
1418 DPLANE_CTX_VALID(ctx);
1419
1420 if (ctx->u.rinfo.backup_ng.nexthop) {
1421 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
1422 ctx->u.rinfo.backup_ng.nexthop = NULL;
1423 }
1424
1425 last_nh = NULL;
1426
1427 /* Be careful to preserve the order of the backup list */
1428 for (nh = nhg->nexthop; nh; nh = nh->next) {
1429 nexthop = nexthop_dup(nh, NULL);
1430
1431 if (last_nh)
1432 NEXTHOP_APPEND(last_nh, nexthop);
1433 else
1434 ctx->u.rinfo.backup_ng.nexthop = nexthop;
1435
1436 last_nh = nexthop;
1437 }
1438 }
1439
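/*
 * Illustrative sketch of the two setters above: primary nexthops are copied
 * and re-sorted into canonical order, while backup nexthops keep the order
 * supplied by the caller. The "example_" helper name is hypothetical.
 */
#if 0
static void example_set_nexthops(struct zebra_dplane_ctx *ctx,
				 struct nexthop *primary,
				 const struct nexthop_group *backups)
{
	dplane_ctx_set_nexthops(ctx, primary);   /* copied and sorted */
	dplane_ctx_set_backup_nhg(ctx, backups); /* order preserved */
}
#endif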
1440 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1441 {
1442 DPLANE_CTX_VALID(ctx);
1443 return ctx->u.rinfo.zd_nhg_id;
1444 }
1445
1446 const struct nexthop_group *dplane_ctx_get_ng(
1447 const struct zebra_dplane_ctx *ctx)
1448 {
1449 DPLANE_CTX_VALID(ctx);
1450
1451 return &(ctx->u.rinfo.zd_ng);
1452 }
1453
1454 const struct nexthop_group *
1455 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
1456 {
1457 DPLANE_CTX_VALID(ctx);
1458
1459 return &(ctx->u.rinfo.backup_ng);
1460 }
1461
1462 const struct nexthop_group *
1463 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
1464 {
1465 DPLANE_CTX_VALID(ctx);
1466
1467 return &(ctx->u.rinfo.zd_old_ng);
1468 }
1469
1470 const struct nexthop_group *
1471 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
1472 {
1473 DPLANE_CTX_VALID(ctx);
1474
1475 return &(ctx->u.rinfo.old_backup_ng);
1476 }
1477
1478 const struct zebra_dplane_info *dplane_ctx_get_ns(
1479 const struct zebra_dplane_ctx *ctx)
1480 {
1481 DPLANE_CTX_VALID(ctx);
1482
1483 return &(ctx->zd_ns_info);
1484 }
1485
1486 int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx *ctx)
1487 {
1488 DPLANE_CTX_VALID(ctx);
1489
1490 #ifdef HAVE_NETLINK
1491 return ctx->zd_ns_info.sock;
1492 #else
1493 return -1;
1494 #endif
1495 }
1496
1497 /* Accessors for nexthop information */
1498 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
1499 {
1500 DPLANE_CTX_VALID(ctx);
1501 return ctx->u.rinfo.nhe.id;
1502 }
1503
1504 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
1505 {
1506 DPLANE_CTX_VALID(ctx);
1507 return ctx->u.rinfo.nhe.old_id;
1508 }
1509
1510 afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
1511 {
1512 DPLANE_CTX_VALID(ctx);
1513 return ctx->u.rinfo.nhe.afi;
1514 }
1515
1516 vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
1517 {
1518 DPLANE_CTX_VALID(ctx);
1519 return ctx->u.rinfo.nhe.vrf_id;
1520 }
1521
1522 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
1523 {
1524 DPLANE_CTX_VALID(ctx);
1525 return ctx->u.rinfo.nhe.type;
1526 }
1527
1528 const struct nexthop_group *
1529 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
1530 {
1531 DPLANE_CTX_VALID(ctx);
1532 return &(ctx->u.rinfo.nhe.ng);
1533 }
1534
1535 const struct nh_grp *
1536 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
1537 {
1538 DPLANE_CTX_VALID(ctx);
1539 return ctx->u.rinfo.nhe.nh_grp;
1540 }
1541
1542 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
1543 {
1544 DPLANE_CTX_VALID(ctx);
1545 return ctx->u.rinfo.nhe.nh_grp_count;
1546 }
1547
1548 /* Accessors for LSP information */
1549
1550 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
1551 {
1552 DPLANE_CTX_VALID(ctx);
1553
1554 return ctx->u.lsp.ile.in_label;
1555 }
1556
1557 void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
1558 {
1559 DPLANE_CTX_VALID(ctx);
1560
1561 ctx->u.lsp.ile.in_label = label;
1562 }
1563
1564 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
1565 {
1566 DPLANE_CTX_VALID(ctx);
1567
1568 return ctx->u.lsp.addr_family;
1569 }
1570
1571 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
1572 uint8_t family)
1573 {
1574 DPLANE_CTX_VALID(ctx);
1575
1576 ctx->u.lsp.addr_family = family;
1577 }
1578
1579 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
1580 {
1581 DPLANE_CTX_VALID(ctx);
1582
1583 return ctx->u.lsp.flags;
1584 }
1585
1586 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
1587 uint32_t flags)
1588 {
1589 DPLANE_CTX_VALID(ctx);
1590
1591 ctx->u.lsp.flags = flags;
1592 }
1593
1594 const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
1595 const struct zebra_dplane_ctx *ctx)
1596 {
1597 DPLANE_CTX_VALID(ctx);
1598 return &(ctx->u.lsp.nhlfe_list);
1599 }
1600
1601 const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
1602 const struct zebra_dplane_ctx *ctx)
1603 {
1604 DPLANE_CTX_VALID(ctx);
1605 return &(ctx->u.lsp.backup_nhlfe_list);
1606 }
1607
1608 struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1609 enum lsp_types_t lsp_type,
1610 enum nexthop_types_t nh_type,
1611 const union g_addr *gate,
1612 ifindex_t ifindex, uint8_t num_labels,
1613 mpls_label_t *out_labels)
1614 {
1615 struct zebra_nhlfe *nhlfe;
1616
1617 DPLANE_CTX_VALID(ctx);
1618
1619 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1620 lsp_type, nh_type, gate,
1621 ifindex, num_labels, out_labels);
1622
1623 return nhlfe;
1624 }
1625
1626 struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
1627 struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
1628 enum nexthop_types_t nh_type, const union g_addr *gate,
1629 ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
1630 {
1631 struct zebra_nhlfe *nhlfe;
1632
1633 DPLANE_CTX_VALID(ctx);
1634
1635 nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
1636 lsp_type, nh_type, gate,
1637 ifindex, num_labels,
1638 out_labels);
1639
1640 return nhlfe;
1641 }
1642
1643 const struct zebra_nhlfe *
1644 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
1645 {
1646 DPLANE_CTX_VALID(ctx);
1647
1648 return ctx->u.lsp.best_nhlfe;
1649 }
1650
1651 const struct zebra_nhlfe *
1652 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
1653 struct zebra_nhlfe *nhlfe)
1654 {
1655 DPLANE_CTX_VALID(ctx);
1656
1657 ctx->u.lsp.best_nhlfe = nhlfe;
1658 return ctx->u.lsp.best_nhlfe;
1659 }
1660
1661 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
1662 {
1663 DPLANE_CTX_VALID(ctx);
1664
1665 return ctx->u.lsp.num_ecmp;
1666 }
1667
1668 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
1669 {
1670 DPLANE_CTX_VALID(ctx);
1671
1672 return ctx->u.pw.local_label;
1673 }
1674
1675 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
1676 {
1677 DPLANE_CTX_VALID(ctx);
1678
1679 return ctx->u.pw.remote_label;
1680 }
1681
1682 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
1683 {
1684 DPLANE_CTX_VALID(ctx);
1685
1686 return ctx->u.pw.type;
1687 }
1688
1689 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
1690 {
1691 DPLANE_CTX_VALID(ctx);
1692
1693 return ctx->u.pw.af;
1694 }
1695
1696 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
1697 {
1698 DPLANE_CTX_VALID(ctx);
1699
1700 return ctx->u.pw.flags;
1701 }
1702
1703 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
1704 {
1705 DPLANE_CTX_VALID(ctx);
1706
1707 return ctx->u.pw.status;
1708 }
1709
1710 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
1711 {
1712 DPLANE_CTX_VALID(ctx);
1713
1714 ctx->u.pw.status = status;
1715 }
1716
1717 const union g_addr *dplane_ctx_get_pw_dest(
1718 const struct zebra_dplane_ctx *ctx)
1719 {
1720 DPLANE_CTX_VALID(ctx);
1721
1722 return &(ctx->u.pw.dest);
1723 }
1724
1725 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
1726 const struct zebra_dplane_ctx *ctx)
1727 {
1728 DPLANE_CTX_VALID(ctx);
1729
1730 return &(ctx->u.pw.fields);
1731 }
1732
1733 const struct nexthop_group *
1734 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
1735 {
1736 DPLANE_CTX_VALID(ctx);
1737
1738 return &(ctx->u.pw.fib_nhg);
1739 }
1740
1741 const struct nexthop_group *
1742 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
1743 {
1744 DPLANE_CTX_VALID(ctx);
1745
1746 return &(ctx->u.pw.primary_nhg);
1747 }
1748
1749 const struct nexthop_group *
1750 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
1751 {
1752 DPLANE_CTX_VALID(ctx);
1753
1754 return &(ctx->u.pw.backup_nhg);
1755 }
1756
1757 /* Accessors for interface information */
1758 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
1759 {
1760 DPLANE_CTX_VALID(ctx);
1761
1762 return ctx->u.intf.metric;
1763 }
1764
1765 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
1766 {
1767 DPLANE_CTX_VALID(ctx);
1768
1769 ctx->u.intf.metric = metric;
1770 }
1771
1772 /* Is interface addr p2p? */
1773 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
1774 {
1775 DPLANE_CTX_VALID(ctx);
1776
1777 return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
1778 }
1779
1780 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
1781 {
1782 DPLANE_CTX_VALID(ctx);
1783
1784 return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
1785 }
1786
1787 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
1788 {
1789 DPLANE_CTX_VALID(ctx);
1790
1791 return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
1792 }
1793
1794 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
1795 {
1796 DPLANE_CTX_VALID(ctx);
1797
1798 ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
1799 }
1800
1801 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
1802 {
1803 DPLANE_CTX_VALID(ctx);
1804
1805 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
1806 }
1807
1808 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
1809 {
1810 DPLANE_CTX_VALID(ctx);
1811
1812 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
1813 }
1814
1815 const struct prefix *dplane_ctx_get_intf_addr(
1816 const struct zebra_dplane_ctx *ctx)
1817 {
1818 DPLANE_CTX_VALID(ctx);
1819
1820 return &(ctx->u.intf.prefix);
1821 }
1822
1823 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
1824 const struct prefix *p)
1825 {
1826 DPLANE_CTX_VALID(ctx);
1827
1828 prefix_copy(&(ctx->u.intf.prefix), p);
1829 }
1830
1831 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
1832 {
1833 DPLANE_CTX_VALID(ctx);
1834
1835 return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
1836 }
1837
1838 const struct prefix *dplane_ctx_get_intf_dest(
1839 const struct zebra_dplane_ctx *ctx)
1840 {
1841 DPLANE_CTX_VALID(ctx);
1842
1843 return &(ctx->u.intf.dest_prefix);
1844 }
1845
1846 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
1847 const struct prefix *p)
1848 {
1849 DPLANE_CTX_VALID(ctx);
1850
1851 prefix_copy(&(ctx->u.intf.dest_prefix), p);
1852 }
1853
1854 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
1855 {
1856 DPLANE_CTX_VALID(ctx);
1857
1858 return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
1859 }
1860
1861 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
1862 {
1863 DPLANE_CTX_VALID(ctx);
1864
1865 return ctx->u.intf.label;
1866 }
1867
1868 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label)
1869 {
1870 size_t len;
1871
1872 DPLANE_CTX_VALID(ctx);
1873
1874 if (ctx->u.intf.label && ctx->u.intf.label != ctx->u.intf.label_buf)
1875 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
1876
1877 ctx->u.intf.label = NULL;
1878
1879 if (label) {
1880 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
1881
1882 /* Use embedded buffer if it's adequate; else allocate. */
1883 len = strlen(label);
1884
1885 if (len < sizeof(ctx->u.intf.label_buf)) {
1886 strlcpy(ctx->u.intf.label_buf, label,
1887 sizeof(ctx->u.intf.label_buf));
1888 ctx->u.intf.label = ctx->u.intf.label_buf;
1889 } else {
1890 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, label);
1891 }
1892 } else {
1893 ctx->u.intf.flags &= ~DPLANE_INTF_HAS_LABEL;
1894 }
1895 }
1896
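/*
 * Illustrative sketch: short labels land in the context's embedded buffer,
 * longer strings are copied into an allocation freed along with the ctx.
 * The "example_" helper and the "vlan10" label are hypothetical.
 */
#if 0
static void example_set_label(struct zebra_dplane_ctx *ctx)
{
	dplane_ctx_set_intf_label(ctx, "vlan10");

	if (dplane_ctx_intf_has_label(ctx))
		zlog_debug("label: %s", dplane_ctx_get_intf_label(ctx));
}
#endif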
1897 /* Accessors for MAC information */
1898 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1899 {
1900 DPLANE_CTX_VALID(ctx);
1901 return ctx->u.macinfo.vid;
1902 }
1903
1904 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1905 {
1906 DPLANE_CTX_VALID(ctx);
1907 return ctx->u.macinfo.is_sticky;
1908 }
1909
1910 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1911 {
1912 DPLANE_CTX_VALID(ctx);
1913 return ctx->u.macinfo.nhg_id;
1914 }
1915
1916 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
1917 {
1918 DPLANE_CTX_VALID(ctx);
1919 return ctx->u.macinfo.update_flags;
1920 }
1921
1922 const struct ethaddr *dplane_ctx_mac_get_addr(
1923 const struct zebra_dplane_ctx *ctx)
1924 {
1925 DPLANE_CTX_VALID(ctx);
1926 return &(ctx->u.macinfo.mac);
1927 }
1928
1929 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1930 const struct zebra_dplane_ctx *ctx)
1931 {
1932 DPLANE_CTX_VALID(ctx);
1933 return &(ctx->u.macinfo.vtep_ip);
1934 }
1935
1936 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1937 {
1938 DPLANE_CTX_VALID(ctx);
1939 return ctx->u.macinfo.br_ifindex;
1940 }
1941
1942 /* Accessors for neighbor information */
1943 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1944 const struct zebra_dplane_ctx *ctx)
1945 {
1946 DPLANE_CTX_VALID(ctx);
1947 return &(ctx->u.neigh.ip_addr);
1948 }
1949
1950 const struct ipaddr *
1951 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
1952 {
1953 DPLANE_CTX_VALID(ctx);
1954 return &(ctx->u.neigh.link.ip_addr);
1955 }
1956
1957 const struct ethaddr *dplane_ctx_neigh_get_mac(
1958 const struct zebra_dplane_ctx *ctx)
1959 {
1960 DPLANE_CTX_VALID(ctx);
1961 return &(ctx->u.neigh.link.mac);
1962 }
1963
1964 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
1965 {
1966 DPLANE_CTX_VALID(ctx);
1967 return ctx->u.neigh.flags;
1968 }
1969
1970 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
1971 {
1972 DPLANE_CTX_VALID(ctx);
1973 return ctx->u.neigh.state;
1974 }
1975
1976 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
1977 {
1978 DPLANE_CTX_VALID(ctx);
1979 return ctx->u.neigh.update_flags;
1980 }
1981
1982 /* Accessor for GRE set */
1983 uint32_t
1984 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx)
1985 {
1986 DPLANE_CTX_VALID(ctx);
1987
1988 return ctx->u.gre.link_ifindex;
1989 }
1990
1991 unsigned int
1992 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx)
1993 {
1994 DPLANE_CTX_VALID(ctx);
1995
1996 return ctx->u.gre.mtu;
1997 }
1998
1999 const struct zebra_l2info_gre *
2000 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx)
2001 {
2002 DPLANE_CTX_VALID(ctx);
2003
2004 return &ctx->u.gre.info;
2005 }
2006
2007 /* Accessors for PBR rule information */
2008 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
2009 {
2010 DPLANE_CTX_VALID(ctx);
2011
2012 return ctx->u.rule.sock;
2013 }
2014
2015 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
2016 {
2017 DPLANE_CTX_VALID(ctx);
2018
2019 return ctx->u.rule.new.ifname;
2020 }
2021
2022 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
2023 {
2024 DPLANE_CTX_VALID(ctx);
2025
2026 return ctx->u.rule.unique;
2027 }
2028
2029 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
2030 {
2031 DPLANE_CTX_VALID(ctx);
2032
2033 return ctx->u.rule.seq;
2034 }
2035
2036 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
2037 {
2038 DPLANE_CTX_VALID(ctx);
2039
2040 return ctx->u.rule.new.priority;
2041 }
2042
2043 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
2044 {
2045 DPLANE_CTX_VALID(ctx);
2046
2047 return ctx->u.rule.old.priority;
2048 }
2049
2050 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
2051 {
2052 DPLANE_CTX_VALID(ctx);
2053
2054 return ctx->u.rule.new.table;
2055 }
2056
2057 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
2058 {
2059 DPLANE_CTX_VALID(ctx);
2060
2061 return ctx->u.rule.old.table;
2062 }
2063
2064 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
2065 {
2066 DPLANE_CTX_VALID(ctx);
2067
2068 return ctx->u.rule.new.filter_bm;
2069 }
2070
2071 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
2072 {
2073 DPLANE_CTX_VALID(ctx);
2074
2075 return ctx->u.rule.old.filter_bm;
2076 }
2077
2078 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
2079 {
2080 DPLANE_CTX_VALID(ctx);
2081
2082 return ctx->u.rule.new.fwmark;
2083 }
2084
2085 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
2086 {
2087 DPLANE_CTX_VALID(ctx);
2088
2089 return ctx->u.rule.old.fwmark;
2090 }
2091
2092 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
2093 {
2094 DPLANE_CTX_VALID(ctx);
2095
2096 return ctx->u.rule.new.ip_proto;
2097 }
2098
2099 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
2100 {
2101 DPLANE_CTX_VALID(ctx);
2102
2103 return ctx->u.rule.old.ip_proto;
2104 }
2105
2106 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
2107 {
2108 DPLANE_CTX_VALID(ctx);
2109
2110 return ctx->u.rule.new.dsfield;
2111 }
2112
2113 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
2114 {
2115 DPLANE_CTX_VALID(ctx);
2116
2117 return ctx->u.rule.old.dsfield;
2118 }
2119
2120 const struct prefix *
2121 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
2122 {
2123 DPLANE_CTX_VALID(ctx);
2124
2125 return &(ctx->u.rule.new.src_ip);
2126 }
2127
2128 const struct prefix *
2129 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
2130 {
2131 DPLANE_CTX_VALID(ctx);
2132
2133 return &(ctx->u.rule.old.src_ip);
2134 }
2135
2136 const struct prefix *
2137 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
2138 {
2139 DPLANE_CTX_VALID(ctx);
2140
2141 return &(ctx->u.rule.new.dst_ip);
2142 }
2143
2144 const struct prefix *
2145 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
2146 {
2147 DPLANE_CTX_VALID(ctx);
2148
2149 return &(ctx->u.rule.old.dst_ip);
2150 }
2151
2152 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx)
2153 {
2154 DPLANE_CTX_VALID(ctx);
2155
2156 return ctx->u.br_port.flags;
2157 }
2158
2159 uint32_t
2160 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx)
2161 {
2162 DPLANE_CTX_VALID(ctx);
2163
2164 return ctx->u.br_port.sph_filter_cnt;
2165 }
2166
2167 const struct in_addr *
2168 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx)
2169 {
2170 DPLANE_CTX_VALID(ctx);
2171
2172 return ctx->u.br_port.sph_filters;
2173 }
2174
2175 uint32_t
2176 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx)
2177 {
2178 DPLANE_CTX_VALID(ctx);
2179
2180 return ctx->u.br_port.backup_nhg_id;
2181 }
2182
2183 /* Accessors for PBR iptable information */
2184 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
2185 struct zebra_pbr_iptable *table)
2186 {
2187 DPLANE_CTX_VALID(ctx);
2188
2189 memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
2190 }
2191
2192 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
2193 struct zebra_pbr_ipset *ipset)
2194 {
2195 DPLANE_CTX_VALID(ctx);
2196
2197 assert(ipset);
2198
2199 if (ctx->zd_op == DPLANE_OP_IPSET_ENTRY_ADD ||
2200 ctx->zd_op == DPLANE_OP_IPSET_ENTRY_DELETE) {
2201 memset(ipset, 0, sizeof(struct zebra_pbr_ipset));
2202 ipset->type = ctx->u.ipset_entry.info.type;
2203 ipset->family = ctx->u.ipset_entry.info.family;
2204 memcpy(&ipset->ipset_name, &ctx->u.ipset_entry.info.ipset_name,
2205 ZEBRA_IPSET_NAME_SIZE);
2206 } else
2207 memcpy(ipset, &ctx->u.ipset, sizeof(struct zebra_pbr_ipset));
2208 }
2209
2210 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
2211 struct zebra_pbr_ipset_entry *entry)
2212 {
2213 DPLANE_CTX_VALID(ctx);
2214
2215 assert(entry);
2216
2217 memcpy(entry, &ctx->u.ipset_entry.entry, sizeof(struct zebra_pbr_ipset_entry));
2218 }
2219
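/*
 * Illustrative sketch: the PBR accessors above copy into caller-provided
 * storage rather than returning pointers into the context. The "example_"
 * helper name is hypothetical.
 */
#if 0
static void example_get_ipset(const struct zebra_dplane_ctx *ctx)
{
	struct zebra_pbr_ipset ipset;

	dplane_ctx_get_pbr_ipset(ctx, &ipset);
	/* ... use ipset.ipset_name, ipset.type, etc. ... */
}
#endif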
2220 /*
2221 * End of dplane context accessors
2222 */
2223
2224 /* Optional extra info about interfaces in nexthops - a plugin must enable
2225 * this extra info.
2226 */
2227 const struct dplane_intf_extra *
2228 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
2229 {
2230 return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
2231 }
2232
2233 const struct dplane_intf_extra *
2234 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
2235 const struct dplane_intf_extra *ptr)
2236 {
2237 return TAILQ_NEXT(ptr, link);
2238 }
2239
2240 vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
2241 {
2242 return ptr->vrf_id;
2243 }
2244
2245 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr)
2246 {
2247 return ptr->ifindex;
2248 }
2249
2250 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr)
2251 {
2252 return ptr->flags;
2253 }
2254
2255 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr)
2256 {
2257 return ptr->status;
2258 }
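
/*
 * Illustrative sketch only (not compiled here): a plugin that has
 * enabled the extra interface info can walk the list captured with a
 * route update using the accessors above, roughly:
 *
 *     const struct dplane_intf_extra *extra;
 *
 *     for (extra = dplane_ctx_get_intf_extra(ctx); extra != NULL;
 *          extra = dplane_ctx_intf_extra_next(ctx, extra)) {
 *             uint32_t ifindex = dplane_intf_extra_get_ifindex(extra);
 *             uint32_t flags = dplane_intf_extra_get_flags(extra);
 *             ... consume ifindex/flags/status as needed ...
 *     }
 */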
2259
2260 /*
2261 * End of interface extra info accessors
2262 */
2263
2264 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx)
2265 {
2266 DPLANE_CTX_VALID(ctx);
2267
2268 return ctx->u.neightable.family;
2269 }
2270
2271 uint32_t
2272 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx)
2273 {
2274 DPLANE_CTX_VALID(ctx);
2275
2276 return ctx->u.neightable.app_probes;
2277 }
2278
2279 uint32_t
2280 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx)
2281 {
2282 DPLANE_CTX_VALID(ctx);
2283
2284 return ctx->u.neightable.ucast_probes;
2285 }
2286
2287 uint32_t
2288 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
2289 {
2290 DPLANE_CTX_VALID(ctx);
2291
2292 return ctx->u.neightable.mcast_probes;
2293 }
2294
2295 ifindex_t dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx *ctx)
2296 {
2297 DPLANE_CTX_VALID(ctx);
2298
2299 return ctx->u.netconf.ifindex;
2300 }
2301
2302 ns_id_t dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx *ctx)
2303 {
2304 DPLANE_CTX_VALID(ctx);
2305
2306 return ctx->u.netconf.ns_id;
2307 }
2308
2309 void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx *ctx,
2310 ifindex_t ifindex)
2311 {
2312 DPLANE_CTX_VALID(ctx);
2313
2314 ctx->u.netconf.ifindex = ifindex;
2315 }
2316
2317 void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t ns_id)
2318 {
2319 DPLANE_CTX_VALID(ctx);
2320
2321 ctx->u.netconf.ns_id = ns_id;
2322 }
2323
2324 enum dplane_netconf_status_e
2325 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
2326 {
2327 DPLANE_CTX_VALID(ctx);
2328
2329 return ctx->u.netconf.mpls_val;
2330 }
2331
2332 enum dplane_netconf_status_e
2333 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
2334 {
2335 DPLANE_CTX_VALID(ctx);
2336
2337 return ctx->u.netconf.mcast_val;
2338 }
2339
2340 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
2341 enum dplane_netconf_status_e val)
2342 {
2343 DPLANE_CTX_VALID(ctx);
2344
2345 ctx->u.netconf.mpls_val = val;
2346 }
2347
2348 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
2349 enum dplane_netconf_status_e val)
2350 {
2351 DPLANE_CTX_VALID(ctx);
2352
2353 ctx->u.netconf.mcast_val = val;
2354 }
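
/*
 * Illustrative sketch only (not compiled here): a consumer of a
 * NETCONF update reads the interface identity and per-interface
 * status values back out of the context, e.g.:
 *
 *     ifindex_t ifindex = dplane_ctx_get_netconf_ifindex(ctx);
 *     ns_id_t ns_id = dplane_ctx_get_netconf_ns_id(ctx);
 *     enum dplane_netconf_status_e mpls =
 *             dplane_ctx_get_netconf_mpls(ctx);
 *     enum dplane_netconf_status_e mcast =
 *             dplane_ctx_get_netconf_mcast(ctx);
 *
 * and then updates its own view of the mpls/mcast forwarding state
 * for that (ns_id, ifindex) pair.
 */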
2355
2356 /*
2357 * Retrieve the limit on the number of pending, unprocessed updates.
2358 */
2359 uint32_t dplane_get_in_queue_limit(void)
2360 {
2361 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2362 memory_order_relaxed);
2363 }
2364
2365 /*
2366 * Configure limit on the number of pending, queued updates.
2367 */
2368 void dplane_set_in_queue_limit(uint32_t limit, bool set)
2369 {
2370 /* Reset to default on 'unset' */
2371 if (!set)
2372 limit = DPLANE_DEFAULT_MAX_QUEUED;
2373
2374 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
2375 memory_order_relaxed);
2376 }
2377
2378 /*
2379 * Retrieve the current queue depth of incoming, unprocessed updates
2380 */
2381 uint32_t dplane_get_in_queue_len(void)
2382 {
2383 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
2384 memory_order_seq_cst);
2385 }
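
/*
 * Illustrative sketch only (not compiled here): the limit is normally
 * set from zebra configuration, but programmatically the pattern is:
 *
 *     dplane_set_in_queue_limit(1000, true);    set an explicit limit
 *     dplane_set_in_queue_limit(0, false);      revert to the default
 *
 *     if (dplane_get_in_queue_len() >= dplane_get_in_queue_limit())
 *             ... defer or throttle further updates ...
 */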
2386
2387 /*
2388 * Common dataplane context init with zebra namespace info.
2389 */
2390 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
2391 struct zebra_ns *zns,
2392 bool is_update)
2393 {
2394 dplane_info_from_zns(&(ctx->zd_ns_info), zns);
2395
2396 ctx->zd_is_update = is_update;
2397
2398 #if defined(HAVE_NETLINK)
2399 /* Increment message counter after copying to context struct - may need
2400 * two messages in some 'update' cases.
2401 */
2402 if (is_update)
2403 zns->netlink_dplane_out.seq += 2;
2404 else
2405 zns->netlink_dplane_out.seq++;
2406 #endif /* HAVE_NETLINK */
2407
2408 return AOK;
2409 }
2410
2411 /*
2412 * Initialize a context block for a route update from zebra data structs.
2413 */
2414 int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2415 struct route_node *rn, struct route_entry *re)
2416 {
2417 int ret = EINVAL;
2418 const struct route_table *table = NULL;
2419 const struct rib_table_info *info;
2420 const struct prefix *p, *src_p;
2421 struct zebra_ns *zns;
2422 struct zebra_vrf *zvrf;
2423 struct nexthop *nexthop;
2424 struct zebra_l3vni *zl3vni;
2425 const struct interface *ifp;
2426 struct dplane_intf_extra *if_extra;
2427
2428 if (!ctx || !rn || !re)
2429 goto done;
2430
2431 TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
2432
2433 ctx->zd_op = op;
2434 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2435
2436 ctx->u.rinfo.zd_type = re->type;
2437 ctx->u.rinfo.zd_old_type = re->type;
2438
2439 /* Prefixes: dest, and optional source */
2440 srcdest_rnode_prefixes(rn, &p, &src_p);
2441
2442 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
2443
2444 if (src_p)
2445 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
2446 else
2447 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
2448
2449 ctx->zd_table_id = re->table;
2450
2451 ctx->u.rinfo.zd_metric = re->metric;
2452 ctx->u.rinfo.zd_old_metric = re->metric;
2453 ctx->zd_vrf_id = re->vrf_id;
2454 ctx->u.rinfo.zd_mtu = re->mtu;
2455 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
2456 ctx->u.rinfo.zd_instance = re->instance;
2457 ctx->u.rinfo.zd_tag = re->tag;
2458 ctx->u.rinfo.zd_old_tag = re->tag;
2459 ctx->u.rinfo.zd_distance = re->distance;
2460
2461 table = srcdest_rnode_table(rn);
2462 info = table->info;
2463
2464 ctx->u.rinfo.zd_afi = info->afi;
2465 ctx->u.rinfo.zd_safi = info->safi;
2466
2467 /* Copy nexthops; recursive info is included too */
2468 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
2469 re->nhe->nhg.nexthop, NULL);
2470 ctx->u.rinfo.zd_nhg_id = re->nhe->id;
2471
2472 /* Copy backup nexthop info, if present */
2473 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2474 copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
2475 re->nhe->backup_info->nhe->nhg.nexthop, NULL);
2476 }
2477
2478 /*
2479 * Ensure that the dplane nexthops' flags are clear and copy
2480 * encapsulation information.
2481 */
2482 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) {
2483 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2484
2485 /* Optionally capture extra interface info while we're in the
2486 * main zebra pthread - a plugin has to ask for this info.
2487 */
2488 if (dplane_collect_extra_intf_info) {
2489 ifp = if_lookup_by_index(nexthop->ifindex,
2490 nexthop->vrf_id);
2491
2492 if (ifp) {
2493 if_extra = XCALLOC(
2494 MTYPE_DP_INTF,
2495 sizeof(struct dplane_intf_extra));
2496 if_extra->vrf_id = nexthop->vrf_id;
2497 if_extra->ifindex = nexthop->ifindex;
2498 if_extra->flags = ifp->flags;
2499 if_extra->status = ifp->status;
2500
2501 TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
2502 if_extra, link);
2503 }
2504 }
2505
2506 /* Check for available evpn encapsulations. */
2507 if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE))
2508 continue;
2509
2510 zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
2511 if (zl3vni && is_l3vni_oper_up(zl3vni)) {
2512 nexthop->nh_encap_type = NET_VXLAN;
2513 nexthop->nh_encap.vni = zl3vni->vni;
2514 }
2515 }
2516
2517 /* Don't need some info when capturing a system notification */
2518 if (op == DPLANE_OP_SYS_ROUTE_ADD ||
2519 op == DPLANE_OP_SYS_ROUTE_DELETE) {
2520 ret = AOK;
2521 goto done;
2522 }
2523
2524 /* Extract ns info - can't use pointers to 'core' structs */
2525 zvrf = vrf_info_lookup(re->vrf_id);
2526 zns = zvrf->zns;
2527 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
2528
2529 #ifdef HAVE_NETLINK
2530 {
2531 struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
2532
2533 ctx->u.rinfo.nhe.id = nhe->id;
2534 ctx->u.rinfo.nhe.old_id = 0;
2535 /*
2536 * Check if the nhe is installed/queued before doing anything
2537 * with this route.
2538 *
2539 * If it's a delete we only use the prefix anyway, so this only
2540 * matters for INSTALL/UPDATE.
2541 */
2542 if (zebra_nhg_kernel_nexthops_enabled()
2543 && (((op == DPLANE_OP_ROUTE_INSTALL)
2544 || (op == DPLANE_OP_ROUTE_UPDATE))
2545 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2546 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
2547 ret = ENOENT;
2548 goto done;
2549 }
2550
2551 re->nhe_installed_id = nhe->id;
2552 }
2553 #endif /* HAVE_NETLINK */
2554
2555 /* Capture a sequence number so we can try to detect
2556 * when a result is stale.
2557 */
2558 re->dplane_sequence = zebra_router_get_next_sequence();
2559 ctx->zd_seq = re->dplane_sequence;
2560
2561 ret = AOK;
2562
2563 done:
2564 return ret;
2565 }
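
/*
 * Illustrative sketch only (not compiled here): the usual call pattern
 * for this init helper is the one used by dplane_route_update_internal()
 * further below - allocate a context, init it from the zebra structs,
 * then enqueue it for the dplane pthread:
 *
 *     ctx = dplane_ctx_alloc();
 *     ret = dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_INSTALL, rn, re);
 *     if (ret == AOK)
 *             ret = dplane_update_enqueue(ctx);
 *     else
 *             dplane_ctx_free(&ctx);
 */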
2566
2567 /**
2568 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2569 *
2570 * @ctx: Dataplane context to init
2571 * @op: Operation being performed
2572 * @nhe: Nexthop group hash entry
2573 *
2574 * Return: Result status
2575 */
2576 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2577 struct nhg_hash_entry *nhe)
2578 {
2579 struct zebra_vrf *zvrf = NULL;
2580 struct zebra_ns *zns = NULL;
2581 int ret = EINVAL;
2582
2583 if (!ctx || !nhe)
2584 goto done;
2585
2586 ctx->zd_op = op;
2587 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2588
2589 /* Copy over nhe info */
2590 ctx->u.rinfo.nhe.id = nhe->id;
2591 ctx->u.rinfo.nhe.afi = nhe->afi;
2592 ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
2593 ctx->u.rinfo.nhe.type = nhe->type;
2594
2595 nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), &(nhe->nhg));
2596
2597 /* If this is a group, convert it to a grp array of ids */
2598 if (!zebra_nhg_depends_is_empty(nhe)
2599 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
2600 ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
2601 ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);
2602
2603 zvrf = vrf_info_lookup(nhe->vrf_id);
2604
2605 /*
2606 * Fall back to the default namespace if the vrf got ripped out
2607 * from under us.
2608 */
2609 zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
2610
2611 /*
2612 * TODO: Might not need to mark this as an update, since
2613 * it probably won't require two messages
2614 */
2615 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));
2616
2617 ret = AOK;
2618
2619 done:
2620 return ret;
2621 }
2622
2623 /*
2624 * Capture information for an LSP update in a dplane context.
2625 */
2626 int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2627 struct zebra_lsp *lsp)
2628 {
2629 int ret = AOK;
2630 struct zebra_nhlfe *nhlfe, *new_nhlfe;
2631
2632 ctx->zd_op = op;
2633 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2634
2635 /* Capture namespace info */
2636 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2637 (op == DPLANE_OP_LSP_UPDATE));
2638
2639 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
2640
2641 nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
2642 nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
2643
2644 /* This may be called to create/init a dplane context, not necessarily
2645 * to copy an lsp object.
2646 */
2647 if (lsp == NULL) {
2648 ret = AOK;
2649 goto done;
2650 }
2651
2652 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2653 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2654 dplane_op2str(op), lsp->ile.in_label,
2655 lsp->num_ecmp);
2656
2657 ctx->u.lsp.ile = lsp->ile;
2658 ctx->u.lsp.addr_family = lsp->addr_family;
2659 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
2660 ctx->u.lsp.flags = lsp->flags;
2661
2662 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2663 frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
2664 /* Not sure if this is meaningful... */
2665 if (nhlfe->nexthop == NULL)
2666 continue;
2667
2668 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
2669 nhlfe->nexthop);
2670 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2671 ret = ENOMEM;
2672 break;
2673 }
2674
2675 /* Need to copy flags and backup info too */
2676 new_nhlfe->flags = nhlfe->flags;
2677 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2678
2679 if (CHECK_FLAG(new_nhlfe->nexthop->flags,
2680 NEXTHOP_FLAG_HAS_BACKUP)) {
2681 new_nhlfe->nexthop->backup_num =
2682 nhlfe->nexthop->backup_num;
2683 memcpy(new_nhlfe->nexthop->backup_idx,
2684 nhlfe->nexthop->backup_idx,
2685 new_nhlfe->nexthop->backup_num);
2686 }
2687
2688 if (nhlfe == lsp->best_nhlfe)
2689 ctx->u.lsp.best_nhlfe = new_nhlfe;
2690 }
2691
2692 if (ret != AOK)
2693 goto done;
2694
2695 /* Capture backup nhlfes/nexthops */
2696 frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
2697 /* Not sure if this is meaningful... */
2698 if (nhlfe->nexthop == NULL)
2699 continue;
2700
2701 new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
2702 nhlfe->type,
2703 nhlfe->nexthop);
2704 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2705 ret = ENOMEM;
2706 break;
2707 }
2708
2709 /* Need to copy flags too */
2710 new_nhlfe->flags = nhlfe->flags;
2711 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2712 }
2713
2714 /* On error the ctx will be cleaned up, so we don't need to
2715 * deal with any allocated nhlfe or nexthop structs here.
2716 */
2717 done:
2718
2719 return ret;
2720 }
2721
2722 /*
2723 * Capture information for a pseudowire update in a dplane context.
2724 */
2725 static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
2726 enum dplane_op_e op,
2727 struct zebra_pw *pw)
2728 {
2729 int ret = EINVAL;
2730 struct prefix p;
2731 afi_t afi;
2732 struct route_table *table;
2733 struct route_node *rn;
2734 struct route_entry *re;
2735 const struct nexthop_group *nhg;
2736 struct nexthop *nh, *newnh, *last_nh;
2737
2738 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2739 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2740 dplane_op2str(op), pw->ifname, pw->local_label,
2741 pw->remote_label);
2742
2743 ctx->zd_op = op;
2744 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2745
2746 /* Capture namespace info: no netlink support as of 12/18,
2747 * but just in case...
2748 */
2749 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2750
2751 memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
2752
2753 /* This name appears to be a c-string, so we use string copy. */
2754 strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));
2755
2756 ctx->zd_vrf_id = pw->vrf_id;
2757 ctx->zd_ifindex = pw->ifindex;
2758 ctx->u.pw.type = pw->type;
2759 ctx->u.pw.af = pw->af;
2760 ctx->u.pw.local_label = pw->local_label;
2761 ctx->u.pw.remote_label = pw->remote_label;
2762 ctx->u.pw.flags = pw->flags;
2763
2764 ctx->u.pw.dest = pw->nexthop;
2765
2766 ctx->u.pw.fields = pw->data;
2767
2768 /* Capture nexthop info for the pw destination. We need to look
2769 * up and use zebra datastructs, but we're running in the zebra
2770 * pthread here so that should be ok.
2771 */
2772 memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
2773 p.family = pw->af;
2774 p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
2775
2776 afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
2777 table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
2778 if (table == NULL)
2779 goto done;
2780
2781 rn = route_node_match(table, &p);
2782 if (rn == NULL)
2783 goto done;
2784
2785 re = NULL;
2786 RNODE_FOREACH_RE(rn, re) {
2787 if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
2788 break;
2789 }
2790
2791 if (re) {
2792 /* We'll capture a 'fib' list of nexthops that meet our
2793 * criteria: installed, and labelled.
2794 */
2795 nhg = rib_get_fib_nhg(re);
2796 last_nh = NULL;
2797
2798 if (nhg && nhg->nexthop) {
2799 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2800 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2801 || CHECK_FLAG(nh->flags,
2802 NEXTHOP_FLAG_RECURSIVE)
2803 || nh->nh_label == NULL)
2804 continue;
2805
2806 newnh = nexthop_dup(nh, NULL);
2807
2808 if (last_nh)
2809 NEXTHOP_APPEND(last_nh, newnh);
2810 else
2811 ctx->u.pw.fib_nhg.nexthop = newnh;
2812 last_nh = newnh;
2813 }
2814 }
2815
2816 /* Include any installed backup nexthops also. */
2817 nhg = rib_get_fib_backup_nhg(re);
2818 if (nhg && nhg->nexthop) {
2819 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2820 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2821 || CHECK_FLAG(nh->flags,
2822 NEXTHOP_FLAG_RECURSIVE)
2823 || nh->nh_label == NULL)
2824 continue;
2825
2826 newnh = nexthop_dup(nh, NULL);
2827
2828 if (last_nh)
2829 NEXTHOP_APPEND(last_nh, newnh);
2830 else
2831 ctx->u.pw.fib_nhg.nexthop = newnh;
2832 last_nh = newnh;
2833 }
2834 }
2835
2836 /* Copy primary nexthops; recursive info is included too */
2837 assert(re->nhe != NULL); /* SA warning */
2838 copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
2839 re->nhe->nhg.nexthop, NULL);
2840 ctx->u.pw.nhg_id = re->nhe->id;
2841
2842 /* Copy backup nexthop info, if present */
2843 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2844 copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
2845 re->nhe->backup_info->nhe->nhg.nexthop,
2846 NULL);
2847 }
2848 }
2849 route_unlock_node(rn);
2850
2851 ret = AOK;
2852
2853 done:
2854 return ret;
2855 }
2856
2857 /**
2858 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2859 * PBR rule.
2860 *
2861 * @dplane_rule: Dataplane internal representation of a rule
2862 * @rule: PBR rule
2863 */
2864 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
2865 struct zebra_pbr_rule *rule)
2866 {
2867 dplane_rule->priority = rule->rule.priority;
2868 dplane_rule->table = rule->rule.action.table;
2869
2870 dplane_rule->filter_bm = rule->rule.filter.filter_bm;
2871 dplane_rule->fwmark = rule->rule.filter.fwmark;
2872 dplane_rule->dsfield = rule->rule.filter.dsfield;
2873 dplane_rule->ip_proto = rule->rule.filter.ip_proto;
2874 prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
2875 prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
2876
2877 dplane_rule->action_pcp = rule->rule.action.pcp;
2878 dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
2879 dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
2880 dplane_rule->action_queue_id = rule->rule.action.queue_id;
2881
2882 strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
2883 }
2884
2885 /**
2886 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
2887 *
2888 * @ctx: Dataplane context to init
2889 * @op: Operation being performed
2890 * @new_rule: New PBR rule
* @old_rule: Previous PBR rule (only used for DPLANE_OP_RULE_UPDATE)
2891 *
2892 * Return: Result status
2893 */
2894 static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
2895 enum dplane_op_e op,
2896 struct zebra_pbr_rule *new_rule,
2897 struct zebra_pbr_rule *old_rule)
2898 {
2899 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2900 zlog_debug(
2901 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
2902 dplane_op2str(op), new_rule->ifname,
2903 new_rule->rule.priority, new_rule->rule.filter.fwmark,
2904 &new_rule->rule.filter.src_ip,
2905 &new_rule->rule.filter.dst_ip,
2906 new_rule->rule.action.table);
2907
2908 ctx->zd_op = op;
2909 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2910
2911 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2912 op == DPLANE_OP_RULE_UPDATE);
2913
2914 ctx->zd_vrf_id = new_rule->vrf_id;
2915 strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
2916
2917 ctx->u.rule.sock = new_rule->sock;
2918 ctx->u.rule.unique = new_rule->rule.unique;
2919 ctx->u.rule.seq = new_rule->rule.seq;
2920
2921 dplane_ctx_rule_init_single(&ctx->u.rule.new, new_rule);
2922 if (op == DPLANE_OP_RULE_UPDATE)
2923 dplane_ctx_rule_init_single(&ctx->u.rule.old, old_rule);
2924
2925 return AOK;
2926 }
2927
2928 /**
2929 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
2930 * update.
2931 *
2932 * @ctx: Dataplane context to init
2933 * @op: Operation being performed
2934 * @iptable: PBR iptable
2935 *
2936 * Return: Result status
2937 */
2938 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx *ctx,
2939 enum dplane_op_e op,
2940 struct zebra_pbr_iptable *iptable)
2941 {
2942 char *ifname;
2943 struct listnode *node;
2944
2945 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2946 zlog_debug(
2947 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
2948 dplane_op2str(op), iptable->unique, iptable->fwmark,
2949 family2str(iptable->family),
2950 iptable->action == ZEBRA_IPTABLES_DROP ? "Drop"
2951 : "Forward");
2952 }
2953
2954 ctx->zd_op = op;
2955 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2956
2957 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2958
2959 ctx->zd_vrf_id = iptable->vrf_id;
2960 memcpy(&ctx->u.iptable, iptable, sizeof(struct zebra_pbr_iptable));
2961 ctx->u.iptable.interface_name_list = NULL;
2962 if (iptable->nb_interface > 0) {
2963 ctx->u.iptable.interface_name_list = list_new();
2964 for (ALL_LIST_ELEMENTS_RO(iptable->interface_name_list, node,
2965 ifname)) {
2966 listnode_add(ctx->u.iptable.interface_name_list,
2967 XSTRDUP(MTYPE_DP_NETFILTER, ifname));
2968 }
2969 }
2970 return AOK;
2971 }
2972
2973 /**
2974 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
2975 *
2976 * @ctx: Dataplane context to init
2977 * @op: Operation being performed
2978 * @ipset: PBR ipset
2979 *
2980 * Return: Result status
2981 */
2982 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx *ctx,
2983 enum dplane_op_e op,
2984 struct zebra_pbr_ipset *ipset)
2985 {
2986 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
2987 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
2988 dplane_op2str(op), ipset->ipset_name, ipset->unique,
2989 family2str(ipset->family),
2990 zebra_pbr_ipset_type2str(ipset->type));
2991 }
2992
2993 ctx->zd_op = op;
2994 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2995
2996 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2997
2998 ctx->zd_vrf_id = ipset->vrf_id;
2999
3000 memcpy(&ctx->u.ipset, ipset, sizeof(struct zebra_pbr_ipset));
3001 return AOK;
3002 }
3003
3004 /**
3005 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3006 * entry update.
3007 *
3008 * @ctx: Dataplane context to init
3009 * @op: Operation being performed
3010 * @ipset_entry: PBR ipset entry
3011 *
3012 * Return: Result status
3013 */
3014 static int
3015 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
3016 struct zebra_pbr_ipset_entry *ipset_entry)
3017 {
3018 struct zebra_pbr_ipset *ipset;
3019
3020 ipset = ipset_entry->backpointer;
3021 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3022 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3023 dplane_op2str(op), ipset->ipset_name,
3024 ipset_entry->unique, ipset_entry->filter_bm);
3025 }
3026
3027 ctx->zd_op = op;
3028 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3029
3030 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3031
3032 ctx->zd_vrf_id = ipset->vrf_id;
3033
3034 memcpy(&ctx->u.ipset_entry.entry, ipset_entry,
3035 sizeof(struct zebra_pbr_ipset_entry));
3036 ctx->u.ipset_entry.entry.backpointer = NULL;
3037 ctx->u.ipset_entry.info.type = ipset->type;
3038 ctx->u.ipset_entry.info.family = ipset->family;
3039 memcpy(&ctx->u.ipset_entry.info.ipset_name, &ipset->ipset_name,
3040 ZEBRA_IPSET_NAME_SIZE);
3041
3042 return AOK;
3043 }
3044
3045
3046 /*
3047 * Enqueue a new update,
3048 * and ensure an event is active for the dataplane pthread.
3049 */
3050 static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
3051 {
3052 int ret = EINVAL;
3053 uint32_t high, curr;
3054
3055 /* Enqueue for processing by the dataplane pthread */
3056 DPLANE_LOCK();
3057 {
3058 TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
3059 zd_q_entries);
3060 }
3061 DPLANE_UNLOCK();
3062
3063 curr = atomic_fetch_add_explicit(
3064 &(zdplane_info.dg_routes_queued),
3065 1, memory_order_seq_cst);
3066
3067 curr++; /* We got the pre-incremented value */
3068
3069 /* Maybe update high-water counter also */
3070 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
3071 memory_order_seq_cst);
3072 while (high < curr) {
3073 if (atomic_compare_exchange_weak_explicit(
3074 &zdplane_info.dg_routes_queued_max,
3075 &high, curr,
3076 memory_order_seq_cst,
3077 memory_order_seq_cst))
3078 break;
3079 }
3080
3081 /* Ensure that an event for the dataplane thread is active */
3082 ret = dplane_provider_work_ready();
3083
3084 return ret;
3085 }
3086
3087 /*
3088 * Utility that prepares a route update and enqueues it for processing
3089 */
3090 static enum zebra_dplane_result
3091 dplane_route_update_internal(struct route_node *rn,
3092 struct route_entry *re,
3093 struct route_entry *old_re,
3094 enum dplane_op_e op)
3095 {
3096 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3097 int ret = EINVAL;
3098 struct zebra_dplane_ctx *ctx = NULL;
3099
3100 /* Obtain context block */
3101 ctx = dplane_ctx_alloc();
3102
3103 /* Init context with info from zebra data structs */
3104 ret = dplane_ctx_route_init(ctx, op, rn, re);
3105 if (ret == AOK) {
3106 /* Capture some extra info for update case
3107 * where there's a different 'old' route.
3108 */
3109 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
3110 old_re && (old_re != re)) {
3111
3112 old_re->dplane_sequence =
3113 zebra_router_get_next_sequence();
3114 ctx->zd_old_seq = old_re->dplane_sequence;
3115
3116 ctx->u.rinfo.zd_old_tag = old_re->tag;
3117 ctx->u.rinfo.zd_old_type = old_re->type;
3118 ctx->u.rinfo.zd_old_instance = old_re->instance;
3119 ctx->u.rinfo.zd_old_distance = old_re->distance;
3120 ctx->u.rinfo.zd_old_metric = old_re->metric;
3121 ctx->u.rinfo.nhe.old_id = old_re->nhe->id;
3122
3123 #ifndef HAVE_NETLINK
3124 /* For bsd, capture previous re's nexthops too, sigh.
3125 * We'll need these to do per-nexthop deletes.
3126 */
3127 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
3128 old_re->nhe->nhg.nexthop, NULL);
3129
3130 if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
3131 struct nexthop_group *nhg;
3132 struct nexthop **nh;
3133
3134 nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
3135 nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
3136
3137 if (nhg->nexthop)
3138 copy_nexthops(nh, nhg->nexthop, NULL);
3139 }
3140 #endif /* !HAVE_NETLINK */
3141 }
3142
3143 /*
3144 * If the old and new context type and nexthop group id
3145 * are the same, there is no need to send down a route
3146 * replace: either we have already sent a nexthop group
3147 * replace, or an upper level protocol has sent us the
3148 * exact same route again.
3149 */
3150 if ((dplane_ctx_get_type(ctx) == dplane_ctx_get_old_type(ctx))
3151 && (dplane_ctx_get_nhe_id(ctx)
3152 == dplane_ctx_get_old_nhe_id(ctx))
3153 && (dplane_ctx_get_nhe_id(ctx) >= ZEBRA_NHG_PROTO_LOWER)) {
3154 struct nexthop *nexthop;
3155
3156 if (IS_ZEBRA_DEBUG_DPLANE)
3157 zlog_debug(
3158 "%s: Ignoring Route exactly the same",
3159 __func__);
3160
3161 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
3162 nexthop)) {
3163 if (CHECK_FLAG(nexthop->flags,
3164 NEXTHOP_FLAG_RECURSIVE))
3165 continue;
3166
3167 if (CHECK_FLAG(nexthop->flags,
3168 NEXTHOP_FLAG_ACTIVE))
3169 SET_FLAG(nexthop->flags,
3170 NEXTHOP_FLAG_FIB);
3171 }
3172
3173 dplane_ctx_free(&ctx);
3174 return ZEBRA_DPLANE_REQUEST_SUCCESS;
3175 }
3176
3177 /* Enqueue context for processing */
3178 ret = dplane_update_enqueue(ctx);
3179 }
3180
3181 /* Update counter */
3182 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
3183 memory_order_relaxed);
3184
3185 if (ret == AOK)
3186 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3187 else {
3188 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
3189 memory_order_relaxed);
3190 if (ctx)
3191 dplane_ctx_free(&ctx);
3192 }
3193
3194 return result;
3195 }
3196
3197 /**
3198 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3199 *
3200 * @nhe: Nexthop group hash entry where the change occurred
3201 * @op: The operation to be enqueued
3202 *
3203 * Return: Result of the change
3204 */
3205 static enum zebra_dplane_result
3206 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
3207 {
3208 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3209 int ret = EINVAL;
3210 struct zebra_dplane_ctx *ctx = NULL;
3211
3212 /* Obtain context block */
3213 ctx = dplane_ctx_alloc();
3214 if (!ctx) {
3215 ret = ENOMEM;
3216 goto done;
3217 }
3218
3219 ret = dplane_ctx_nexthop_init(ctx, op, nhe);
3220 if (ret == AOK)
3221 ret = dplane_update_enqueue(ctx);
3222
3223 done:
3224 /* Update counter */
3225 atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
3226 memory_order_relaxed);
3227
3228 if (ret == AOK)
3229 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3230 else {
3231 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3232 memory_order_relaxed);
3233 if (ctx)
3234 dplane_ctx_free(&ctx);
3235 }
3236
3237 return result;
3238 }
3239
3240 /*
3241 * Enqueue a route 'add' for the dataplane.
3242 */
3243 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
3244 struct route_entry *re)
3245 {
3246 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3247
3248 if (rn == NULL || re == NULL)
3249 goto done;
3250
3251 ret = dplane_route_update_internal(rn, re, NULL,
3252 DPLANE_OP_ROUTE_INSTALL);
3253
3254 done:
3255 return ret;
3256 }
3257
3258 /*
3259 * Enqueue a route update for the dataplane.
3260 */
3261 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
3262 struct route_entry *re,
3263 struct route_entry *old_re)
3264 {
3265 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3266
3267 if (rn == NULL || re == NULL)
3268 goto done;
3269
3270 ret = dplane_route_update_internal(rn, re, old_re,
3271 DPLANE_OP_ROUTE_UPDATE);
3272 done:
3273 return ret;
3274 }
3275
3276 /*
3277 * Enqueue a route removal for the dataplane.
3278 */
3279 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
3280 struct route_entry *re)
3281 {
3282 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3283
3284 if (rn == NULL || re == NULL)
3285 goto done;
3286
3287 ret = dplane_route_update_internal(rn, re, NULL,
3288 DPLANE_OP_ROUTE_DELETE);
3289
3290 done:
3291 return ret;
3292 }
3293
3294 /*
3295 * Notify the dplane when system/connected routes change.
3296 */
3297 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
3298 struct route_entry *re)
3299 {
3300 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3301
3302 /* Ignore this event unless a provider plugin has requested it. */
3303 if (!zdplane_info.dg_sys_route_notifs) {
3304 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3305 goto done;
3306 }
3307
3308 if (rn == NULL || re == NULL)
3309 goto done;
3310
3311 ret = dplane_route_update_internal(rn, re, NULL,
3312 DPLANE_OP_SYS_ROUTE_ADD);
3313
3314 done:
3315 return ret;
3316 }
3317
3318 /*
3319 * Notify the dplane when system/connected routes are deleted.
3320 */
3321 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
3322 struct route_entry *re)
3323 {
3324 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3325
3326 /* Ignore this event unless a provider plugin has requested it. */
3327 if (!zdplane_info.dg_sys_route_notifs) {
3328 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3329 goto done;
3330 }
3331
3332 if (rn == NULL || re == NULL)
3333 goto done;
3334
3335 ret = dplane_route_update_internal(rn, re, NULL,
3336 DPLANE_OP_SYS_ROUTE_DELETE);
3337
3338 done:
3339 return ret;
3340 }
3341
3342 /*
3343 * Update from an async notification, to bring other fibs up-to-date.
3344 */
3345 enum zebra_dplane_result
3346 dplane_route_notif_update(struct route_node *rn,
3347 struct route_entry *re,
3348 enum dplane_op_e op,
3349 struct zebra_dplane_ctx *ctx)
3350 {
3351 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3352 int ret = EINVAL;
3353 struct zebra_dplane_ctx *new_ctx = NULL;
3354 struct nexthop *nexthop;
3355 struct nexthop_group *nhg;
3356
3357 if (rn == NULL || re == NULL)
3358 goto done;
3359
3360 new_ctx = dplane_ctx_alloc();
3361 if (new_ctx == NULL)
3362 goto done;
3363
3364 /* Init context with info from zebra data structs */
3365 dplane_ctx_route_init(new_ctx, op, rn, re);
3366
3367 /* For add/update, need to adjust the nexthops so that we match
3368 * the notification state, which may not be the route-entry/RIB
3369 * state.
3370 */
3371 if (op == DPLANE_OP_ROUTE_UPDATE ||
3372 op == DPLANE_OP_ROUTE_INSTALL) {
3373
3374 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
3375 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
3376
3377 nhg = rib_get_fib_nhg(re);
3378 if (nhg && nhg->nexthop)
3379 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3380 nhg->nexthop, NULL);
3381
3382 /* Check for installed backup nexthops also */
3383 nhg = rib_get_fib_backup_nhg(re);
3384 if (nhg && nhg->nexthop) {
3385 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3386 nhg->nexthop, NULL);
3387 }
3388
3389 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
3390 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
3391
3392 }
3393
3394 /* Capture info about the source of the notification, in 'ctx' */
3395 dplane_ctx_set_notif_provider(new_ctx,
3396 dplane_ctx_get_notif_provider(ctx));
3397
3398 ret = dplane_update_enqueue(new_ctx);
3399
3400 done:
3401 if (ret == AOK)
3402 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3403 else if (new_ctx)
3404 dplane_ctx_free(&new_ctx);
3405
3406 return result;
3407 }
3408
3409 /*
3410 * Enqueue a nexthop add for the dataplane.
3411 */
3412 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
3413 {
3414 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3415
3416 if (nhe)
3417 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
3418 return ret;
3419 }
3420
3421 /*
3422 * Enqueue a nexthop update for the dataplane.
3423 *
3424 * Might not need this func since zebra's nexthop objects should be immutable?
3425 */
3426 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
3427 {
3428 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3429
3430 if (nhe)
3431 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
3432 return ret;
3433 }
3434
3435 /*
3436 * Enqueue a nexthop removal for the dataplane.
3437 */
3438 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
3439 {
3440 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3441
3442 if (nhe)
3443 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
3444
3445 return ret;
3446 }
3447
3448 /*
3449 * Enqueue LSP add for the dataplane.
3450 */
3451 enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
3452 {
3453 enum zebra_dplane_result ret =
3454 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
3455
3456 return ret;
3457 }
3458
3459 /*
3460 * Enqueue LSP update for the dataplane.
3461 */
3462 enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
3463 {
3464 enum zebra_dplane_result ret =
3465 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
3466
3467 return ret;
3468 }
3469
3470 /*
3471 * Enqueue LSP delete for the dataplane.
3472 */
3473 enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
3474 {
3475 enum zebra_dplane_result ret =
3476 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
3477
3478 return ret;
3479 }
3480
3481 /* Update or un-install resulting from an async notification */
3482 enum zebra_dplane_result
3483 dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
3484 struct zebra_dplane_ctx *notif_ctx)
3485 {
3486 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3487 int ret = EINVAL;
3488 struct zebra_dplane_ctx *ctx = NULL;
3489 struct nhlfe_list_head *head;
3490 struct zebra_nhlfe *nhlfe, *new_nhlfe;
3491
3492 /* Obtain context block */
3493 ctx = dplane_ctx_alloc();
3494 if (ctx == NULL) {
3495 ret = ENOMEM;
3496 goto done;
3497 }
3498
3499 /* Copy info from zebra LSP */
3500 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3501 if (ret != AOK)
3502 goto done;
3503
3504 /* Add any installed backup nhlfes */
3505 head = &(ctx->u.lsp.backup_nhlfe_list);
3506 frr_each(nhlfe_list, head, nhlfe) {
3507
3508 if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) &&
3509 CHECK_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_FIB)) {
3510 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp),
3511 nhlfe->type,
3512 nhlfe->nexthop);
3513
3514 /* Need to copy flags too */
3515 new_nhlfe->flags = nhlfe->flags;
3516 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
3517 }
3518 }
3519
3520 /* Capture info about the source of the notification */
3521 dplane_ctx_set_notif_provider(
3522 ctx,
3523 dplane_ctx_get_notif_provider(notif_ctx));
3524
3525 ret = dplane_update_enqueue(ctx);
3526
3527 done:
3528 /* Update counter */
3529 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3530 memory_order_relaxed);
3531
3532 if (ret == AOK)
3533 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3534 else {
3535 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3536 memory_order_relaxed);
3537 if (ctx)
3538 dplane_ctx_free(&ctx);
3539 }
3540 return result;
3541 }
3542
3543 /*
3544 * Enqueue pseudowire install for the dataplane.
3545 */
3546 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
3547 {
3548 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
3549 }
3550
3551 /*
3552 * Enqueue pseudowire un-install for the dataplane.
3553 */
3554 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
3555 {
3556 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
3557 }
3558
3559 /*
3560 * Common internal LSP update utility
3561 */
3562 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
3563 enum dplane_op_e op)
3564 {
3565 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3566 int ret = EINVAL;
3567 struct zebra_dplane_ctx *ctx = NULL;
3568
3569 /* Obtain context block */
3570 ctx = dplane_ctx_alloc();
3571
3572 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3573 if (ret != AOK)
3574 goto done;
3575
3576 ret = dplane_update_enqueue(ctx);
3577
3578 done:
3579 /* Update counter */
3580 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3581 memory_order_relaxed);
3582
3583 if (ret == AOK)
3584 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3585 else {
3586 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3587 memory_order_relaxed);
3588 dplane_ctx_free(&ctx);
3589 }
3590
3591 return result;
3592 }
3593
3594 /*
3595 * Internal, common handler for pseudowire updates.
3596 */
3597 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
3598 enum dplane_op_e op)
3599 {
3600 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3601 int ret;
3602 struct zebra_dplane_ctx *ctx = NULL;
3603
3604 ctx = dplane_ctx_alloc();
3605
3606 ret = dplane_ctx_pw_init(ctx, op, pw);
3607 if (ret != AOK)
3608 goto done;
3609
3610 ret = dplane_update_enqueue(ctx);
3611
3612 done:
3613 /* Update counter */
3614 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
3615 memory_order_relaxed);
3616
3617 if (ret == AOK)
3618 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3619 else {
3620 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
3621 memory_order_relaxed);
3622 dplane_ctx_free(&ctx);
3623 }
3624
3625 return result;
3626 }
3627
3628 /*
3629 * Enqueue access br_port update.
3630 */
3631 enum zebra_dplane_result
3632 dplane_br_port_update(const struct interface *ifp, bool non_df,
3633 uint32_t sph_filter_cnt,
3634 const struct in_addr *sph_filters, uint32_t backup_nhg_id)
3635 {
3636 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3637 uint32_t flags = 0;
3638 int ret;
3639 struct zebra_dplane_ctx *ctx = NULL;
3640 struct zebra_ns *zns;
3641 enum dplane_op_e op = DPLANE_OP_BR_PORT_UPDATE;
3642
3643 if (non_df)
3644 flags |= DPLANE_BR_PORT_NON_DF;
3645
3646 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_EVPN_MH_ES) {
3647 uint32_t i;
3648 char vtep_str[ES_VTEP_LIST_STR_SZ];
3649
3650 vtep_str[0] = '\0';
3651 for (i = 0; i < sph_filter_cnt; ++i) {
3652 snprintfrr(vtep_str + strlen(vtep_str),
3653 sizeof(vtep_str) - strlen(vtep_str), "%pI4 ",
3654 &sph_filters[i]);
3655 }
3656 zlog_debug(
3657 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3658 dplane_op2str(op), ifp->name, flags, backup_nhg_id,
3659 vtep_str);
3660 }
3661
3662 ctx = dplane_ctx_alloc();
3663
3664 ctx->zd_op = op;
3665 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3666 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3667
3668 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3669 dplane_ctx_ns_init(ctx, zns, false);
3670
3671 ctx->zd_ifindex = ifp->ifindex;
3672 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3673
3674 /* Init the br-port-specific data area */
3675 memset(&ctx->u.br_port, 0, sizeof(ctx->u.br_port));
3676
3677 ctx->u.br_port.flags = flags;
3678 ctx->u.br_port.backup_nhg_id = backup_nhg_id;
3679 ctx->u.br_port.sph_filter_cnt = sph_filter_cnt;
3680 memcpy(ctx->u.br_port.sph_filters, sph_filters,
3681 sizeof(ctx->u.br_port.sph_filters[0]) * sph_filter_cnt);
3682
3683 /* Enqueue for processing on the dplane pthread */
3684 ret = dplane_update_enqueue(ctx);
3685
3686 /* Increment counter */
3687 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_in, 1,
3688 memory_order_relaxed);
3689
3690 if (ret == AOK) {
3691 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3692 } else {
3693 /* Error counter */
3694 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_errors, 1,
3695 memory_order_relaxed);
3696 dplane_ctx_free(&ctx);
3697 }
3698
3699 return result;
3700 }
3701
3702 /*
3703 * Enqueue interface address add for the dataplane.
3704 */
3705 enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
3706 const struct connected *ifc)
3707 {
3708 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3709 /* Extra checks for this OS path. */
3710
3711 /* Don't configure PtP addresses on broadcast ifs or reverse */
3712 if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
3713 if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
3714 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3715
3716 return ZEBRA_DPLANE_REQUEST_FAILURE;
3717 }
3718 #endif
3719
3720 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
3721 }
3722
3723 /*
3724 * Enqueue interface address remove/uninstall for the dataplane.
3725 */
3726 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
3727 const struct connected *ifc)
3728 {
3729 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
3730 }
3731
3732 static enum zebra_dplane_result intf_addr_update_internal(
3733 const struct interface *ifp, const struct connected *ifc,
3734 enum dplane_op_e op)
3735 {
3736 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3737 int ret = EINVAL;
3738 struct zebra_dplane_ctx *ctx = NULL;
3739 struct zebra_ns *zns;
3740
3741 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3742 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3743 dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
3744 ifc->address);
3745
3746 ctx = dplane_ctx_alloc();
3747
3748 ctx->zd_op = op;
3749 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3750 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3751
3752 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3753 dplane_ctx_ns_init(ctx, zns, false);
3754
3755 /* Init the interface-addr-specific area */
3756 memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
3757
3758 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3759 ctx->zd_ifindex = ifp->ifindex;
3760 ctx->u.intf.prefix = *(ifc->address);
3761
3762 if (if_is_broadcast(ifp))
3763 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
3764
3765 if (CONNECTED_PEER(ifc)) {
3766 ctx->u.intf.dest_prefix = *(ifc->destination);
3767 ctx->u.intf.flags |=
3768 (DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
3769 }
3770
3771 if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
3772 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
3773
3774 if (ifc->label) {
3775 size_t len;
3776
3777 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
3778
3779 /* Use embedded buffer if it's adequate; else allocate. */
3780 len = strlen(ifc->label);
3781
3782 if (len < sizeof(ctx->u.intf.label_buf)) {
3783 strlcpy(ctx->u.intf.label_buf, ifc->label,
3784 sizeof(ctx->u.intf.label_buf));
3785 ctx->u.intf.label = ctx->u.intf.label_buf;
3786 } else {
3787 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, ifc->label);
3788 }
3789 }
3790
3791 ret = dplane_update_enqueue(ctx);
3792
3793 /* Increment counter */
3794 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
3795 memory_order_relaxed);
3796
3797 if (ret == AOK)
3798 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3799 else {
3800 /* Error counter */
3801 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3802 1, memory_order_relaxed);
3803 dplane_ctx_free(&ctx);
3804 }
3805
3806 return result;
3807 }
3808
3809 /*
3810 * Enqueue vxlan/evpn mac add (or update).
3811 */
3812 enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
3813 const struct interface *bridge_ifp,
3814 vlanid_t vid,
3815 const struct ethaddr *mac,
3816 struct in_addr vtep_ip,
3817 bool sticky,
3818 uint32_t nhg_id,
3819 bool was_static)
3820 {
3821 enum zebra_dplane_result result;
3822 uint32_t update_flags = 0;
3823
3824 update_flags |= DPLANE_MAC_REMOTE;
3825 if (was_static)
3826 update_flags |= DPLANE_MAC_WAS_STATIC;
3827
3828 /* Use common helper api */
3829 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
3830 vid, mac, vtep_ip, sticky, nhg_id, update_flags);
3831 return result;
3832 }
3833
3834 /*
3835 * Enqueue vxlan/evpn mac delete.
3836 */
3837 enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
3838 const struct interface *bridge_ifp,
3839 vlanid_t vid,
3840 const struct ethaddr *mac,
3841 struct in_addr vtep_ip)
3842 {
3843 enum zebra_dplane_result result;
3844 uint32_t update_flags = 0;
3845
3846 update_flags |= DPLANE_MAC_REMOTE;
3847
3848 /* Use common helper api */
3849 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
3850 vid, mac, vtep_ip, false, 0, update_flags);
3851 return result;
3852 }
3853
3854 /*
3855 * API to configure a link-local entry with either MAC address or IP information
3856 */
3857 enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
3858 const struct interface *ifp,
3859 struct ipaddr *link_ip,
3860 struct ipaddr *ip,
3861 uint32_t ndm_state, int protocol)
3862 {
3863 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3864 uint16_t state = 0;
3865 uint32_t update_flags;
3866
3867 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3868 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
3869 __func__, dplane_op2str(op), ifp->name, link_ip, ip);
3870
3871 if (ndm_state == ZEBRA_NEIGH_STATE_REACHABLE)
3872 state = DPLANE_NUD_REACHABLE;
3873 else if (ndm_state == ZEBRA_NEIGH_STATE_FAILED)
3874 state = DPLANE_NUD_FAILED;
3875
3876 update_flags = DPLANE_NEIGH_NO_EXTENSION;
3877
3878 result = neigh_update_internal(op, ifp, (const void *)link_ip,
3879 ipaddr_family(link_ip), ip, 0, state,
3880 update_flags, protocol);
3881
3882 return result;
3883 }
3884
3885 /*
3886 * Enqueue local mac add (or update).
3887 */
3888 enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
3889 const struct interface *bridge_ifp,
3890 vlanid_t vid,
3891 const struct ethaddr *mac,
3892 bool sticky,
3893 uint32_t set_static,
3894 uint32_t set_inactive)
3895 {
3896 enum zebra_dplane_result result;
3897 uint32_t update_flags = 0;
3898 struct in_addr vtep_ip;
3899
3900 if (set_static)
3901 update_flags |= DPLANE_MAC_SET_STATIC;
3902
3903 if (set_inactive)
3904 update_flags |= DPLANE_MAC_SET_INACTIVE;
3905
3906 vtep_ip.s_addr = 0;
3907
3908 /* Use common helper api */
3909 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
3910 vid, mac, vtep_ip, sticky, 0,
3911 update_flags);
3912 return result;
3913 }
3914
3915 /*
3916 * Enqueue local mac del
3917 */
3918 enum zebra_dplane_result
3919 dplane_local_mac_del(const struct interface *ifp,
3920 const struct interface *bridge_ifp, vlanid_t vid,
3921 const struct ethaddr *mac)
3922 {
3923 enum zebra_dplane_result result;
3924 struct in_addr vtep_ip;
3925
3926 vtep_ip.s_addr = 0;
3927
3928 /* Use common helper api */
3929 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, vid,
3930 mac, vtep_ip, false, 0, 0);
3931 return result;
3932 }
3933 /*
3934 * Public api to init an empty context - either newly-allocated or
3935 * reset/cleared - for a MAC update.
3936 */
3937 void dplane_mac_init(struct zebra_dplane_ctx *ctx,
3938 const struct interface *ifp,
3939 const struct interface *br_ifp,
3940 vlanid_t vid,
3941 const struct ethaddr *mac,
3942 struct in_addr vtep_ip,
3943 bool sticky,
3944 uint32_t nhg_id,
3945 uint32_t update_flags)
3946 {
3947 struct zebra_ns *zns;
3948
3949 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3950 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3951
3952 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3953 dplane_ctx_ns_init(ctx, zns, false);
3954
3955 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3956 ctx->zd_ifindex = ifp->ifindex;
3957
3958 /* Init the mac-specific data area */
3959 memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
3960
3961 ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
3962 ctx->u.macinfo.vtep_ip = vtep_ip;
3963 ctx->u.macinfo.mac = *mac;
3964 ctx->u.macinfo.vid = vid;
3965 ctx->u.macinfo.is_sticky = sticky;
3966 ctx->u.macinfo.nhg_id = nhg_id;
3967 ctx->u.macinfo.update_flags = update_flags;
3968 }
3969
3970 /*
3971 * Common helper api for MAC address/vxlan updates
3972 */
3973 static enum zebra_dplane_result
3974 mac_update_common(enum dplane_op_e op,
3975 const struct interface *ifp,
3976 const struct interface *br_ifp,
3977 vlanid_t vid,
3978 const struct ethaddr *mac,
3979 struct in_addr vtep_ip,
3980 bool sticky,
3981 uint32_t nhg_id,
3982 uint32_t update_flags)
3983 {
3984 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3985 int ret;
3986 struct zebra_dplane_ctx *ctx = NULL;
3987
3988 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3989 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
3990 dplane_op2str(op), mac, ifp->name, &vtep_ip);
3991
3992 ctx = dplane_ctx_alloc();
3993 ctx->zd_op = op;
3994
3995 /* Common init for the ctx */
3996 dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky,
3997 nhg_id, update_flags);
3998
3999 /* Enqueue for processing on the dplane pthread */
4000 ret = dplane_update_enqueue(ctx);
4001
4002 /* Increment counter */
4003 atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
4004 memory_order_relaxed);
4005
4006 if (ret == AOK)
4007 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4008 else {
4009 /* Error counter */
4010 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
4011 memory_order_relaxed);
4012 dplane_ctx_free(&ctx);
4013 }
4014
4015 return result;
4016 }
4017
4018 /*
4019 * Enqueue evpn neighbor add for the dataplane.
4020 */
4021 enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
4022 const struct ipaddr *ip,
4023 const struct ethaddr *mac,
4024 uint32_t flags, bool was_static)
4025 {
4026 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4027 uint32_t update_flags = 0;
4028
4029 update_flags |= DPLANE_NEIGH_REMOTE;
4030
4031 if (was_static)
4032 update_flags |= DPLANE_NEIGH_WAS_STATIC;
4033
4034 result = neigh_update_internal(
4035 DPLANE_OP_NEIGH_INSTALL, ifp, (const void *)mac, AF_ETHERNET,
4036 ip, flags, DPLANE_NUD_NOARP, update_flags, 0);
4037
4038 return result;
4039 }
4040
4041 /*
4042 * Enqueue local neighbor add for the dataplane.
4043 */
4044 enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
4045 const struct ipaddr *ip,
4046 const struct ethaddr *mac,
4047 bool set_router, bool set_static,
4048 bool set_inactive)
4049 {
4050 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4051 uint32_t update_flags = 0;
4052 uint32_t ntf = 0;
4053 uint16_t state;
4054
4055 if (set_static)
4056 update_flags |= DPLANE_NEIGH_SET_STATIC;
4057
4058 if (set_inactive) {
4059 update_flags |= DPLANE_NEIGH_SET_INACTIVE;
4060 state = DPLANE_NUD_STALE;
4061 } else {
4062 state = DPLANE_NUD_REACHABLE;
4063 }
4064
4065 if (set_router)
4066 ntf |= DPLANE_NTF_ROUTER;
4067
4068 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL, ifp,
4069 (const void *)mac, AF_ETHERNET, ip, ntf,
4070 state, update_flags, 0);
4071
4072 return result;
4073 }
4074
4075 /*
4076 * Enqueue evpn neighbor delete for the dataplane.
4077 */
4078 enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
4079 const struct ipaddr *ip)
4080 {
4081 enum zebra_dplane_result result;
4082 uint32_t update_flags = 0;
4083
4084 update_flags |= DPLANE_NEIGH_REMOTE;
4085
4086 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE, ifp, NULL,
4087 AF_ETHERNET, ip, 0, 0, update_flags, 0);
4088
4089 return result;
4090 }
4091
4092 /*
4093 * Enqueue evpn VTEP add for the dataplane.
4094 */
4095 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
4096 const struct in_addr *ip,
4097 vni_t vni)
4098 {
4099 enum zebra_dplane_result result;
4100 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4101 struct ipaddr addr;
4102
4103 if (IS_ZEBRA_DEBUG_VXLAN)
4104 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4105 ip, vni, ifp->name, ifp->ifindex);
4106
4107 SET_IPADDR_V4(&addr);
4108 addr.ipaddr_v4 = *ip;
4109
4110 result = neigh_update_internal(DPLANE_OP_VTEP_ADD, ifp, &mac,
4111 AF_ETHERNET, &addr, 0, 0, 0, 0);
4112
4113 return result;
4114 }
4115
4116 /*
4117 * Enqueue evpn VTEP delete for the dataplane.
4118 */
4119 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
4120 const struct in_addr *ip,
4121 vni_t vni)
4122 {
4123 enum zebra_dplane_result result;
4124 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4125 struct ipaddr addr;
4126
4127 if (IS_ZEBRA_DEBUG_VXLAN)
4128 zlog_debug(
4129 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4130 ip, vni, ifp->name, ifp->ifindex);
4131
4132 SET_IPADDR_V4(&addr);
4133 addr.ipaddr_v4 = *ip;
4134
4135 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE, ifp,
4136 (const void *)&mac, AF_ETHERNET, &addr,
4137 0, 0, 0, 0);
4138
4139 return result;
4140 }
4141
4142 enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
4143 const struct ipaddr *ip)
4144 {
4145 enum zebra_dplane_result result;
4146
4147 result = neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER, ifp, NULL,
4148 AF_ETHERNET, ip, DPLANE_NTF_USE,
4149 DPLANE_NUD_INCOMPLETE, 0, 0);
4150
4151 return result;
4152 }
4153
4154 enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
4155 const uint8_t family,
4156 const uint32_t app_probes,
4157 const uint32_t ucast_probes,
4158 const uint32_t mcast_probes)
4159 {
4160 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4161 int ret;
4162 struct zebra_dplane_ctx *ctx = NULL;
4163 struct zebra_ns *zns;
4164 enum dplane_op_e op = DPLANE_OP_NEIGH_TABLE_UPDATE;
4165
4166 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4167 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4168 dplane_op2str(op), ifp->name, family2str(family));
4169 }
4170
4171 ctx = dplane_ctx_alloc();
4172
4173 ctx->zd_op = op;
4174 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4175 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4176
4177 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4178 dplane_ctx_ns_init(ctx, zns, false);
4179
4180 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4181 ctx->zd_ifindex = ifp->ifindex;
4182
4183 /* Init the neighbor-specific data area */
4184 memset(&ctx->u.neightable, 0, sizeof(ctx->u.neightable));
4185
4186 ctx->u.neightable.family = family;
4187 ctx->u.neightable.app_probes = app_probes;
4188 ctx->u.neightable.ucast_probes = ucast_probes;
4189 ctx->u.neightable.mcast_probes = mcast_probes;
4190
4191 /* Enqueue for processing on the dplane pthread */
4192 ret = dplane_update_enqueue(ctx);
4193
4194 /* Increment counter */
4195 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_in, 1,
4196 memory_order_relaxed);
4197
4198 if (ret == AOK)
4199 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4200 else {
4201 /* Error counter */
4202 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_errors, 1,
4203 memory_order_relaxed);
4204 dplane_ctx_free(&ctx);
4205 }
4206
4207 return result;
4208 }
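
/*
 * Illustrative sketch only (not compiled here): a typical call simply
 * passes the interface, address family and desired probe counts:
 *
 *     dplane_neigh_table_update(ifp, AF_INET, app_probes,
 *                               ucast_probes, mcast_probes);
 *
 * The return value only indicates whether the update was queued for
 * the dplane pthread, not whether the kernel accepted it.
 */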
4209
4210 /*
4211 * Common helper api for neighbor updates
4212 */
4213 static enum zebra_dplane_result
4214 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
4215 const void *link, const int link_family,
4216 const struct ipaddr *ip, uint32_t flags, uint16_t state,
4217 uint32_t update_flags, int protocol)
4218 {
4219 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4220 int ret;
4221 struct zebra_dplane_ctx *ctx = NULL;
4222 struct zebra_ns *zns;
4223 const struct ethaddr *mac = NULL;
4224 const struct ipaddr *link_ip = NULL;
4225
4226 if (link_family == AF_ETHERNET)
4227 mac = (const struct ethaddr *)link;
4228 else
4229 link_ip = (const struct ipaddr *)link;
4230
4231 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4232 char buf1[PREFIX_STRLEN];
4233
4234 buf1[0] = '\0';
4235 if (link_family == AF_ETHERNET)
4236 prefix_mac2str(mac, buf1, sizeof(buf1));
4237 else
4238 ipaddr2str(link_ip, buf1, sizeof(buf1));
4239 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4240 dplane_op2str(op), ifp->name,
4241 link_family == AF_ETHERNET ? "mac" : "link",
4242 buf1, ip);
4243 }
4244
4245 ctx = dplane_ctx_alloc();
4246
4247 ctx->zd_op = op;
4248 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4249 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4250 dplane_ctx_set_type(ctx, protocol);
4251
4252 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4253 dplane_ctx_ns_init(ctx, zns, false);
4254
4255 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4256 ctx->zd_ifindex = ifp->ifindex;
4257
4258 /* Init the neighbor-specific data area */
4259 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
4260
4261 ctx->u.neigh.ip_addr = *ip;
4262 if (mac)
4263 ctx->u.neigh.link.mac = *mac;
4264 else if (link_ip)
4265 ctx->u.neigh.link.ip_addr = *link_ip;
4266
4267 ctx->u.neigh.flags = flags;
4268 ctx->u.neigh.state = state;
4269 ctx->u.neigh.update_flags = update_flags;
4270
4271 /* Enqueue for processing on the dplane pthread */
4272 ret = dplane_update_enqueue(ctx);
4273
4274 /* Increment counter */
4275 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
4276 memory_order_relaxed);
4277
4278 if (ret == AOK)
4279 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4280 else {
4281 /* Error counter */
4282 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
4283 memory_order_relaxed);
4284 dplane_ctx_free(&ctx);
4285 }
4286
4287 return result;
4288 }
4289
4290 /*
4291 * Common helper api for PBR rule updates
4292 */
4293 static enum zebra_dplane_result
4294 rule_update_internal(enum dplane_op_e op, struct zebra_pbr_rule *new_rule,
4295 struct zebra_pbr_rule *old_rule)
4296 {
4297 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4298 struct zebra_dplane_ctx *ctx;
4299 int ret;
4300
4301 ctx = dplane_ctx_alloc();
4302
4303 ret = dplane_ctx_rule_init(ctx, op, new_rule, old_rule);
4304 if (ret != AOK)
4305 goto done;
4306
4307 ret = dplane_update_enqueue(ctx);
4308
4309 done:
4310 atomic_fetch_add_explicit(&zdplane_info.dg_rules_in, 1,
4311 memory_order_relaxed);
4312
4313 if (ret == AOK)
4314 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4315 else {
4316 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors, 1,
4317 memory_order_relaxed);
4318 dplane_ctx_free(&ctx);
4319 }
4320
4321 return result;
4322 }
4323
4324 enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule)
4325 {
4326 return rule_update_internal(DPLANE_OP_RULE_ADD, rule, NULL);
4327 }
4328
4329 enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule)
4330 {
4331 return rule_update_internal(DPLANE_OP_RULE_DELETE, rule, NULL);
4332 }
4333
4334 enum zebra_dplane_result dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
4335 struct zebra_pbr_rule *new_rule)
4336 {
4337 return rule_update_internal(DPLANE_OP_RULE_UPDATE, new_rule, old_rule);
4338 }
4339 /*
4340 * Common helper api for iptable updates
4341 */
4342 static enum zebra_dplane_result
4343 iptable_update_internal(enum dplane_op_e op, struct zebra_pbr_iptable *iptable)
4344 {
4345 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4346 struct zebra_dplane_ctx *ctx;
4347 int ret;
4348
4349 ctx = dplane_ctx_alloc();
4350
4351 ret = dplane_ctx_iptable_init(ctx, op, iptable);
4352 if (ret != AOK)
4353 goto done;
4354
4355 ret = dplane_update_enqueue(ctx);
4356
4357 done:
4358 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_in, 1,
4359 memory_order_relaxed);
4360
4361 if (ret == AOK)
4362 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4363 else {
4364 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_errors, 1,
4365 memory_order_relaxed);
4366 dplane_ctx_free(&ctx);
4367 }
4368
4369 return result;
4370 }
4371
4372 enum zebra_dplane_result
4373 dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable)
4374 {
4375 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD, iptable);
4376 }
4377
4378 enum zebra_dplane_result
4379 dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable)
4380 {
4381 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE, iptable);
4382 }
4383
4384 /*
4385 * Common helper api for ipset updates
4386 */
4387 static enum zebra_dplane_result
4388 ipset_update_internal(enum dplane_op_e op, struct zebra_pbr_ipset *ipset)
4389 {
4390 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4391 struct zebra_dplane_ctx *ctx;
4392 int ret;
4393
4394 ctx = dplane_ctx_alloc();
4395
4396 ret = dplane_ctx_ipset_init(ctx, op, ipset);
4397 if (ret != AOK)
4398 goto done;
4399
4400 ret = dplane_update_enqueue(ctx);
4401
4402 done:
4403 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_in, 1,
4404 memory_order_relaxed);
4405
4406 if (ret == AOK)
4407 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4408 else {
4409 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors, 1,
4410 memory_order_relaxed);
4411 dplane_ctx_free(&ctx);
4412 }
4413
4414 return result;
4415 }
4416
4417 enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset)
4418 {
4419 return ipset_update_internal(DPLANE_OP_IPSET_ADD, ipset);
4420 }
4421
4422 enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset)
4423 {
4424 return ipset_update_internal(DPLANE_OP_IPSET_DELETE, ipset);
4425 }
4426
4427 /*
4428 * Common helper api for ipset entry updates
4429 */
4430 static enum zebra_dplane_result
4431 ipset_entry_update_internal(enum dplane_op_e op,
4432 struct zebra_pbr_ipset_entry *ipset_entry)
4433 {
4434 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4435 struct zebra_dplane_ctx *ctx;
4436 int ret;
4437
4438 ctx = dplane_ctx_alloc();
4439
4440 ret = dplane_ctx_ipset_entry_init(ctx, op, ipset_entry);
4441 if (ret != AOK)
4442 goto done;
4443
4444 ret = dplane_update_enqueue(ctx);
4445
4446 done:
4447 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_in, 1,
4448 memory_order_relaxed);
4449
4450 if (ret == AOK)
4451 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4452 else {
4453 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_errors,
4454 1, memory_order_relaxed);
4455 dplane_ctx_free(&ctx);
4456 }
4457
4458 return result;
4459 }
4460
4461 enum zebra_dplane_result
4462 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset)
4463 {
4464 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD, ipset);
4465 }
4466
4467 enum zebra_dplane_result
4468 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset)
4469 {
4470 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE, ipset);
4471 }
4472
4473 /*
4474 * Common helper api for GRE set
4475 */
4476 enum zebra_dplane_result
4477 dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
4478 unsigned int mtu, const struct zebra_l2info_gre *gre_info)
4479 {
4480 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4481 struct zebra_dplane_ctx *ctx;
4482 enum dplane_op_e op = DPLANE_OP_GRE_SET;
4483 int ret;
4484 struct zebra_ns *zns;
4485
4486 if (!ifp)
4487 return result;
4488 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4489 if (!zns)
4490 return result;
4491
4492 ctx = dplane_ctx_alloc();
4493
4494 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4495 zlog_debug("init dplane ctx %s: if %s link %s%s",
4496 dplane_op2str(op), ifp->name,
4497 ifp_link ? "set " : "unset", ifp_link ?
4498 ifp_link->name : "");
4499 }
4500
4501 ctx->zd_op = op;
4502 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4503 dplane_ctx_ns_init(ctx, zns, false);
4504
4505 dplane_ctx_set_ifname(ctx, ifp->name);
4506 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4507 ctx->zd_ifindex = ifp->ifindex;
4508 if (ifp_link)
4509 ctx->u.gre.link_ifindex = ifp_link->ifindex;
4510 else
4511 ctx->u.gre.link_ifindex = 0;
4512 if (gre_info)
4513 memcpy(&ctx->u.gre.info, gre_info, sizeof(ctx->u.gre.info));
4514 ctx->u.gre.mtu = mtu;
4515
4516 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4517
4518 /* Enqueue context for processing */
4519 ret = dplane_update_enqueue(ctx);
4520
4521 /* Update counter */
4522 atomic_fetch_add_explicit(&zdplane_info.dg_gre_set_in, 1,
4523 memory_order_relaxed);
4524
4525 if (ret == AOK)
4526 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4527 else {
4528 atomic_fetch_add_explicit(
4529 &zdplane_info.dg_gre_set_errors, 1,
4530 memory_order_relaxed);
4531 if (ctx)
4532 dplane_ctx_free(&ctx);
4533 result = ZEBRA_DPLANE_REQUEST_FAILURE;
4534 }
4535 return result;
4536 }
4537
4538 /*
4539 * Handler for 'show dplane'
4540 */
4541 int dplane_show_helper(struct vty *vty, bool detailed)
4542 {
4543 uint64_t queued, queue_max, limit, errs, incoming, yields,
4544 other_errs;
4545
4546 /* Using atomics because counters are being changed in different
4547 * pthread contexts.
4548 */
4549 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
4550 memory_order_relaxed);
4551 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
4552 memory_order_relaxed);
4553 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
4554 memory_order_relaxed);
4555 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4556 memory_order_relaxed);
4557 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
4558 memory_order_relaxed);
4559 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
4560 memory_order_relaxed);
4561 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
4562 memory_order_relaxed);
4563
4564 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
4565 incoming);
4566 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
4567 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
4568 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
4569 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4570 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
4571 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
4572
4573 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
4574 memory_order_relaxed);
4575 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
4576 memory_order_relaxed);
4577 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
4578 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
4579
4580 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
4581 memory_order_relaxed);
4582 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
4583 memory_order_relaxed);
4584 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
4585 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
4586
4587 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
4588 memory_order_relaxed);
4589 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
4590 memory_order_relaxed);
4591 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
4592 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
4593
4594 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
4595 memory_order_relaxed);
4596 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
4597 memory_order_relaxed);
4598 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
4599 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
4600
4601 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
4602 memory_order_relaxed);
4603 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
4604 memory_order_relaxed);
4605 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
4606 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
4607
4608 incoming = atomic_load_explicit(&zdplane_info.dg_rules_in,
4609 memory_order_relaxed);
4610 errs = atomic_load_explicit(&zdplane_info.dg_rule_errors,
4611 memory_order_relaxed);
4612 vty_out(vty, "Rule updates: %" PRIu64 "\n", incoming);
4613 vty_out(vty, "Rule errors: %" PRIu64 "\n", errs);
4614
4615 incoming = atomic_load_explicit(&zdplane_info.dg_br_port_in,
4616 memory_order_relaxed);
4617 errs = atomic_load_explicit(&zdplane_info.dg_br_port_errors,
4618 memory_order_relaxed);
4619 vty_out(vty, "Bridge port updates: %" PRIu64 "\n", incoming);
4620 vty_out(vty, "Bridge port errors: %" PRIu64 "\n", errs);
4621
4622 incoming = atomic_load_explicit(&zdplane_info.dg_iptable_in,
4623 memory_order_relaxed);
4624 errs = atomic_load_explicit(&zdplane_info.dg_iptable_errors,
4625 memory_order_relaxed);
4626 vty_out(vty, "IPtable updates: %" PRIu64 "\n", incoming);
4627 vty_out(vty, "IPtable errors: %" PRIu64 "\n", errs);
4628 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_in,
4629 memory_order_relaxed);
4630 errs = atomic_load_explicit(&zdplane_info.dg_ipset_errors,
4631 memory_order_relaxed);
4632 vty_out(vty, "IPset updates: %" PRIu64 "\n", incoming);
4633 vty_out(vty, "IPset errors: %" PRIu64 "\n", errs);
4634 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_entry_in,
4635 memory_order_relaxed);
4636 errs = atomic_load_explicit(&zdplane_info.dg_ipset_entry_errors,
4637 memory_order_relaxed);
4638 vty_out(vty, "IPset entry updates: %" PRIu64 "\n", incoming);
4639 vty_out(vty, "IPset entry errors: %" PRIu64 "\n", errs);
4640
4641 incoming = atomic_load_explicit(&zdplane_info.dg_neightable_in,
4642 memory_order_relaxed);
4643 errs = atomic_load_explicit(&zdplane_info.dg_neightable_errors,
4644 memory_order_relaxed);
4645 vty_out(vty, "Neighbor Table updates: %"PRIu64"\n", incoming);
4646 vty_out(vty, "Neighbor Table errors: %"PRIu64"\n", errs);
4647
4648 incoming = atomic_load_explicit(&zdplane_info.dg_gre_set_in,
4649 memory_order_relaxed);
4650 errs = atomic_load_explicit(&zdplane_info.dg_gre_set_errors,
4651 memory_order_relaxed);
4652 vty_out(vty, "GRE set updates: %"PRIu64"\n", incoming);
4653 vty_out(vty, "GRE set errors: %"PRIu64"\n", errs);
4654 return CMD_SUCCESS;
4655 }
4656
4657 /*
4658 * Handler for 'show dplane providers'
4659 */
4660 int dplane_show_provs_helper(struct vty *vty, bool detailed)
4661 {
4662 struct zebra_dplane_provider *prov;
4663 uint64_t in, in_q, in_max, out, out_q, out_max;
4664
4665 vty_out(vty, "Zebra dataplane providers:\n");
4666
4667 DPLANE_LOCK();
4668 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4669 DPLANE_UNLOCK();
4670
4671 /* Show counters, useful info from each registered provider */
4672 while (prov) {
4673
4674 in = atomic_load_explicit(&prov->dp_in_counter,
4675 memory_order_relaxed);
4676 in_q = atomic_load_explicit(&prov->dp_in_queued,
4677 memory_order_relaxed);
4678 in_max = atomic_load_explicit(&prov->dp_in_max,
4679 memory_order_relaxed);
4680 out = atomic_load_explicit(&prov->dp_out_counter,
4681 memory_order_relaxed);
4682 out_q = atomic_load_explicit(&prov->dp_out_queued,
4683 memory_order_relaxed);
4684 out_max = atomic_load_explicit(&prov->dp_out_max,
4685 memory_order_relaxed);
4686
4687 vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n",
4688 prov->dp_name, prov->dp_id, in, in_q, in_max,
4689 out, out_q, out_max);
4690
4691 DPLANE_LOCK();
4692 prov = TAILQ_NEXT(prov, dp_prov_link);
4693 DPLANE_UNLOCK();
4694 }
4695
4696 return CMD_SUCCESS;
4697 }
4698
4699 /*
4700 * Helper for 'show run' etc.
4701 */
4702 int dplane_config_write_helper(struct vty *vty)
4703 {
4704 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
4705 vty_out(vty, "zebra dplane limit %u\n",
4706 zdplane_info.dg_max_queued_updates);
4707
4708 return 0;
4709 }
4710
4711 /*
4712 * Provider registration
4713 */
4714 int dplane_provider_register(const char *name,
4715 enum dplane_provider_prio prio,
4716 int flags,
4717 int (*start_fp)(struct zebra_dplane_provider *),
4718 int (*fp)(struct zebra_dplane_provider *),
4719 int (*fini_fp)(struct zebra_dplane_provider *,
4720 bool early),
4721 void *data,
4722 struct zebra_dplane_provider **prov_p)
4723 {
4724 int ret = 0;
4725 struct zebra_dplane_provider *p = NULL, *last;
4726
4727 /* Validate */
4728 if (fp == NULL) {
4729 ret = EINVAL;
4730 goto done;
4731 }
4732
4733 if (prio <= DPLANE_PRIO_NONE ||
4734 prio > DPLANE_PRIO_LAST) {
4735 ret = EINVAL;
4736 goto done;
4737 }
4738
4739 /* Allocate and init new provider struct */
4740 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
4741
4742 pthread_mutex_init(&(p->dp_mutex), NULL);
4743 TAILQ_INIT(&(p->dp_ctx_in_q));
4744 TAILQ_INIT(&(p->dp_ctx_out_q));
4745
4746 p->dp_flags = flags;
4747 p->dp_priority = prio;
4748 p->dp_fp = fp;
4749 p->dp_start = start_fp;
4750 p->dp_fini = fini_fp;
4751 p->dp_data = data;
4752
4753 /* Lock - the dplane pthread may be running */
4754 DPLANE_LOCK();
4755
4756 p->dp_id = ++zdplane_info.dg_provider_id;
4757
4758 if (name)
4759 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
4760 else
4761 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
4762 "provider-%u", p->dp_id);
4763
4764 /* Insert into list ordered by priority */
4765 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
4766 if (last->dp_priority > p->dp_priority)
4767 break;
4768 }
4769
4770 if (last)
4771 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
4772 else
4773 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
4774 dp_prov_link);
4775
4776 /* And unlock */
4777 DPLANE_UNLOCK();
4778
4779 if (IS_ZEBRA_DEBUG_DPLANE)
4780 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
4781 p->dp_name, p->dp_id, p->dp_priority);
4782
4783 done:
4784 if (prov_p)
4785 *prov_p = p;
4786
4787 return ret;
4788 }
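
/*
 * Usage sketch (illustrative only, not part of zebra): how an external
 * plugin might register a provider with this api. The provider name
 * "sample", the callback my_prov_process() and the my_prov pointer are
 * hypothetical; only the registration function, the priority/flag
 * constants and AOK come from this file. Arguments are, in order:
 * name, priority, flags, start_fp, fp, fini_fp, data, prov_p.
 *
 *   static int my_prov_process(struct zebra_dplane_provider *prov);
 *   static struct zebra_dplane_provider *my_prov;
 *
 *   void my_plugin_init(void)
 *   {
 *       int ret;
 *
 *       ret = dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
 *                                      DPLANE_PROV_FLAGS_DEFAULT,
 *                                      NULL, my_prov_process, NULL,
 *                                      NULL, &my_prov);
 *       if (ret != AOK)
 *           zlog_err("sample provider registration failed: %d", ret);
 *   }
 */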
4789
4790 /* Accessors for provider attributes */
4791 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
4792 {
4793 return prov->dp_name;
4794 }
4795
4796 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
4797 {
4798 return prov->dp_id;
4799 }
4800
4801 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
4802 {
4803 return prov->dp_data;
4804 }
4805
4806 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
4807 {
4808 return zdplane_info.dg_updates_per_cycle;
4809 }
4810
4811 /* Lock/unlock a provider's mutex - iff the provider was registered with
4812 * the THREADED flag.
4813 */
4814 void dplane_provider_lock(struct zebra_dplane_provider *prov)
4815 {
4816 if (dplane_provider_is_threaded(prov))
4817 DPLANE_PROV_LOCK(prov);
4818 }
4819
4820 void dplane_provider_unlock(struct zebra_dplane_provider *prov)
4821 {
4822 if (dplane_provider_is_threaded(prov))
4823 DPLANE_PROV_UNLOCK(prov);
4824 }
4825
4826 /*
4827 * Dequeue and maintain associated counter
4828 */
4829 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
4830 struct zebra_dplane_provider *prov)
4831 {
4832 struct zebra_dplane_ctx *ctx = NULL;
4833
4834 dplane_provider_lock(prov);
4835
4836 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
4837 if (ctx) {
4838 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
4839
4840 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
4841 memory_order_relaxed);
4842 }
4843
4844 dplane_provider_unlock(prov);
4845
4846 return ctx;
4847 }
4848
4849 /*
4850 * Dequeue work to a list, return count
4851 */
4852 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
4853 struct dplane_ctx_q *listp)
4854 {
4855 int limit, ret;
4856 struct zebra_dplane_ctx *ctx;
4857
4858 limit = zdplane_info.dg_updates_per_cycle;
4859
4860 dplane_provider_lock(prov);
4861
4862 for (ret = 0; ret < limit; ret++) {
4863 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
4864 if (ctx) {
4865 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
4866
4867 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
4868 } else {
4869 break;
4870 }
4871 }
4872
4873 if (ret > 0)
4874 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
4875 memory_order_relaxed);
4876
4877 dplane_provider_unlock(prov);
4878
4879 return ret;
4880 }
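
/*
 * Usage sketch (illustrative; my_prov_process is a hypothetical provider
 * 'fp' callback): draining the input queue in one batch with
 * dplane_provider_dequeue_in_list() rather than one context at a time,
 * then handing each context on via the provider's out queue. If the
 * per-cycle limit was reached, the provider asks to be scheduled again.
 *
 *   static int my_prov_process(struct zebra_dplane_provider *prov)
 *   {
 *       struct dplane_ctx_q work;
 *       struct zebra_dplane_ctx *ctx, *tctx;
 *       int count;
 *
 *       TAILQ_INIT(&work);
 *       count = dplane_provider_dequeue_in_list(prov, &work);
 *
 *       TAILQ_FOREACH_SAFE (ctx, &work, zd_q_entries, tctx) {
 *           TAILQ_REMOVE(&work, ctx, zd_q_entries);
 *
 *           ... act on dplane_ctx_get_op(ctx) here ...
 *
 *           dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *           dplane_provider_enqueue_out_ctx(prov, ctx);
 *       }
 *
 *       if (count >= dplane_provider_get_work_limit(prov))
 *           dplane_provider_work_ready();
 *
 *       return 0;
 *   }
 */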
4881
4882 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov)
4883 {
4884 return atomic_load_explicit(&(prov->dp_out_counter),
4885 memory_order_relaxed);
4886 }
4887
4888 /*
4889 * Enqueue and maintain associated counter
4890 */
4891 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
4892 struct zebra_dplane_ctx *ctx)
4893 {
4894 uint64_t curr, high;
4895
4896 dplane_provider_lock(prov);
4897
4898 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
4899 zd_q_entries);
4900
4901 /* Maintain out-queue counters */
4902 atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
4903 memory_order_relaxed);
4904 curr = atomic_load_explicit(&prov->dp_out_queued,
4905 memory_order_relaxed);
4906 high = atomic_load_explicit(&prov->dp_out_max,
4907 memory_order_relaxed);
4908 if (curr > high)
4909 atomic_store_explicit(&prov->dp_out_max, curr,
4910 memory_order_relaxed);
4911
4912 dplane_provider_unlock(prov);
4913
4914 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
4915 memory_order_relaxed);
4916 }
4917
4918 /*
4919 * Accessor for provider object
4920 */
4921 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
4922 {
4923 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
4924 }
4925
4926 /*
4927 * Internal helper that copies information from a zebra ns object; this is
4928 * called in the zebra main pthread context as part of dplane ctx init.
4929 */
4930 static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
4931 struct zebra_ns *zns)
4932 {
4933 ns_info->ns_id = zns->ns_id;
4934
4935 #if defined(HAVE_NETLINK)
4936 ns_info->is_cmd = true;
4937 ns_info->sock = zns->netlink_dplane_out.sock;
4938 #endif /* HAVE_NETLINK */
4939 }
4940
4941 #ifdef HAVE_NETLINK
4942 /*
4943 * Callback when an OS (netlink) incoming event read is ready. This runs
4944 * in the dplane pthread.
4945 */
4946 static void dplane_incoming_read(struct thread *event)
4947 {
4948 struct dplane_zns_info *zi = THREAD_ARG(event);
4949
4950 kernel_dplane_read(&zi->info);
4951
4952 /* Re-start read task */
4953 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
4954 zi->info.sock, &zi->t_read);
4955 }
4956 #endif /* HAVE_NETLINK */
4957
4958 /*
4959 * Notify dplane when namespaces are enabled and disabled. The dplane
4960 * needs to start and stop reading incoming events from the zns. In the
4961 * common case where vrfs are _not_ namespaces, there will only be one
4962 * of these.
4963 *
4964 * This is called in the main pthread.
4965 */
4966 void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
4967 {
4968 struct dplane_zns_info *zi;
4969
4970 if (IS_ZEBRA_DEBUG_DPLANE)
4971 zlog_debug("%s: %s for nsid %u", __func__,
4972 (enabled ? "ENABLED" : "DISABLED"), zns->ns_id);
4973
4974 /* Search for an existing zns info entry */
4975 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
4976 if (zi->info.ns_id == zns->ns_id)
4977 break;
4978 }
4979
4980 if (enabled) {
4981 /* Create a new entry if necessary; start reading. */
4982 if (zi == NULL) {
4983 zi = XCALLOC(MTYPE_DP_NS, sizeof(*zi));
4984
4985 zi->info.ns_id = zns->ns_id;
4986
4987 zns_info_list_add_tail(&zdplane_info.dg_zns_list, zi);
4988
4989 if (IS_ZEBRA_DEBUG_DPLANE)
4990 zlog_debug("%s: nsid %u, new zi %p", __func__,
4991 zns->ns_id, zi);
4992 }
4993
4994 /* Make sure we're up-to-date with the zns object */
4995 #if defined(HAVE_NETLINK)
4996 zi->info.is_cmd = false;
4997 zi->info.sock = zns->netlink_dplane_in.sock;
4998
4999 /* Start read task for the dplane pthread. */
5000 if (zdplane_info.dg_master)
5001 thread_add_read(zdplane_info.dg_master,
5002 dplane_incoming_read, zi, zi->info.sock,
5003 &zi->t_read);
5004 #endif
5005 } else if (zi) {
5006 if (IS_ZEBRA_DEBUG_DPLANE)
5007 zlog_debug("%s: nsid %u, deleting zi %p", __func__,
5008 zns->ns_id, zi);
5009
5010 /* Stop reading, free memory */
5011 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5012
5013 if (zdplane_info.dg_master)
5014 thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
5015 NULL);
5016
5017 XFREE(MTYPE_DP_NS, zi);
5018 }
5019 }
5020
5021 /*
5022 * Provider api to signal that work/events are available
5023 * for the dataplane pthread.
5024 */
5025 int dplane_provider_work_ready(void)
5026 {
5027 /* Note that during zebra startup, we may be offered work before
5028 * the dataplane pthread (and thread-master) are ready. We want to
5029 * enqueue the work, but the event-scheduling machinery may not be
5030 * available.
5031 */
5032 if (zdplane_info.dg_run) {
5033 thread_add_event(zdplane_info.dg_master,
5034 dplane_thread_loop, NULL, 0,
5035 &zdplane_info.dg_t_update);
5036 }
5037
5038 return AOK;
5039 }
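
/*
 * Sketch of the intended calling pattern, assuming a provider registered
 * with DPLANE_PROV_FLAG_THREADED that completes a context on its own
 * pthread (ctx and my_prov are placeholders):
 *
 *   dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *   dplane_provider_enqueue_out_ctx(my_prov, ctx);
 *   dplane_provider_work_ready();
 */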
5040
5041 /*
5042 * Enqueue a context directly to zebra main.
5043 */
5044 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
5045 {
5046 struct dplane_ctx_q temp_list;
5047
5048 /* Zebra's api takes a list, so we need to use a temporary list */
5049 TAILQ_INIT(&temp_list);
5050
5051 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
5052 (zdplane_info.dg_results_cb)(&temp_list);
5053 }
5054
5055 /*
5056 * Kernel dataplane provider
5057 */
5058
5059 static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
5060 {
5061 char buf[PREFIX_STRLEN];
5062
5063 switch (dplane_ctx_get_op(ctx)) {
5064
5065 case DPLANE_OP_ROUTE_INSTALL:
5066 case DPLANE_OP_ROUTE_UPDATE:
5067 case DPLANE_OP_ROUTE_DELETE:
5068 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5069 dplane_ctx_get_vrf(ctx), dplane_ctx_get_dest(ctx),
5070 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
5071 break;
5072
5073 case DPLANE_OP_NH_INSTALL:
5074 case DPLANE_OP_NH_UPDATE:
5075 case DPLANE_OP_NH_DELETE:
5076 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5077 dplane_ctx_get_nhe_id(ctx), ctx,
5078 dplane_op2str(dplane_ctx_get_op(ctx)));
5079 break;
5080
5081 case DPLANE_OP_LSP_INSTALL:
5082 case DPLANE_OP_LSP_UPDATE:
5083 case DPLANE_OP_LSP_DELETE:
5084 break;
5085
5086 case DPLANE_OP_PW_INSTALL:
5087 case DPLANE_OP_PW_UNINSTALL:
5088 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5089 dplane_ctx_get_ifname(ctx),
5090 dplane_op2str(ctx->zd_op), dplane_ctx_get_pw_af(ctx),
5091 dplane_ctx_get_pw_local_label(ctx),
5092 dplane_ctx_get_pw_remote_label(ctx));
5093 break;
5094
5095 case DPLANE_OP_ADDR_INSTALL:
5096 case DPLANE_OP_ADDR_UNINSTALL:
5097 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5098 dplane_op2str(dplane_ctx_get_op(ctx)),
5099 dplane_ctx_get_ifindex(ctx),
5100 dplane_ctx_get_intf_addr(ctx));
5101 break;
5102
5103 case DPLANE_OP_MAC_INSTALL:
5104 case DPLANE_OP_MAC_DELETE:
5105 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
5106 sizeof(buf));
5107
5108 zlog_debug("Dplane %s, mac %s, ifindex %u",
5109 dplane_op2str(dplane_ctx_get_op(ctx)),
5110 buf, dplane_ctx_get_ifindex(ctx));
5111 break;
5112
5113 case DPLANE_OP_NEIGH_INSTALL:
5114 case DPLANE_OP_NEIGH_UPDATE:
5115 case DPLANE_OP_NEIGH_DELETE:
5116 case DPLANE_OP_VTEP_ADD:
5117 case DPLANE_OP_VTEP_DELETE:
5118 case DPLANE_OP_NEIGH_DISCOVER:
5119 case DPLANE_OP_NEIGH_IP_INSTALL:
5120 case DPLANE_OP_NEIGH_IP_DELETE:
5121 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
5122 sizeof(buf));
5123
5124 zlog_debug("Dplane %s, ip %s, ifindex %u",
5125 dplane_op2str(dplane_ctx_get_op(ctx)),
5126 buf, dplane_ctx_get_ifindex(ctx));
5127 break;
5128
5129 case DPLANE_OP_RULE_ADD:
5130 case DPLANE_OP_RULE_DELETE:
5131 case DPLANE_OP_RULE_UPDATE:
5132 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5133 dplane_op2str(dplane_ctx_get_op(ctx)),
5134 dplane_ctx_get_ifname(ctx),
5135 dplane_ctx_get_ifindex(ctx), ctx);
5136 break;
5137
5138 case DPLANE_OP_SYS_ROUTE_ADD:
5139 case DPLANE_OP_SYS_ROUTE_DELETE:
5140 case DPLANE_OP_ROUTE_NOTIFY:
5141 case DPLANE_OP_LSP_NOTIFY:
5142 case DPLANE_OP_BR_PORT_UPDATE:
5143
5144 case DPLANE_OP_NONE:
5145 break;
5146
5147 case DPLANE_OP_IPTABLE_ADD:
5148 case DPLANE_OP_IPTABLE_DELETE: {
5149 struct zebra_pbr_iptable ipt;
5150
5151 dplane_ctx_get_pbr_iptable(ctx, &ipt);
5152 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5153 dplane_op2str(dplane_ctx_get_op(ctx)), ipt.unique,
5154 ctx);
5155 } break;
5156 case DPLANE_OP_IPSET_ADD:
5157 case DPLANE_OP_IPSET_DELETE: {
5158 struct zebra_pbr_ipset ipset;
5159
5160 dplane_ctx_get_pbr_ipset(ctx, &ipset);
5161 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5162 dplane_op2str(dplane_ctx_get_op(ctx)), ipset.unique,
5163 ctx);
5164 } break;
5165 case DPLANE_OP_IPSET_ENTRY_ADD:
5166 case DPLANE_OP_IPSET_ENTRY_DELETE: {
5167 struct zebra_pbr_ipset_entry ipent;
5168
5169 dplane_ctx_get_pbr_ipset_entry(ctx, &ipent);
5170 zlog_debug(
5171 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5172 dplane_op2str(dplane_ctx_get_op(ctx)), ipent.unique,
5173 ctx);
5174 } break;
5175 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5176 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5177 dplane_op2str(dplane_ctx_get_op(ctx)),
5178 dplane_ctx_get_ifname(ctx),
5179 family2str(dplane_ctx_neightable_get_family(ctx)));
5180 break;
5181 case DPLANE_OP_GRE_SET:
5182 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5183 dplane_op2str(dplane_ctx_get_op(ctx)),
5184 dplane_ctx_get_ifname(ctx),
5185 ctx->u.gre.link_ifindex);
5186 break;
5187
5188 case DPLANE_OP_INTF_ADDR_ADD:
5189 case DPLANE_OP_INTF_ADDR_DEL:
5190 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5191 dplane_op2str(dplane_ctx_get_op(ctx)),
5192 dplane_ctx_get_ifname(ctx),
5193 dplane_ctx_get_intf_addr(ctx));
5194 break;
5195
5196 case DPLANE_OP_INTF_NETCONFIG:
5197 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5198 dplane_op2str(dplane_ctx_get_op(ctx)),
5199 dplane_ctx_get_netconf_ifindex(ctx),
5200 dplane_ctx_get_netconf_mpls(ctx),
5201 dplane_ctx_get_netconf_mcast(ctx));
5202 break;
5203 }
5204 }
5205
5206 static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
5207 {
5208 enum zebra_dplane_result res = dplane_ctx_get_status(ctx);
5209
5210 switch (dplane_ctx_get_op(ctx)) {
5211
5212 case DPLANE_OP_ROUTE_INSTALL:
5213 case DPLANE_OP_ROUTE_UPDATE:
5214 case DPLANE_OP_ROUTE_DELETE:
5215 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5216 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
5217 1, memory_order_relaxed);
5218
5219 if ((dplane_ctx_get_op(ctx) != DPLANE_OP_ROUTE_DELETE)
5220 && (res == ZEBRA_DPLANE_REQUEST_SUCCESS)) {
5221 struct nexthop *nexthop;
5222
5223 /* Update installed nexthops to signal which have been
5224 * installed.
5225 */
5226 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
5227 nexthop)) {
5228 if (CHECK_FLAG(nexthop->flags,
5229 NEXTHOP_FLAG_RECURSIVE))
5230 continue;
5231
5232 if (CHECK_FLAG(nexthop->flags,
5233 NEXTHOP_FLAG_ACTIVE)) {
5234 SET_FLAG(nexthop->flags,
5235 NEXTHOP_FLAG_FIB);
5236 }
5237 }
5238 }
5239 break;
5240
5241 case DPLANE_OP_NH_INSTALL:
5242 case DPLANE_OP_NH_UPDATE:
5243 case DPLANE_OP_NH_DELETE:
5244 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5245 atomic_fetch_add_explicit(
5246 &zdplane_info.dg_nexthop_errors, 1,
5247 memory_order_relaxed);
5248 break;
5249
5250 case DPLANE_OP_LSP_INSTALL:
5251 case DPLANE_OP_LSP_UPDATE:
5252 case DPLANE_OP_LSP_DELETE:
5253 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5254 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors,
5255 1, memory_order_relaxed);
5256 break;
5257
5258 case DPLANE_OP_PW_INSTALL:
5259 case DPLANE_OP_PW_UNINSTALL:
5260 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5261 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
5262 memory_order_relaxed);
5263 break;
5264
5265 case DPLANE_OP_ADDR_INSTALL:
5266 case DPLANE_OP_ADDR_UNINSTALL:
5267 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5268 atomic_fetch_add_explicit(
5269 &zdplane_info.dg_intf_addr_errors, 1,
5270 memory_order_relaxed);
5271 break;
5272
5273 case DPLANE_OP_MAC_INSTALL:
5274 case DPLANE_OP_MAC_DELETE:
5275 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5276 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
5277 1, memory_order_relaxed);
5278 break;
5279
5280 case DPLANE_OP_NEIGH_INSTALL:
5281 case DPLANE_OP_NEIGH_UPDATE:
5282 case DPLANE_OP_NEIGH_DELETE:
5283 case DPLANE_OP_VTEP_ADD:
5284 case DPLANE_OP_VTEP_DELETE:
5285 case DPLANE_OP_NEIGH_DISCOVER:
5286 case DPLANE_OP_NEIGH_IP_INSTALL:
5287 case DPLANE_OP_NEIGH_IP_DELETE:
5288 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5289 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
5290 1, memory_order_relaxed);
5291 break;
5292
5293 case DPLANE_OP_RULE_ADD:
5294 case DPLANE_OP_RULE_DELETE:
5295 case DPLANE_OP_RULE_UPDATE:
5296 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5297 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors,
5298 1, memory_order_relaxed);
5299 break;
5300
5301 case DPLANE_OP_IPTABLE_ADD:
5302 case DPLANE_OP_IPTABLE_DELETE:
5303 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5304 atomic_fetch_add_explicit(
5305 &zdplane_info.dg_iptable_errors, 1,
5306 memory_order_relaxed);
5307 break;
5308
5309 case DPLANE_OP_IPSET_ADD:
5310 case DPLANE_OP_IPSET_DELETE:
5311 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5312 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors,
5313 1, memory_order_relaxed);
5314 break;
5315
5316 case DPLANE_OP_IPSET_ENTRY_ADD:
5317 case DPLANE_OP_IPSET_ENTRY_DELETE:
5318 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5319 atomic_fetch_add_explicit(
5320 &zdplane_info.dg_ipset_entry_errors, 1,
5321 memory_order_relaxed);
5322 break;
5323
5324 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5325 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5326 atomic_fetch_add_explicit(
5327 &zdplane_info.dg_neightable_errors, 1,
5328 memory_order_relaxed);
5329 break;
5330
5331 case DPLANE_OP_GRE_SET:
5332 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5333 atomic_fetch_add_explicit(
5334 &zdplane_info.dg_gre_set_errors, 1,
5335 memory_order_relaxed);
5336 break;
5337 /* Ignore 'notifications' - no-op */
5338 case DPLANE_OP_SYS_ROUTE_ADD:
5339 case DPLANE_OP_SYS_ROUTE_DELETE:
5340 case DPLANE_OP_ROUTE_NOTIFY:
5341 case DPLANE_OP_LSP_NOTIFY:
5342 case DPLANE_OP_BR_PORT_UPDATE:
5343 break;
5344
5345 /* TODO -- error counters for incoming events? */
5346 case DPLANE_OP_INTF_ADDR_ADD:
5347 case DPLANE_OP_INTF_ADDR_DEL:
5348 case DPLANE_OP_INTF_NETCONFIG:
5349 break;
5350
5351 case DPLANE_OP_NONE:
5352 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5353 atomic_fetch_add_explicit(&zdplane_info.dg_other_errors,
5354 1, memory_order_relaxed);
5355 break;
5356 }
5357 }
5358
5359 static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
5360 struct zebra_dplane_ctx *ctx)
5361 {
5362 zebra_pbr_process_iptable(ctx);
5363 dplane_provider_enqueue_out_ctx(prov, ctx);
5364 }
5365
5366 static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
5367 struct zebra_dplane_ctx *ctx)
5368 {
5369 zebra_pbr_process_ipset(ctx);
5370 dplane_provider_enqueue_out_ctx(prov, ctx);
5371 }
5372
5373 static void
5374 kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
5375 struct zebra_dplane_ctx *ctx)
5376 {
5377 zebra_pbr_process_ipset_entry(ctx);
5378 dplane_provider_enqueue_out_ctx(prov, ctx);
5379 }
5380
5381 /*
5382 * Kernel provider callback
5383 */
5384 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
5385 {
5386 struct zebra_dplane_ctx *ctx, *tctx;
5387 struct dplane_ctx_q work_list;
5388 int counter, limit;
5389
5390 TAILQ_INIT(&work_list);
5391
5392 limit = dplane_provider_get_work_limit(prov);
5393
5394 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5395 zlog_debug("dplane provider '%s': processing",
5396 dplane_provider_get_name(prov));
5397
5398 for (counter = 0; counter < limit; counter++) {
5399 ctx = dplane_provider_dequeue_in_ctx(prov);
5400 if (ctx == NULL)
5401 break;
5402 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5403 kernel_dplane_log_detail(ctx);
5404
5405 if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
5406 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
5407 kernel_dplane_process_iptable(prov, ctx);
5408 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
5409 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
5410 kernel_dplane_process_ipset(prov, ctx);
5411 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
5412 || dplane_ctx_get_op(ctx)
5413 == DPLANE_OP_IPSET_ENTRY_DELETE))
5414 kernel_dplane_process_ipset_entry(prov, ctx);
5415 else
5416 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5417 }
5418
5419 kernel_update_multi(&work_list);
5420
5421 TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
5422 kernel_dplane_handle_result(ctx);
5423
5424 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5425 dplane_provider_enqueue_out_ctx(prov, ctx);
5426 }
5427
5428 /* Ensure that we'll run the work loop again if there's still
5429 * more work to do.
5430 */
5431 if (counter >= limit) {
5432 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5433 zlog_debug("dplane provider '%s' reached max updates %d",
5434 dplane_provider_get_name(prov), counter);
5435
5436 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
5437 1, memory_order_relaxed);
5438
5439 dplane_provider_work_ready();
5440 }
5441
5442 return 0;
5443 }
5444
5445 #ifdef DPLANE_TEST_PROVIDER
5446
5447 /*
5448 * Test dataplane provider plugin
5449 */
5450
5451 /*
5452 * Test provider process callback
5453 */
5454 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
5455 {
5456 struct zebra_dplane_ctx *ctx;
5457 int counter, limit;
5458
5459 /* Just moving from 'in' queue to 'out' queue */
5460
5461 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5462 zlog_debug("dplane provider '%s': processing",
5463 dplane_provider_get_name(prov));
5464
5465 limit = dplane_provider_get_work_limit(prov);
5466
5467 for (counter = 0; counter < limit; counter++) {
5468 ctx = dplane_provider_dequeue_in_ctx(prov);
5469 if (ctx == NULL)
5470 break;
5471
5472 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5473 zlog_debug("dplane provider '%s': op %s",
5474 dplane_provider_get_name(prov),
5475 dplane_op2str(dplane_ctx_get_op(ctx)));
5476
5477 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
5478
5479 dplane_provider_enqueue_out_ctx(prov, ctx);
5480 }
5481
5482 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5483 zlog_debug("dplane provider '%s': processed %d",
5484 dplane_provider_get_name(prov), counter);
5485
5486 /* Ensure that we'll run the work loop again if there's still
5487 * more work to do.
5488 */
5489 if (counter >= limit)
5490 dplane_provider_work_ready();
5491
5492 return 0;
5493 }
5494
5495 /*
5496 * Test provider shutdown/fini callback
5497 */
5498 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
5499 bool early)
5500 {
5501 if (IS_ZEBRA_DEBUG_DPLANE)
5502 zlog_debug("dplane provider '%s': %sshutdown",
5503 dplane_provider_get_name(prov),
5504 early ? "early " : "");
5505
5506 return 0;
5507 }
5508 #endif /* DPLANE_TEST_PROVIDER */
5509
5510 /*
5511 * Register default kernel provider
5512 */
5513 static void dplane_provider_init(void)
5514 {
5515 int ret;
5516
5517 ret = dplane_provider_register("Kernel",
5518 DPLANE_PRIO_KERNEL,
5519 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5520 kernel_dplane_process_func,
5521 NULL,
5522 NULL, NULL);
5523
5524 if (ret != AOK)
5525 zlog_err("Unable to register kernel dplane provider: %d",
5526 ret);
5527
5528 #ifdef DPLANE_TEST_PROVIDER
5529 /* Optional test provider ... */
5530 ret = dplane_provider_register("Test",
5531 DPLANE_PRIO_PRE_KERNEL,
5532 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5533 test_dplane_process_func,
5534 test_dplane_shutdown_func,
5535 NULL /* data */, NULL);
5536
5537 if (ret != AOK)
5538 zlog_err("Unable to register test dplane provider: %d",
5539 ret);
5540 #endif /* DPLANE_TEST_PROVIDER */
5541 }
5542
5543 /*
5544 * Allow zebra code to walk the queue of pending contexts, evaluate each one
5545 * using a callback function. If the function returns 'true', the context
5546 * will be dequeued and freed without being processed.
5547 */
5548 int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
5549 void *arg), void *val)
5550 {
5551 struct zebra_dplane_ctx *ctx, *temp;
5552 struct dplane_ctx_q work_list;
5553
5554 TAILQ_INIT(&work_list);
5555
5556 if (context_cb == NULL)
5557 goto done;
5558
5559 /* Walk the pending context queue under the dplane lock. */
5560 DPLANE_LOCK();
5561
5562 TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
5563 temp) {
5564 if (context_cb(ctx, val)) {
5565 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5566 zd_q_entries);
5567 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5568 }
5569 }
5570
5571 DPLANE_UNLOCK();
5572
5573 /* Now free any contexts selected by the caller, without holding
5574 * the lock.
5575 */
5576 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
5577 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5578 dplane_ctx_fini(&ctx);
5579 }
5580
5581 done:
5582
5583 return 0;
5584 }
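
/*
 * Usage sketch (illustrative; drop_route_ctx is hypothetical): discard any
 * still-queued route updates while leaving other context types alone.
 * Returning true asks this api to dequeue and free the context without
 * processing it.
 *
 *   static bool drop_route_ctx(struct zebra_dplane_ctx *ctx, void *arg)
 *   {
 *       switch (dplane_ctx_get_op(ctx)) {
 *       case DPLANE_OP_ROUTE_INSTALL:
 *       case DPLANE_OP_ROUTE_UPDATE:
 *       case DPLANE_OP_ROUTE_DELETE:
 *           return true;
 *       default:
 *           return false;
 *       }
 *   }
 *
 *   dplane_clean_ctx_queue(drop_route_ctx, NULL);
 */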
5585
5586 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5587 * simplified or skipped during shutdown processing.
5588 */
5589 bool dplane_is_in_shutdown(void)
5590 {
5591 return zdplane_info.dg_is_shutdown;
5592 }
5593
5594 /*
5595 * Enable collection of extra info about interfaces in route updates.
5596 */
5597 void dplane_enable_intf_extra_info(void)
5598 {
5599 dplane_collect_extra_intf_info = true;
5600 }
5601
5602 /*
5603 * Early or pre-shutdown, de-init notification api. This runs pretty
5604 * early during zebra shutdown, as a signal to stop new work and prepare
5605 * for updates generated by shutdown/cleanup activity, as zebra tries to
5606 * remove everything it's responsible for.
5607 * NB: This runs in the main zebra pthread context.
5608 */
5609 void zebra_dplane_pre_finish(void)
5610 {
5611 struct zebra_dplane_provider *prov;
5612
5613 if (IS_ZEBRA_DEBUG_DPLANE)
5614 zlog_debug("Zebra dataplane pre-finish called");
5615
5616 zdplane_info.dg_is_shutdown = true;
5617
5618 /* Notify provider(s) of pending shutdown. */
5619 TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
5620 if (prov->dp_fini == NULL)
5621 continue;
5622
5623 prov->dp_fini(prov, true /* early */);
5624 }
5625 }
5626
5627 /*
5628 * Utility to determine whether work remains enqueued within the dplane;
5629 * used during system shutdown processing.
5630 */
5631 static bool dplane_work_pending(void)
5632 {
5633 bool ret = false;
5634 struct zebra_dplane_ctx *ctx;
5635 struct zebra_dplane_provider *prov;
5636
5637 /* TODO -- just checking incoming/pending work for now, must check
5638 * providers
5639 */
5640 DPLANE_LOCK();
5641 {
5642 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5643 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5644 }
5645 DPLANE_UNLOCK();
5646
5647 if (ctx != NULL) {
5648 ret = true;
5649 goto done;
5650 }
5651
5652 while (prov) {
5653
5654 dplane_provider_lock(prov);
5655
5656 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5657 if (ctx == NULL)
5658 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5659
5660 dplane_provider_unlock(prov);
5661
5662 if (ctx != NULL)
5663 break;
5664
5665 DPLANE_LOCK();
5666 prov = TAILQ_NEXT(prov, dp_prov_link);
5667 DPLANE_UNLOCK();
5668 }
5669
5670 if (ctx != NULL)
5671 ret = true;
5672
5673 done:
5674 return ret;
5675 }
5676
5677 /*
5678 * Shutdown-time intermediate callback, used to determine when all pending
5679 * in-flight updates are done. If there's still work to do, reschedules itself.
5680 * If all work is done, schedules an event to the main zebra thread for
5681 * final zebra shutdown.
5682 * This runs in the dplane pthread context.
5683 */
5684 static void dplane_check_shutdown_status(struct thread *event)
5685 {
5686 struct dplane_zns_info *zi;
5687
5688 if (IS_ZEBRA_DEBUG_DPLANE)
5689 zlog_debug("Zebra dataplane shutdown status check called");
5690
5691 /* Remove any zns info entries as we stop the dplane pthread. */
5692 frr_each_safe (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5693 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5694
5695 if (zdplane_info.dg_master)
5696 thread_cancel(&zi->t_read);
5697
5698 XFREE(MTYPE_DP_NS, zi);
5699 }
5700
5701 if (dplane_work_pending()) {
5702 /* Reschedule dplane check on a short timer */
5703 thread_add_timer_msec(zdplane_info.dg_master,
5704 dplane_check_shutdown_status,
5705 NULL, 100,
5706 &zdplane_info.dg_t_shutdown_check);
5707
5708 /* TODO - give up and stop waiting after a short time? */
5709
5710 } else {
5711 /* We appear to be done - schedule a final callback event
5712 * for the zebra main pthread.
5713 */
5714 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
5715 }
5716 }
5717
5718 /*
5719 * Shutdown, de-init api. This runs pretty late in the shutdown sequence,
5720 * after zebra has tried to free/remove/uninstall all routes.
5721 * At this point, dplane work may still remain to be done, so we can't just
5722 * blindly terminate. If there's still work to do, we'll periodically check
5723 * and when done, we'll enqueue a task to the zebra main thread for final
5724 * termination processing.
5725 *
5726 * NB: This runs in the main zebra thread context.
5727 */
5728 void zebra_dplane_finish(void)
5729 {
5730 if (IS_ZEBRA_DEBUG_DPLANE)
5731 zlog_debug("Zebra dataplane fini called");
5732
5733 thread_add_event(zdplane_info.dg_master,
5734 dplane_check_shutdown_status, NULL, 0,
5735 &zdplane_info.dg_t_shutdown_check);
5736 }
5737
5738 /*
5739 * Main dataplane pthread event loop. The thread takes new incoming work
5740 * and offers it to the first provider. It then iterates through the
5741 * providers, taking completed work from each one and offering it
5742 * to the next in order. At each step, a limited number of updates are
5743 * processed during a cycle in order to provide some fairness.
5744 *
5745 * This loop through the providers is only run once, so that the dataplane
5746 * pthread can look for other pending work - such as i/o work on behalf of
5747 * providers.
5748 */
5749 static void dplane_thread_loop(struct thread *event)
5750 {
5751 struct dplane_ctx_q work_list;
5752 struct dplane_ctx_q error_list;
5753 struct zebra_dplane_provider *prov;
5754 struct zebra_dplane_ctx *ctx, *tctx;
5755 int limit, counter, error_counter;
5756 uint64_t curr, high;
5757 bool reschedule = false;
5758
5759 /* Capture work limit per cycle */
5760 limit = zdplane_info.dg_updates_per_cycle;
5761
5762 /* Init temporary lists used to move contexts among providers */
5763 TAILQ_INIT(&work_list);
5764 TAILQ_INIT(&error_list);
5765 error_counter = 0;
5766
5767 /* Check for zebra shutdown */
5768 if (!zdplane_info.dg_run)
5769 return;
5770
5771 /* Dequeue some incoming work from zebra (if any) onto the temporary
5772 * working list.
5773 */
5774 DPLANE_LOCK();
5775
5776 /* Locate initial registered provider */
5777 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5778
5779 /* Move new work from incoming list to temp list */
5780 for (counter = 0; counter < limit; counter++) {
5781 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5782 if (ctx) {
5783 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5784 zd_q_entries);
5785
5786 ctx->zd_provider = prov->dp_id;
5787
5788 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5789 } else {
5790 break;
5791 }
5792 }
5793
5794 DPLANE_UNLOCK();
5795
5796 atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
5797 memory_order_relaxed);
5798
5799 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5800 zlog_debug("dplane: incoming new work counter: %d", counter);
5801
5802 /* Iterate through the registered providers, offering new incoming
5803 * work. If the provider has outgoing work in its queue, take that
5804 * work for the next provider.
5805 */
5806 while (prov) {
5807
5808 /* At each iteration, the temporary work list has 'counter'
5809 * items.
5810 */
5811 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5812 zlog_debug("dplane enqueues %d new work to provider '%s'",
5813 counter, dplane_provider_get_name(prov));
5814
5815 /* Capture current provider id in each context; check for
5816 * error status.
5817 */
5818 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
5819 if (dplane_ctx_get_status(ctx) ==
5820 ZEBRA_DPLANE_REQUEST_SUCCESS) {
5821 ctx->zd_provider = prov->dp_id;
5822 } else {
5823 /*
5824 * TODO -- improve error-handling: recirc
5825 * errors backwards so that providers can
5826 * 'undo' their work (if they want to)
5827 */
5828
5829 /* Move to error list; will be returned
5830 * to zebra main.
5831 */
5832 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5833 TAILQ_INSERT_TAIL(&error_list,
5834 ctx, zd_q_entries);
5835 error_counter++;
5836 }
5837 }
5838
5839 /* Enqueue new work to the provider */
5840 dplane_provider_lock(prov);
5841
5842 if (TAILQ_FIRST(&work_list))
5843 TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
5844 zd_q_entries);
5845
5846 atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
5847 memory_order_relaxed);
5848 atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
5849 memory_order_relaxed);
5850 curr = atomic_load_explicit(&prov->dp_in_queued,
5851 memory_order_relaxed);
5852 high = atomic_load_explicit(&prov->dp_in_max,
5853 memory_order_relaxed);
5854 if (curr > high)
5855 atomic_store_explicit(&prov->dp_in_max, curr,
5856 memory_order_relaxed);
5857
5858 dplane_provider_unlock(prov);
5859
5860 /* Reset the temp list (though the 'concat' may have done this
5861 * already), and the counter
5862 */
5863 TAILQ_INIT(&work_list);
5864 counter = 0;
5865
5866 /* Call into the provider code. Note that this is
5867 * unconditional: we offer to do work even if we don't enqueue
5868 * any _new_ work.
5869 */
5870 (*prov->dp_fp)(prov);
5871
5872 /* Check for zebra shutdown */
5873 if (!zdplane_info.dg_run)
5874 break;
5875
5876 /* Dequeue completed work from the provider */
5877 dplane_provider_lock(prov);
5878
5879 while (counter < limit) {
5880 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5881 if (ctx) {
5882 TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
5883 zd_q_entries);
5884
5885 TAILQ_INSERT_TAIL(&work_list,
5886 ctx, zd_q_entries);
5887 counter++;
5888 } else
5889 break;
5890 }
5891
5892 dplane_provider_unlock(prov);
5893
5894 if (counter >= limit)
5895 reschedule = true;
5896
5897 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5898 zlog_debug("dplane dequeues %d completed work from provider %s",
5899 counter, dplane_provider_get_name(prov));
5900
5901 /* Locate next provider */
5902 DPLANE_LOCK();
5903 prov = TAILQ_NEXT(prov, dp_prov_link);
5904 DPLANE_UNLOCK();
5905 }
5906
5907 /*
5908 * We hit the work limit while processing at least one provider's
5909 * output queue - ensure we come back and finish it.
5910 */
5911 if (reschedule)
5912 dplane_provider_work_ready();
5913
5914 /* After all providers have been serviced, enqueue any completed
5915 * work and any errors back to zebra so it can process the results.
5916 */
5917 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5918 zlog_debug("dplane has %d completed, %d errors, for zebra main",
5919 counter, error_counter);
5920
5921 /*
5922 * Hand lists through the api to zebra main,
5923 * to reduce the number of lock/unlock cycles
5924 */
5925
5926 /* Call through to zebra main */
5927 (zdplane_info.dg_results_cb)(&error_list);
5928
5929 TAILQ_INIT(&error_list);
5930
5931 /* Call through to zebra main */
5932 (zdplane_info.dg_results_cb)(&work_list);
5933
5934 TAILQ_INIT(&work_list);
5935 }
5936
5937 /*
5938 * Final phase of shutdown, after all work enqueued to dplane has been
5939 * processed. This is called from the zebra main pthread context.
5940 */
5941 void zebra_dplane_shutdown(void)
5942 {
5943 struct zebra_dplane_provider *dp;
5944
5945 if (IS_ZEBRA_DEBUG_DPLANE)
5946 zlog_debug("Zebra dataplane shutdown called");
5947
5948 /* Stop dplane thread, if it's running */
5949
5950 zdplane_info.dg_run = false;
5951
5952 if (zdplane_info.dg_t_update)
5953 thread_cancel_async(zdplane_info.dg_t_update->master,
5954 &zdplane_info.dg_t_update, NULL);
5955
5956 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
5957
5958 /* Destroy pthread */
5959 frr_pthread_destroy(zdplane_info.dg_pthread);
5960 zdplane_info.dg_pthread = NULL;
5961 zdplane_info.dg_master = NULL;
5962
5963 /* Notify provider(s) of final shutdown.
5964 * Note that this call is in the main pthread, so providers must
5965 * be prepared for that.
5966 */
5967 TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
5968 if (dp->dp_fini == NULL)
5969 continue;
5970
5971 dp->dp_fini(dp, false);
5972 }
5973
5974 /* TODO -- Clean-up provider objects */
5975
5976 /* TODO -- Clean queue(s), free memory */
5977 }
5978
5979 /*
5980 * Initialize the dataplane module during startup, internal/private version
5981 */
5982 static void zebra_dplane_init_internal(void)
5983 {
5984 memset(&zdplane_info, 0, sizeof(zdplane_info));
5985
5986 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
5987
5988 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
5989 TAILQ_INIT(&zdplane_info.dg_providers_q);
5990 zns_info_list_init(&zdplane_info.dg_zns_list);
5991
5992 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
5993
5994 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
5995
5996 /* Register default kernel 'provider' during init */
5997 dplane_provider_init();
5998 }
5999
6000 /*
6001 * Start the dataplane pthread. This step needs to be run later than the
6002 * 'init' step, in case zebra has forked.
6003 */
6004 void zebra_dplane_start(void)
6005 {
6006 struct dplane_zns_info *zi;
6007 struct zebra_dplane_provider *prov;
6008 struct frr_pthread_attr pattr = {
6009 .start = frr_pthread_attr_default.start,
6010 .stop = frr_pthread_attr_default.stop
6011 };
6012
6013 /* Start dataplane pthread */
6014
6015 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
6016 "zebra_dplane");
6017
6018 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
6019
6020 zdplane_info.dg_run = true;
6021
6022 /* Enqueue an initial event for the dataplane pthread */
6023 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
6024 &zdplane_info.dg_t_update);
6025
6026 /* Enqueue reads if necessary */
6027 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
6028 #if defined(HAVE_NETLINK)
6029 thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
6030 zi, zi->info.sock, &zi->t_read);
6031 #endif
6032 }
6033
6034 /* Call start callbacks for registered providers */
6035
6036 DPLANE_LOCK();
6037 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6038 DPLANE_UNLOCK();
6039
6040 while (prov) {
6041
6042 if (prov->dp_start)
6043 (prov->dp_start)(prov);
6044
6045 /* Locate next provider */
6046 DPLANE_LOCK();
6047 prov = TAILQ_NEXT(prov, dp_prov_link);
6048 DPLANE_UNLOCK();
6049 }
6050
6051 frr_pthread_run(zdplane_info.dg_pthread, NULL);
6052 }
6053
6054 /*
6055 * Initialize the dataplane module at startup; called by zebra rib_init()
6056 */
6057 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
6058 {
6059 zebra_dplane_init_internal();
6060 zdplane_info.dg_results_cb = results_fp;
6061 }
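
/*
 * Startup ordering implied by the apis above (sketch; results_handler is a
 * placeholder for zebra's actual results callback of type
 * int (*)(struct dplane_ctx_q *)):
 *
 *   zebra_dplane_init(results_handler);   called from rib_init(), early
 *   ...                                   zebra may daemonize/fork here
 *   zebra_dplane_start();                 later: spawn the dplane pthread,
 *                                         run provider start callbacks
 */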