zebra/zebra_dplane.c
1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/netconf_netlink.h"
32 #include "zebra/zebra_router.h"
33 #include "zebra/zebra_dplane.h"
34 #include "zebra/zebra_vxlan_private.h"
35 #include "zebra/zebra_mpls.h"
36 #include "zebra/rt.h"
37 #include "zebra/debug.h"
38 #include "zebra/zebra_pbr.h"
39 #include "printfrr.h"
40
41 /* Memory types */
42 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx");
43 DEFINE_MTYPE_STATIC(ZEBRA, DP_INTF, "Zebra DPlane Intf");
44 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider");
45 DEFINE_MTYPE_STATIC(ZEBRA, DP_NETFILTER, "Zebra Netfilter Internal Object");
46 DEFINE_MTYPE_STATIC(ZEBRA, DP_NS, "DPlane NSes");
47
48 #ifndef AOK
49 # define AOK 0
50 #endif
51
52 /* Control for collection of extra interface info with route updates; a plugin
53 * can enable the extra info via a dplane api.
54 */
55 static bool dplane_collect_extra_intf_info;
56
57 /* Enable test dataplane provider */
58 /*#define DPLANE_TEST_PROVIDER 1 */
59
60 /* Default value for max queued incoming updates */
61 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
62
63 /* Default value for new work per cycle */
64 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
65
66 /* Validation check macro for context blocks */
67 /* #define DPLANE_DEBUG 1 */
68
69 #ifdef DPLANE_DEBUG
70
71 # define DPLANE_CTX_VALID(p) \
72 assert((p) != NULL)
73
74 #else
75
76 # define DPLANE_CTX_VALID(p)
77
78 #endif /* DPLANE_DEBUG */
79
80 /*
81 * Nexthop information captured for nexthop/nexthop group updates
82 */
83 struct dplane_nexthop_info {
84 uint32_t id;
85 uint32_t old_id;
86 afi_t afi;
87 vrf_id_t vrf_id;
88 int type;
89
90 struct nexthop_group ng;
91 struct nh_grp nh_grp[MULTIPATH_NUM];
92 uint8_t nh_grp_count;
93 };
94
95 /*
96 * Optional extra info about interfaces used in route updates' nexthops.
97 */
98 struct dplane_intf_extra {
99 vrf_id_t vrf_id;
100 uint32_t ifindex;
101 uint32_t flags;
102 uint32_t status;
103
104 TAILQ_ENTRY(dplane_intf_extra) link;
105 };
106
107 /*
108 * Route information captured for route updates.
109 */
110 struct dplane_route_info {
111
112 /* Dest and (optional) source prefixes */
113 struct prefix zd_dest;
114 struct prefix zd_src;
115
116 afi_t zd_afi;
117 safi_t zd_safi;
118
119 int zd_type;
120 int zd_old_type;
121
122 route_tag_t zd_tag;
123 route_tag_t zd_old_tag;
124 uint32_t zd_metric;
125 uint32_t zd_old_metric;
126
127 uint16_t zd_instance;
128 uint16_t zd_old_instance;
129
130 uint8_t zd_distance;
131 uint8_t zd_old_distance;
132
133 uint32_t zd_mtu;
134 uint32_t zd_nexthop_mtu;
135
136 /* Nexthop hash entry info */
137 struct dplane_nexthop_info nhe;
138
139 /* Nexthops */
140 uint32_t zd_nhg_id;
141 struct nexthop_group zd_ng;
142
143 /* Backup nexthops (if present) */
144 struct nexthop_group backup_ng;
145
146 /* "Previous" nexthops, used only in route updates without netlink */
147 struct nexthop_group zd_old_ng;
148 struct nexthop_group old_backup_ng;
149
150 /* Optional list of extra interface info */
151 TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
152 };
153
154 /*
155 * Pseudowire info for the dataplane
156 */
157 struct dplane_pw_info {
158 int type;
159 int af;
160 int status;
161 uint32_t flags;
162 uint32_t nhg_id;
163 union g_addr dest;
164 mpls_label_t local_label;
165 mpls_label_t remote_label;
166
167 /* Nexthops that are valid and installed */
168 struct nexthop_group fib_nhg;
169
170 /* Primary and backup nexthop sets, copied from the resolving route. */
171 struct nexthop_group primary_nhg;
172 struct nexthop_group backup_nhg;
173
174 union pw_protocol_fields fields;
175 };
176
177 /*
178 * Bridge port info for the dataplane
179 */
180 struct dplane_br_port_info {
181 uint32_t sph_filter_cnt;
182 struct in_addr sph_filters[ES_VTEP_MAX_CNT];
183 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h */
184 uint32_t flags;
185 uint32_t backup_nhg_id;
186 };
187
188 /*
189 * Interface/prefix info for the dataplane
190 */
191 struct dplane_intf_info {
192
193 uint32_t metric;
194 uint32_t flags;
195
196 bool protodown;
197 bool pd_reason_val;
198
199 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
200 #define DPLANE_INTF_SECONDARY (1 << 1)
201 #define DPLANE_INTF_BROADCAST (1 << 2)
202 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
203 #define DPLANE_INTF_HAS_LABEL (1 << 4)
204
205 /* Interface address/prefix */
206 struct prefix prefix;
207
208 /* Dest address, for p2p, or broadcast prefix */
209 struct prefix dest_prefix;
210
211 char *label;
212 char label_buf[32];
213 };
214
215 /*
216 * EVPN MAC address info for the dataplane.
217 */
218 struct dplane_mac_info {
219 vlanid_t vid;
220 ifindex_t br_ifindex;
221 struct ethaddr mac;
222 struct in_addr vtep_ip;
223 bool is_sticky;
224 uint32_t nhg_id;
225 uint32_t update_flags;
226 };
227
228 /*
229 * Neighbor info for the dataplane
230 */
231 struct dplane_neigh_info {
232 struct ipaddr ip_addr;
233 union {
234 struct ethaddr mac;
235 struct ipaddr ip_addr;
236 } link;
237 uint32_t flags;
238 uint16_t state;
239 uint32_t update_flags;
240 };
241
242 /*
243 * Neighbor Table
244 */
245 struct dplane_neigh_table {
246 uint8_t family;
247 uint32_t app_probes;
248 uint32_t ucast_probes;
249 uint32_t mcast_probes;
250 };
251
252 /*
253 * Policy based routing rule info for the dataplane
254 */
255 struct dplane_ctx_rule {
256 uint32_t priority;
257
258 /* The route table pointed to by this rule */
259 uint32_t table;
260
261 /* Filter criteria */
262 uint32_t filter_bm;
263 uint32_t fwmark;
264 uint8_t dsfield;
265 struct prefix src_ip;
266 struct prefix dst_ip;
267 uint8_t ip_proto;
268
269 uint8_t action_pcp;
270 uint16_t action_vlan_id;
271 uint16_t action_vlan_flags;
272
273 uint32_t action_queue_id;
274
275 char ifname[INTERFACE_NAMSIZ + 1];
276 };
277
278 struct dplane_rule_info {
279 /*
280 * Originating zclient sock fd, so we can know who to send
281 * back to.
282 */
283 int sock;
284
285 int unique;
286 int seq;
287
288 struct dplane_ctx_rule new;
289 struct dplane_ctx_rule old;
290 };
291
292 struct dplane_gre_ctx {
293 uint32_t link_ifindex;
294 unsigned int mtu;
295 struct zebra_l2info_gre info;
296 };
297
298
299 /*
300 * Network interface configuration info - aligned with netlink's NETCONF
301 * info. The flags values are public, in the dplane.h file...
302 */
303 struct dplane_netconf_info {
304 ns_id_t ns_id;
305 ifindex_t ifindex;
306 enum dplane_netconf_status_e mpls_val;
307 enum dplane_netconf_status_e mcast_val;
308 };
309
310 /*
311 * The context block used to exchange info about route updates across
312 * the boundary between the zebra main context (and pthread) and the
313 * dataplane layer (and pthread).
314 */
315 struct zebra_dplane_ctx {
316
317 /* Operation code */
318 enum dplane_op_e zd_op;
319
320 /* Status on return */
321 enum zebra_dplane_result zd_status;
322
323 /* Dplane provider id */
324 uint32_t zd_provider;
325
326 /* Flags - used by providers, e.g. */
327 int zd_flags;
328
329 bool zd_is_update;
330
331 uint32_t zd_seq;
332 uint32_t zd_old_seq;
333
334 /* Some updates may be generated by notifications: allow the
335 * plugin to notice and ignore results from its own notifications.
336 */
337 uint32_t zd_notif_provider;
338
339 /* TODO -- internal/sub-operation status? */
340 enum zebra_dplane_result zd_remote_status;
341 enum zebra_dplane_result zd_kernel_status;
342
343 vrf_id_t zd_vrf_id;
344 uint32_t zd_table_id;
345
346 char zd_ifname[INTERFACE_NAMSIZ];
347 ifindex_t zd_ifindex;
348
349 /* Support info for different kinds of updates */
350 union {
351 struct dplane_route_info rinfo;
352 struct zebra_lsp lsp;
353 struct dplane_pw_info pw;
354 struct dplane_br_port_info br_port;
355 struct dplane_intf_info intf;
356 struct dplane_mac_info macinfo;
357 struct dplane_neigh_info neigh;
358 struct dplane_rule_info rule;
359 struct zebra_pbr_iptable iptable;
360 struct zebra_pbr_ipset ipset;
361 struct {
362 struct zebra_pbr_ipset_entry entry;
363 struct zebra_pbr_ipset_info info;
364 } ipset_entry;
365 struct dplane_neigh_table neightable;
366 struct dplane_gre_ctx gre;
367 struct dplane_netconf_info netconf;
368 } u;
369
370 /* Namespace info, used especially for netlink kernel communication */
371 struct zebra_dplane_info zd_ns_info;
372
373 /* Embedded list linkage */
374 TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
375 };
376
377 /* Flag that can be set by a pre-kernel provider as a signal that an update
378 * should bypass the kernel.
379 */
380 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
381
382
383 /*
384 * Registration block for one dataplane provider.
385 */
386 struct zebra_dplane_provider {
387 /* Name */
388 char dp_name[DPLANE_PROVIDER_NAMELEN + 1];
389
390 /* Priority, for ordering among providers */
391 uint8_t dp_priority;
392
393 /* Id value */
394 uint32_t dp_id;
395
396 /* Mutex */
397 pthread_mutex_t dp_mutex;
398
399 /* Plugin-provided extra data */
400 void *dp_data;
401
402 /* Flags */
403 int dp_flags;
404
405 int (*dp_start)(struct zebra_dplane_provider *prov);
406
407 int (*dp_fp)(struct zebra_dplane_provider *prov);
408
409 int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
410
411 _Atomic uint32_t dp_in_counter;
412 _Atomic uint32_t dp_in_queued;
413 _Atomic uint32_t dp_in_max;
414 _Atomic uint32_t dp_out_counter;
415 _Atomic uint32_t dp_out_queued;
416 _Atomic uint32_t dp_out_max;
417 _Atomic uint32_t dp_error_counter;
418
419 /* Queue of contexts inbound to the provider */
420 struct dplane_ctx_q dp_ctx_in_q;
421
422 /* Queue of completed contexts outbound from the provider back
423 * towards the dataplane module.
424 */
425 struct dplane_ctx_q dp_ctx_out_q;
426
427 /* Embedded list linkage for provider objects */
428 TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
429 };
430
431 /* Declare types for list of zns info objects */
432 PREDECL_DLIST(zns_info_list);
433
434 struct dplane_zns_info {
435 struct zebra_dplane_info info;
436
437 /* Request data from the OS */
438 struct thread *t_request;
439
440 /* Read event */
441 struct thread *t_read;
442
443 /* List linkage */
444 struct zns_info_list_item link;
445 };
446
447 /*
448 * Globals
449 */
450 static struct zebra_dplane_globals {
451 /* Mutex to control access to dataplane components */
452 pthread_mutex_t dg_mutex;
453
454 /* Results callback registered by zebra 'core' */
455 int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
456
457 /* Sentinel for beginning of shutdown */
458 volatile bool dg_is_shutdown;
459
460 /* Sentinel for end of shutdown */
461 volatile bool dg_run;
462
463 /* Update context queue inbound to the dataplane */
464 TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
465
466 /* Ordered list of providers */
467 TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
468
469 /* List of info about each zns */
470 struct zns_info_list_head dg_zns_list;
471
472 /* Counter used to assign internal ids to providers */
473 uint32_t dg_provider_id;
474
475 /* Limit number of pending, unprocessed updates */
476 _Atomic uint32_t dg_max_queued_updates;
477
478 /* Control whether system route notifications should be produced. */
479 bool dg_sys_route_notifs;
480
481 /* Limit number of new updates dequeued at once, to pace an
482 * incoming burst.
483 */
484 uint32_t dg_updates_per_cycle;
485
486 _Atomic uint32_t dg_routes_in;
487 _Atomic uint32_t dg_routes_queued;
488 _Atomic uint32_t dg_routes_queued_max;
489 _Atomic uint32_t dg_route_errors;
490 _Atomic uint32_t dg_other_errors;
491
492 _Atomic uint32_t dg_nexthops_in;
493 _Atomic uint32_t dg_nexthop_errors;
494
495 _Atomic uint32_t dg_lsps_in;
496 _Atomic uint32_t dg_lsp_errors;
497
498 _Atomic uint32_t dg_pws_in;
499 _Atomic uint32_t dg_pw_errors;
500
501 _Atomic uint32_t dg_br_port_in;
502 _Atomic uint32_t dg_br_port_errors;
503
504 _Atomic uint32_t dg_intf_addrs_in;
505 _Atomic uint32_t dg_intf_addr_errors;
506
507 _Atomic uint32_t dg_macs_in;
508 _Atomic uint32_t dg_mac_errors;
509
510 _Atomic uint32_t dg_neighs_in;
511 _Atomic uint32_t dg_neigh_errors;
512
513 _Atomic uint32_t dg_rules_in;
514 _Atomic uint32_t dg_rule_errors;
515
516 _Atomic uint32_t dg_update_yields;
517
518 _Atomic uint32_t dg_iptable_in;
519 _Atomic uint32_t dg_iptable_errors;
520
521 _Atomic uint32_t dg_ipset_in;
522 _Atomic uint32_t dg_ipset_errors;
523 _Atomic uint32_t dg_ipset_entry_in;
524 _Atomic uint32_t dg_ipset_entry_errors;
525
526 _Atomic uint32_t dg_neightable_in;
527 _Atomic uint32_t dg_neightable_errors;
528
529 _Atomic uint32_t dg_gre_set_in;
530 _Atomic uint32_t dg_gre_set_errors;
531
532 _Atomic uint32_t dg_intfs_in;
533 _Atomic uint32_t dg_intf_errors;
534
535 /* Dataplane pthread */
536 struct frr_pthread *dg_pthread;
537
538 /* Event-delivery context 'master' for the dplane */
539 struct thread_master *dg_master;
540
541 /* Event/'thread' pointer for queued updates */
542 struct thread *dg_t_update;
543
544 /* Event pointer for pending shutdown check loop */
545 struct thread *dg_t_shutdown_check;
546
547 } zdplane_info;
548
549 /* Instantiate zns list type */
550 DECLARE_DLIST(zns_info_list, struct dplane_zns_info, link);
551
552 /*
553 * Lock and unlock for interactions with the zebra 'core' pthread
554 */
555 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
556 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
557
558
559 /*
560 * Lock and unlock for individual providers
561 */
562 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
563 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
564
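/* Illustrative sketch (not part of this file's API): the per-provider mutex
 * guards a provider's inbound/outbound context queues. A hypothetical
 * provider callback that drains its inbound queue directly would bracket the
 * queue access with the lock macros; 'prov' and 'ctx' are assumed local
 * variables here:
 *
 *     DPLANE_PROV_LOCK(prov);
 *     ctx = dplane_ctx_dequeue(&(prov->dp_ctx_in_q));
 *     DPLANE_PROV_UNLOCK(prov);
 */
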
565 /* Prototypes */
566 static void dplane_thread_loop(struct thread *event);
567 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
568 enum dplane_op_e op);
569 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
570 enum dplane_op_e op);
571 static enum zebra_dplane_result intf_addr_update_internal(
572 const struct interface *ifp, const struct connected *ifc,
573 enum dplane_op_e op);
574 static enum zebra_dplane_result mac_update_common(
575 enum dplane_op_e op, const struct interface *ifp,
576 const struct interface *br_ifp,
577 vlanid_t vid, const struct ethaddr *mac,
578 struct in_addr vtep_ip, bool sticky, uint32_t nhg_id,
579 uint32_t update_flags);
580 static enum zebra_dplane_result
581 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
582 const void *link, int link_family,
583 const struct ipaddr *ip, uint32_t flags, uint16_t state,
584 uint32_t update_flags, int protocol);
585
586 /*
587 * Public APIs
588 */
589
590 /* Obtain thread_master for dataplane thread */
591 struct thread_master *dplane_get_thread_master(void)
592 {
593 return zdplane_info.dg_master;
594 }
595
596 /*
597 * Allocate a dataplane update context
598 */
599 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
600 {
601 struct zebra_dplane_ctx *p;
602
603 /* TODO -- just alloc'ing memory, but would like to maintain
604 * a pool
605 */
606 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
607
608 return p;
609 }
610
611 /* Enable system route notifications */
612 void dplane_enable_sys_route_notifs(void)
613 {
614 zdplane_info.dg_sys_route_notifs = true;
615 }
616
617 /*
618 * Clean up dependent/internal allocations inside a context object
619 */
620 static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
621 {
622 struct dplane_intf_extra *if_extra, *if_tmp;
623
624 /*
625 * Some internal allocations may need to be freed, depending on
626 * the type of info captured in the ctx.
627 */
628 switch (ctx->zd_op) {
629 case DPLANE_OP_ROUTE_INSTALL:
630 case DPLANE_OP_ROUTE_UPDATE:
631 case DPLANE_OP_ROUTE_DELETE:
632 case DPLANE_OP_SYS_ROUTE_ADD:
633 case DPLANE_OP_SYS_ROUTE_DELETE:
634 case DPLANE_OP_ROUTE_NOTIFY:
635
636 /* Free allocated nexthops */
637 if (ctx->u.rinfo.zd_ng.nexthop) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
640
641 ctx->u.rinfo.zd_ng.nexthop = NULL;
642 }
643
644 /* Free backup info also (if present) */
645 if (ctx->u.rinfo.backup_ng.nexthop) {
646 /* This deals with recursive nexthops too */
647 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
648
649 ctx->u.rinfo.backup_ng.nexthop = NULL;
650 }
651
652 if (ctx->u.rinfo.zd_old_ng.nexthop) {
653 /* This deals with recursive nexthops too */
654 nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop);
655
656 ctx->u.rinfo.zd_old_ng.nexthop = NULL;
657 }
658
659 if (ctx->u.rinfo.old_backup_ng.nexthop) {
660 /* This deals with recursive nexthops too */
661 nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop);
662
663 ctx->u.rinfo.old_backup_ng.nexthop = NULL;
664 }
665
666 /* Optional extra interface info */
667 TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
668 link, if_tmp) {
669 TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
670 link);
671 XFREE(MTYPE_DP_INTF, if_extra);
672 }
673
674 break;
675
676 case DPLANE_OP_NH_INSTALL:
677 case DPLANE_OP_NH_UPDATE:
678 case DPLANE_OP_NH_DELETE: {
679 if (ctx->u.rinfo.nhe.ng.nexthop) {
680 /* This deals with recursive nexthops too */
681 nexthops_free(ctx->u.rinfo.nhe.ng.nexthop);
682
683 ctx->u.rinfo.nhe.ng.nexthop = NULL;
684 }
685 break;
686 }
687
688 case DPLANE_OP_LSP_INSTALL:
689 case DPLANE_OP_LSP_UPDATE:
690 case DPLANE_OP_LSP_DELETE:
691 case DPLANE_OP_LSP_NOTIFY:
692 {
693 struct zebra_nhlfe *nhlfe;
694
695 /* Unlink and free allocated NHLFEs */
696 frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
697 nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
698 zebra_mpls_nhlfe_free(nhlfe);
699 }
700
701 /* Unlink and free allocated backup NHLFEs, if present */
702 frr_each_safe(nhlfe_list,
703 &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
704 nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
705 nhlfe);
706 zebra_mpls_nhlfe_free(nhlfe);
707 }
708
709 /* Clear pointers in lsp struct, in case we're caching
710 * free context structs.
711 */
712 nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
713 ctx->u.lsp.best_nhlfe = NULL;
714 nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
715
716 break;
717 }
718
719 case DPLANE_OP_PW_INSTALL:
720 case DPLANE_OP_PW_UNINSTALL:
721 /* Free allocated nexthops */
722 if (ctx->u.pw.fib_nhg.nexthop) {
723 /* This deals with recursive nexthops too */
724 nexthops_free(ctx->u.pw.fib_nhg.nexthop);
725
726 ctx->u.pw.fib_nhg.nexthop = NULL;
727 }
728 if (ctx->u.pw.primary_nhg.nexthop) {
729 nexthops_free(ctx->u.pw.primary_nhg.nexthop);
730
731 ctx->u.pw.primary_nhg.nexthop = NULL;
732 }
733 if (ctx->u.pw.backup_nhg.nexthop) {
734 nexthops_free(ctx->u.pw.backup_nhg.nexthop);
735
736 ctx->u.pw.backup_nhg.nexthop = NULL;
737 }
738 break;
739
740 case DPLANE_OP_ADDR_INSTALL:
741 case DPLANE_OP_ADDR_UNINSTALL:
742 case DPLANE_OP_INTF_ADDR_ADD:
743 case DPLANE_OP_INTF_ADDR_DEL:
744 /* Maybe free label string, if allocated */
745 if (ctx->u.intf.label != NULL &&
746 ctx->u.intf.label != ctx->u.intf.label_buf) {
747 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
748 ctx->u.intf.label = NULL;
749 }
750 break;
751
752 case DPLANE_OP_MAC_INSTALL:
753 case DPLANE_OP_MAC_DELETE:
754 case DPLANE_OP_NEIGH_INSTALL:
755 case DPLANE_OP_NEIGH_UPDATE:
756 case DPLANE_OP_NEIGH_DELETE:
757 case DPLANE_OP_VTEP_ADD:
758 case DPLANE_OP_VTEP_DELETE:
759 case DPLANE_OP_RULE_ADD:
760 case DPLANE_OP_RULE_DELETE:
761 case DPLANE_OP_RULE_UPDATE:
762 case DPLANE_OP_NEIGH_DISCOVER:
763 case DPLANE_OP_BR_PORT_UPDATE:
764 case DPLANE_OP_NEIGH_IP_INSTALL:
765 case DPLANE_OP_NEIGH_IP_DELETE:
766 case DPLANE_OP_NONE:
767 case DPLANE_OP_IPSET_ADD:
768 case DPLANE_OP_IPSET_DELETE:
769 case DPLANE_OP_INTF_INSTALL:
770 case DPLANE_OP_INTF_UPDATE:
771 case DPLANE_OP_INTF_DELETE:
772 break;
773
774 case DPLANE_OP_IPSET_ENTRY_ADD:
775 case DPLANE_OP_IPSET_ENTRY_DELETE:
776 break;
777 case DPLANE_OP_NEIGH_TABLE_UPDATE:
778 break;
779 case DPLANE_OP_IPTABLE_ADD:
780 case DPLANE_OP_IPTABLE_DELETE:
781 if (ctx->u.iptable.interface_name_list) {
782 struct listnode *node, *nnode;
783 char *ifname;
784
785 for (ALL_LIST_ELEMENTS(
786 ctx->u.iptable.interface_name_list, node,
787 nnode, ifname)) {
788 LISTNODE_DETACH(
789 ctx->u.iptable.interface_name_list,
790 node);
791 XFREE(MTYPE_DP_NETFILTER, ifname);
792 }
793 list_delete(&ctx->u.iptable.interface_name_list);
794 }
795 break;
796 case DPLANE_OP_GRE_SET:
797 case DPLANE_OP_INTF_NETCONFIG:
798 break;
799 }
800 }
801
802 /*
803 * Free a dataplane results context.
804 */
805 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
806 {
807 if (pctx == NULL)
808 return;
809
810 DPLANE_CTX_VALID(*pctx);
811
812 /* TODO -- just freeing memory, but would like to maintain
813 * a pool
814 */
815
816 /* Some internal allocations may need to be freed, depending on
817 * the type of info captured in the ctx.
818 */
819 dplane_ctx_free_internal(*pctx);
820
821 XFREE(MTYPE_DP_CTX, *pctx);
822 }
823
824 /*
825 * Reset an allocated context object for re-use. All internal allocations are
826 * freed and the context is memset.
827 */
828 void dplane_ctx_reset(struct zebra_dplane_ctx *ctx)
829 {
830 dplane_ctx_free_internal(ctx);
831 memset(ctx, 0, sizeof(*ctx));
832 }
833
834 /*
835 * Return a context block to the dplane module after processing
836 */
837 void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
838 {
839 /* TODO -- maintain pool; for now, just free */
840 dplane_ctx_free(pctx);
841 }
842
843 /* Enqueue a context block */
844 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
845 const struct zebra_dplane_ctx *ctx)
846 {
847 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
848 }
849
850 /* Append a list of context blocks to another list */
851 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
852 struct dplane_ctx_q *from_list)
853 {
854 if (TAILQ_FIRST(from_list)) {
855 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
856
857 /* And clear 'from' list */
858 TAILQ_INIT(from_list);
859 }
860 }
861
862 struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
863 {
864 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
865
866 return ctx;
867 }
868
869 /* Dequeue a context block from the head of a list */
870 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
871 {
872 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
873
874 if (ctx)
875 TAILQ_REMOVE(q, ctx, zd_q_entries);
876
877 return ctx;
878 }
879
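/* Illustrative sketch (hypothetical caller, not code from this file): a
 * typical context lifecycle pairs dplane_ctx_alloc() with dplane_ctx_fini(),
 * and uses the queue helpers above to move contexts between modules:
 *
 *     struct dplane_ctx_q q;
 *     struct zebra_dplane_ctx *ctx;
 *
 *     TAILQ_INIT(&q);
 *
 *     ctx = dplane_ctx_alloc();
 *     dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_INSTALL);
 *     dplane_ctx_enqueue_tail(&q, ctx);
 *
 *     while ((ctx = dplane_ctx_dequeue(&q)) != NULL)
 *             dplane_ctx_fini(&ctx);
 */
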
880 /*
881 * Accessors for information from the context object
882 */
883 enum zebra_dplane_result dplane_ctx_get_status(
884 const struct zebra_dplane_ctx *ctx)
885 {
886 DPLANE_CTX_VALID(ctx);
887
888 return ctx->zd_status;
889 }
890
891 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
892 enum zebra_dplane_result status)
893 {
894 DPLANE_CTX_VALID(ctx);
895
896 ctx->zd_status = status;
897 }
898
899 /* Retrieve last/current provider id */
900 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
901 {
902 DPLANE_CTX_VALID(ctx);
903 return ctx->zd_provider;
904 }
905
906 /* Providers that run before the kernel can control whether a kernel
907 * update should be done.
908 */
909 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
910 {
911 DPLANE_CTX_VALID(ctx);
912
913 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
914 }
915
916 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
917 {
918 DPLANE_CTX_VALID(ctx);
919
920 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
921 }
922
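/* Illustrative sketch (hypothetical provider logic): a provider running ahead
 * of the kernel that fully handles an update itself can ask zebra to skip the
 * kernel programming step. 'provider_handles_route()' is an assumed helper,
 * not an API in this file; 'ctx' is a context the provider has dequeued:
 *
 *     if (dplane_ctx_get_op(ctx) == DPLANE_OP_ROUTE_INSTALL &&
 *         provider_handles_route(ctx))
 *             dplane_ctx_set_skip_kernel(ctx);
 */
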
923 void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
924 {
925 DPLANE_CTX_VALID(ctx);
926 ctx->zd_op = op;
927 }
928
929 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
930 {
931 DPLANE_CTX_VALID(ctx);
932
933 return ctx->zd_op;
934 }
935
936 const char *dplane_op2str(enum dplane_op_e op)
937 {
938 const char *ret = "UNKNOWN";
939
940 switch (op) {
941 case DPLANE_OP_NONE:
942 ret = "NONE";
943 break;
944
945 /* Route update */
946 case DPLANE_OP_ROUTE_INSTALL:
947 ret = "ROUTE_INSTALL";
948 break;
949 case DPLANE_OP_ROUTE_UPDATE:
950 ret = "ROUTE_UPDATE";
951 break;
952 case DPLANE_OP_ROUTE_DELETE:
953 ret = "ROUTE_DELETE";
954 break;
955 case DPLANE_OP_ROUTE_NOTIFY:
956 ret = "ROUTE_NOTIFY";
957 break;
958
959 /* Nexthop update */
960 case DPLANE_OP_NH_INSTALL:
961 ret = "NH_INSTALL";
962 break;
963 case DPLANE_OP_NH_UPDATE:
964 ret = "NH_UPDATE";
965 break;
966 case DPLANE_OP_NH_DELETE:
967 ret = "NH_DELETE";
968 break;
969
970 case DPLANE_OP_LSP_INSTALL:
971 ret = "LSP_INSTALL";
972 break;
973 case DPLANE_OP_LSP_UPDATE:
974 ret = "LSP_UPDATE";
975 break;
976 case DPLANE_OP_LSP_DELETE:
977 ret = "LSP_DELETE";
978 break;
979 case DPLANE_OP_LSP_NOTIFY:
980 ret = "LSP_NOTIFY";
981 break;
982
983 case DPLANE_OP_PW_INSTALL:
984 ret = "PW_INSTALL";
985 break;
986 case DPLANE_OP_PW_UNINSTALL:
987 ret = "PW_UNINSTALL";
988 break;
989
990 case DPLANE_OP_SYS_ROUTE_ADD:
991 ret = "SYS_ROUTE_ADD";
992 break;
993 case DPLANE_OP_SYS_ROUTE_DELETE:
994 ret = "SYS_ROUTE_DEL";
995 break;
996
997 case DPLANE_OP_BR_PORT_UPDATE:
998 ret = "BR_PORT_UPDATE";
999 break;
1000
1001 case DPLANE_OP_ADDR_INSTALL:
1002 ret = "ADDR_INSTALL";
1003 break;
1004 case DPLANE_OP_ADDR_UNINSTALL:
1005 ret = "ADDR_UNINSTALL";
1006 break;
1007
1008 case DPLANE_OP_MAC_INSTALL:
1009 ret = "MAC_INSTALL";
1010 break;
1011 case DPLANE_OP_MAC_DELETE:
1012 ret = "MAC_DELETE";
1013 break;
1014
1015 case DPLANE_OP_NEIGH_INSTALL:
1016 ret = "NEIGH_INSTALL";
1017 break;
1018 case DPLANE_OP_NEIGH_UPDATE:
1019 ret = "NEIGH_UPDATE";
1020 break;
1021 case DPLANE_OP_NEIGH_DELETE:
1022 ret = "NEIGH_DELETE";
1023 break;
1024 case DPLANE_OP_VTEP_ADD:
1025 ret = "VTEP_ADD";
1026 break;
1027 case DPLANE_OP_VTEP_DELETE:
1028 ret = "VTEP_DELETE";
1029 break;
1030
1031 case DPLANE_OP_RULE_ADD:
1032 ret = "RULE_ADD";
1033 break;
1034 case DPLANE_OP_RULE_DELETE:
1035 ret = "RULE_DELETE";
1036 break;
1037 case DPLANE_OP_RULE_UPDATE:
1038 ret = "RULE_UPDATE";
1039 break;
1040
1041 case DPLANE_OP_NEIGH_DISCOVER:
1042 ret = "NEIGH_DISCOVER";
1043 break;
1044
1045 case DPLANE_OP_IPTABLE_ADD:
1046 ret = "IPTABLE_ADD";
1047 break;
1048 case DPLANE_OP_IPTABLE_DELETE:
1049 ret = "IPTABLE_DELETE";
1050 break;
1051 case DPLANE_OP_IPSET_ADD:
1052 ret = "IPSET_ADD";
1053 break;
1054 case DPLANE_OP_IPSET_DELETE:
1055 ret = "IPSET_DELETE";
1056 break;
1057 case DPLANE_OP_IPSET_ENTRY_ADD:
1058 ret = "IPSET_ENTRY_ADD";
1059 break;
1060 case DPLANE_OP_IPSET_ENTRY_DELETE:
1061 ret = "IPSET_ENTRY_DELETE";
1062 break;
1063 case DPLANE_OP_NEIGH_IP_INSTALL:
1064 ret = "NEIGH_IP_INSTALL";
1065 break;
1066 case DPLANE_OP_NEIGH_IP_DELETE:
1067 ret = "NEIGH_IP_DELETE";
1068 break;
1069 case DPLANE_OP_NEIGH_TABLE_UPDATE:
1070 ret = "NEIGH_TABLE_UPDATE";
1071 break;
1072
1073 case DPLANE_OP_GRE_SET:
1074 ret = "GRE_SET";
1075 break;
1076
1077 case DPLANE_OP_INTF_ADDR_ADD:
1078 return "INTF_ADDR_ADD";
1079
1080 case DPLANE_OP_INTF_ADDR_DEL:
1081 return "INTF_ADDR_DEL";
1082
1083 case DPLANE_OP_INTF_NETCONFIG:
1084 return "INTF_NETCONFIG";
1085
1086 case DPLANE_OP_INTF_INSTALL:
1087 ret = "INTF_INSTALL";
1088 break;
1089 case DPLANE_OP_INTF_UPDATE:
1090 ret = "INTF_UPDATE";
1091 break;
1092 case DPLANE_OP_INTF_DELETE:
1093 ret = "INTF_DELETE";
1094 break;
1095 }
1096
1097 return ret;
1098 }
1099
1100 const char *dplane_res2str(enum zebra_dplane_result res)
1101 {
1102 const char *ret = "<Unknown>";
1103
1104 switch (res) {
1105 case ZEBRA_DPLANE_REQUEST_FAILURE:
1106 ret = "FAILURE";
1107 break;
1108 case ZEBRA_DPLANE_REQUEST_QUEUED:
1109 ret = "QUEUED";
1110 break;
1111 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1112 ret = "SUCCESS";
1113 break;
1114 }
1115
1116 return ret;
1117 }
1118
1119 void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
1120 const struct prefix *dest)
1121 {
1122 DPLANE_CTX_VALID(ctx);
1123
1124 prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
1125 }
1126
1127 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
1128 {
1129 DPLANE_CTX_VALID(ctx);
1130
1131 return &(ctx->u.rinfo.zd_dest);
1132 }
1133
1134 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
1135 {
1136 DPLANE_CTX_VALID(ctx);
1137
1138 if (src)
1139 prefix_copy(&(ctx->u.rinfo.zd_src), src);
1140 else
1141 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
1142 }
1143
1144 /* Source prefix is a little special - return NULL for "no src prefix" */
1145 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
1146 {
1147 DPLANE_CTX_VALID(ctx);
1148
1149 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
1150 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
1151 return NULL;
1152 } else {
1153 return &(ctx->u.rinfo.zd_src);
1154 }
1155 }
1156
1157 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
1158 {
1159 DPLANE_CTX_VALID(ctx);
1160
1161 return ctx->zd_is_update;
1162 }
1163
1164 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
1165 {
1166 DPLANE_CTX_VALID(ctx);
1167
1168 return ctx->zd_seq;
1169 }
1170
1171 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
1172 {
1173 DPLANE_CTX_VALID(ctx);
1174
1175 return ctx->zd_old_seq;
1176 }
1177
1178 void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
1179 {
1180 DPLANE_CTX_VALID(ctx);
1181
1182 ctx->zd_vrf_id = vrf;
1183 }
1184
1185 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
1186 {
1187 DPLANE_CTX_VALID(ctx);
1188
1189 return ctx->zd_vrf_id;
1190 }
1191
1192 /* In some paths we have only a namespace id */
1193 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid)
1194 {
1195 DPLANE_CTX_VALID(ctx);
1196
1197 ctx->zd_ns_info.ns_id = nsid;
1198 }
1199
1200 ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx)
1201 {
1202 DPLANE_CTX_VALID(ctx);
1203
1204 return ctx->zd_ns_info.ns_id;
1205 }
1206
1207 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
1208 {
1209 DPLANE_CTX_VALID(ctx);
1210
1211 return (ctx->zd_notif_provider != 0);
1212 }
1213
1214 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
1215 {
1216 DPLANE_CTX_VALID(ctx);
1217
1218 return ctx->zd_notif_provider;
1219 }
1220
1221 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
1222 uint32_t id)
1223 {
1224 DPLANE_CTX_VALID(ctx);
1225
1226 ctx->zd_notif_provider = id;
1227 }
1228
1229 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
1230 {
1231 DPLANE_CTX_VALID(ctx);
1232
1233 return ctx->zd_ifname;
1234 }
1235
1236 void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname)
1237 {
1238 DPLANE_CTX_VALID(ctx);
1239
1240 if (!ifname)
1241 return;
1242
1243 strlcpy(ctx->zd_ifname, ifname, sizeof(ctx->zd_ifname));
1244 }
1245
1246 ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
1247 {
1248 DPLANE_CTX_VALID(ctx);
1249
1250 return ctx->zd_ifindex;
1251 }
1252
1253 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex)
1254 {
1255 DPLANE_CTX_VALID(ctx);
1256
1257 ctx->zd_ifindex = ifindex;
1258 }
1259
1260 void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
1261 {
1262 DPLANE_CTX_VALID(ctx);
1263
1264 ctx->u.rinfo.zd_type = type;
1265 }
1266
1267 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
1268 {
1269 DPLANE_CTX_VALID(ctx);
1270
1271 return ctx->u.rinfo.zd_type;
1272 }
1273
1274 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
1275 {
1276 DPLANE_CTX_VALID(ctx);
1277
1278 return ctx->u.rinfo.zd_old_type;
1279 }
1280
1281 void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
1282 {
1283 DPLANE_CTX_VALID(ctx);
1284
1285 ctx->u.rinfo.zd_afi = afi;
1286 }
1287
1288 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
1289 {
1290 DPLANE_CTX_VALID(ctx);
1291
1292 return ctx->u.rinfo.zd_afi;
1293 }
1294
1295 void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
1296 {
1297 DPLANE_CTX_VALID(ctx);
1298
1299 ctx->u.rinfo.zd_safi = safi;
1300 }
1301
1302 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
1303 {
1304 DPLANE_CTX_VALID(ctx);
1305
1306 return ctx->u.rinfo.zd_safi;
1307 }
1308
1309 void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
1310 {
1311 DPLANE_CTX_VALID(ctx);
1312
1313 ctx->zd_table_id = table;
1314 }
1315
1316 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
1317 {
1318 DPLANE_CTX_VALID(ctx);
1319
1320 return ctx->zd_table_id;
1321 }
1322
1323 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
1324 {
1325 DPLANE_CTX_VALID(ctx);
1326
1327 return ctx->u.rinfo.zd_tag;
1328 }
1329
1330 void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
1331 {
1332 DPLANE_CTX_VALID(ctx);
1333
1334 ctx->u.rinfo.zd_tag = tag;
1335 }
1336
1337 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
1338 {
1339 DPLANE_CTX_VALID(ctx);
1340
1341 return ctx->u.rinfo.zd_old_tag;
1342 }
1343
1344 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
1345 {
1346 DPLANE_CTX_VALID(ctx);
1347
1348 return ctx->u.rinfo.zd_instance;
1349 }
1350
1351 void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
1352 {
1353 DPLANE_CTX_VALID(ctx);
1354
1355 ctx->u.rinfo.zd_instance = instance;
1356 }
1357
1358 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
1359 {
1360 DPLANE_CTX_VALID(ctx);
1361
1362 return ctx->u.rinfo.zd_old_instance;
1363 }
1364
1365 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
1366 {
1367 DPLANE_CTX_VALID(ctx);
1368
1369 return ctx->u.rinfo.zd_metric;
1370 }
1371
1372 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
1373 {
1374 DPLANE_CTX_VALID(ctx);
1375
1376 return ctx->u.rinfo.zd_old_metric;
1377 }
1378
1379 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
1380 {
1381 DPLANE_CTX_VALID(ctx);
1382
1383 return ctx->u.rinfo.zd_mtu;
1384 }
1385
1386 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
1387 {
1388 DPLANE_CTX_VALID(ctx);
1389
1390 return ctx->u.rinfo.zd_nexthop_mtu;
1391 }
1392
1393 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
1394 {
1395 DPLANE_CTX_VALID(ctx);
1396
1397 return ctx->u.rinfo.zd_distance;
1398 }
1399
1400 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
1401 {
1402 DPLANE_CTX_VALID(ctx);
1403
1404 ctx->u.rinfo.zd_distance = distance;
1405 }
1406
1407 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
1408 {
1409 DPLANE_CTX_VALID(ctx);
1410
1411 return ctx->u.rinfo.zd_old_distance;
1412 }
1413
1414 /*
1415 * Set the nexthops associated with a context: note that processing code
1416 * may well expect that nexthops are in canonical (sorted) order, so we
1417 * will enforce that here.
1418 */
1419 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1420 {
1421 DPLANE_CTX_VALID(ctx);
1422
1423 if (ctx->u.rinfo.zd_ng.nexthop) {
1424 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1425 ctx->u.rinfo.zd_ng.nexthop = NULL;
1426 }
1427 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1428 }
1429
1430 /*
1431 * Set the list of backup nexthops; their ordering is preserved (they're not
1432  * re-sorted).
1433 */
1434 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
1435 const struct nexthop_group *nhg)
1436 {
1437 struct nexthop *nh, *last_nh, *nexthop;
1438
1439 DPLANE_CTX_VALID(ctx);
1440
1441 if (ctx->u.rinfo.backup_ng.nexthop) {
1442 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
1443 ctx->u.rinfo.backup_ng.nexthop = NULL;
1444 }
1445
1446 last_nh = NULL;
1447
1448 /* Be careful to preserve the order of the backup list */
1449 for (nh = nhg->nexthop; nh; nh = nh->next) {
1450 nexthop = nexthop_dup(nh, NULL);
1451
1452 if (last_nh)
1453 NEXTHOP_APPEND(last_nh, nexthop);
1454 else
1455 ctx->u.rinfo.backup_ng.nexthop = nexthop;
1456
1457 last_nh = nexthop;
1458 }
1459 }
1460
1461 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1462 {
1463 DPLANE_CTX_VALID(ctx);
1464 return ctx->u.rinfo.zd_nhg_id;
1465 }
1466
1467 const struct nexthop_group *dplane_ctx_get_ng(
1468 const struct zebra_dplane_ctx *ctx)
1469 {
1470 DPLANE_CTX_VALID(ctx);
1471
1472 return &(ctx->u.rinfo.zd_ng);
1473 }
1474
1475 const struct nexthop_group *
1476 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
1477 {
1478 DPLANE_CTX_VALID(ctx);
1479
1480 return &(ctx->u.rinfo.backup_ng);
1481 }
1482
1483 const struct nexthop_group *
1484 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
1485 {
1486 DPLANE_CTX_VALID(ctx);
1487
1488 return &(ctx->u.rinfo.zd_old_ng);
1489 }
1490
1491 const struct nexthop_group *
1492 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
1493 {
1494 DPLANE_CTX_VALID(ctx);
1495
1496 return &(ctx->u.rinfo.old_backup_ng);
1497 }
1498
1499 const struct zebra_dplane_info *dplane_ctx_get_ns(
1500 const struct zebra_dplane_ctx *ctx)
1501 {
1502 DPLANE_CTX_VALID(ctx);
1503
1504 return &(ctx->zd_ns_info);
1505 }
1506
1507 int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx *ctx)
1508 {
1509 DPLANE_CTX_VALID(ctx);
1510
1511 #ifdef HAVE_NETLINK
1512 return ctx->zd_ns_info.sock;
1513 #else
1514 return -1;
1515 #endif
1516 }
1517
1518 /* Accessors for nexthop information */
1519 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
1520 {
1521 DPLANE_CTX_VALID(ctx);
1522 return ctx->u.rinfo.nhe.id;
1523 }
1524
1525 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
1526 {
1527 DPLANE_CTX_VALID(ctx);
1528 return ctx->u.rinfo.nhe.old_id;
1529 }
1530
1531 afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
1532 {
1533 DPLANE_CTX_VALID(ctx);
1534 return ctx->u.rinfo.nhe.afi;
1535 }
1536
1537 vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
1538 {
1539 DPLANE_CTX_VALID(ctx);
1540 return ctx->u.rinfo.nhe.vrf_id;
1541 }
1542
1543 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
1544 {
1545 DPLANE_CTX_VALID(ctx);
1546 return ctx->u.rinfo.nhe.type;
1547 }
1548
1549 const struct nexthop_group *
1550 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
1551 {
1552 DPLANE_CTX_VALID(ctx);
1553 return &(ctx->u.rinfo.nhe.ng);
1554 }
1555
1556 const struct nh_grp *
1557 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
1558 {
1559 DPLANE_CTX_VALID(ctx);
1560 return ctx->u.rinfo.nhe.nh_grp;
1561 }
1562
1563 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
1564 {
1565 DPLANE_CTX_VALID(ctx);
1566 return ctx->u.rinfo.nhe.nh_grp_count;
1567 }
1568
1569 /* Accessors for LSP information */
1570
1571 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
1572 {
1573 DPLANE_CTX_VALID(ctx);
1574
1575 return ctx->u.lsp.ile.in_label;
1576 }
1577
1578 void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
1579 {
1580 DPLANE_CTX_VALID(ctx);
1581
1582 ctx->u.lsp.ile.in_label = label;
1583 }
1584
1585 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
1586 {
1587 DPLANE_CTX_VALID(ctx);
1588
1589 return ctx->u.lsp.addr_family;
1590 }
1591
1592 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
1593 uint8_t family)
1594 {
1595 DPLANE_CTX_VALID(ctx);
1596
1597 ctx->u.lsp.addr_family = family;
1598 }
1599
1600 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
1601 {
1602 DPLANE_CTX_VALID(ctx);
1603
1604 return ctx->u.lsp.flags;
1605 }
1606
1607 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
1608 uint32_t flags)
1609 {
1610 DPLANE_CTX_VALID(ctx);
1611
1612 ctx->u.lsp.flags = flags;
1613 }
1614
1615 const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
1616 const struct zebra_dplane_ctx *ctx)
1617 {
1618 DPLANE_CTX_VALID(ctx);
1619 return &(ctx->u.lsp.nhlfe_list);
1620 }
1621
1622 const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
1623 const struct zebra_dplane_ctx *ctx)
1624 {
1625 DPLANE_CTX_VALID(ctx);
1626 return &(ctx->u.lsp.backup_nhlfe_list);
1627 }
1628
1629 struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1630 enum lsp_types_t lsp_type,
1631 enum nexthop_types_t nh_type,
1632 const union g_addr *gate,
1633 ifindex_t ifindex, uint8_t num_labels,
1634 mpls_label_t *out_labels)
1635 {
1636 struct zebra_nhlfe *nhlfe;
1637
1638 DPLANE_CTX_VALID(ctx);
1639
1640 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1641 lsp_type, nh_type, gate,
1642 ifindex, num_labels, out_labels);
1643
1644 return nhlfe;
1645 }
1646
1647 struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
1648 struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
1649 enum nexthop_types_t nh_type, const union g_addr *gate,
1650 ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
1651 {
1652 struct zebra_nhlfe *nhlfe;
1653
1654 DPLANE_CTX_VALID(ctx);
1655
1656 nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
1657 lsp_type, nh_type, gate,
1658 ifindex, num_labels,
1659 out_labels);
1660
1661 return nhlfe;
1662 }
1663
1664 const struct zebra_nhlfe *
1665 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
1666 {
1667 DPLANE_CTX_VALID(ctx);
1668
1669 return ctx->u.lsp.best_nhlfe;
1670 }
1671
1672 const struct zebra_nhlfe *
1673 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
1674 struct zebra_nhlfe *nhlfe)
1675 {
1676 DPLANE_CTX_VALID(ctx);
1677
1678 ctx->u.lsp.best_nhlfe = nhlfe;
1679 return ctx->u.lsp.best_nhlfe;
1680 }
1681
1682 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
1683 {
1684 DPLANE_CTX_VALID(ctx);
1685
1686 return ctx->u.lsp.num_ecmp;
1687 }
1688
1689 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
1690 {
1691 DPLANE_CTX_VALID(ctx);
1692
1693 return ctx->u.pw.local_label;
1694 }
1695
1696 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
1697 {
1698 DPLANE_CTX_VALID(ctx);
1699
1700 return ctx->u.pw.remote_label;
1701 }
1702
1703 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
1704 {
1705 DPLANE_CTX_VALID(ctx);
1706
1707 return ctx->u.pw.type;
1708 }
1709
1710 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
1711 {
1712 DPLANE_CTX_VALID(ctx);
1713
1714 return ctx->u.pw.af;
1715 }
1716
1717 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
1718 {
1719 DPLANE_CTX_VALID(ctx);
1720
1721 return ctx->u.pw.flags;
1722 }
1723
1724 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
1725 {
1726 DPLANE_CTX_VALID(ctx);
1727
1728 return ctx->u.pw.status;
1729 }
1730
1731 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
1732 {
1733 DPLANE_CTX_VALID(ctx);
1734
1735 ctx->u.pw.status = status;
1736 }
1737
1738 const union g_addr *dplane_ctx_get_pw_dest(
1739 const struct zebra_dplane_ctx *ctx)
1740 {
1741 DPLANE_CTX_VALID(ctx);
1742
1743 return &(ctx->u.pw.dest);
1744 }
1745
1746 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
1747 const struct zebra_dplane_ctx *ctx)
1748 {
1749 DPLANE_CTX_VALID(ctx);
1750
1751 return &(ctx->u.pw.fields);
1752 }
1753
1754 const struct nexthop_group *
1755 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
1756 {
1757 DPLANE_CTX_VALID(ctx);
1758
1759 return &(ctx->u.pw.fib_nhg);
1760 }
1761
1762 const struct nexthop_group *
1763 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
1764 {
1765 DPLANE_CTX_VALID(ctx);
1766
1767 return &(ctx->u.pw.primary_nhg);
1768 }
1769
1770 const struct nexthop_group *
1771 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
1772 {
1773 DPLANE_CTX_VALID(ctx);
1774
1775 return &(ctx->u.pw.backup_nhg);
1776 }
1777
1778 /* Accessors for interface information */
1779 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
1780 {
1781 DPLANE_CTX_VALID(ctx);
1782
1783 return ctx->u.intf.metric;
1784 }
1785
1786 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
1787 {
1788 DPLANE_CTX_VALID(ctx);
1789
1790 ctx->u.intf.metric = metric;
1791 }
1792
1793 uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx *ctx)
1794 {
1795 DPLANE_CTX_VALID(ctx);
1796
1797 return ctx->u.intf.pd_reason_val;
1798 }
1799
1800 void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx *ctx, bool val)
1801 {
1802 DPLANE_CTX_VALID(ctx);
1803
1804 ctx->u.intf.pd_reason_val = val;
1805 }
1806
1807 bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx *ctx)
1808 {
1809 DPLANE_CTX_VALID(ctx);
1810
1811 return ctx->u.intf.protodown;
1812 }
1813
1814 /* Is interface addr p2p? */
1815 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
1816 {
1817 DPLANE_CTX_VALID(ctx);
1818
1819 return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
1820 }
1821
1822 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
1823 {
1824 DPLANE_CTX_VALID(ctx);
1825
1826 return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
1827 }
1828
1829 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
1830 {
1831 DPLANE_CTX_VALID(ctx);
1832
1833 return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
1834 }
1835
1836 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
1837 {
1838 DPLANE_CTX_VALID(ctx);
1839
1840 ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
1841 }
1842
1843 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
1844 {
1845 DPLANE_CTX_VALID(ctx);
1846
1847 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
1848 }
1849
1850 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
1851 {
1852 DPLANE_CTX_VALID(ctx);
1853
1854 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
1855 }
1856
1857 const struct prefix *dplane_ctx_get_intf_addr(
1858 const struct zebra_dplane_ctx *ctx)
1859 {
1860 DPLANE_CTX_VALID(ctx);
1861
1862 return &(ctx->u.intf.prefix);
1863 }
1864
1865 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
1866 const struct prefix *p)
1867 {
1868 DPLANE_CTX_VALID(ctx);
1869
1870 prefix_copy(&(ctx->u.intf.prefix), p);
1871 }
1872
1873 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
1874 {
1875 DPLANE_CTX_VALID(ctx);
1876
1877 return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
1878 }
1879
1880 const struct prefix *dplane_ctx_get_intf_dest(
1881 const struct zebra_dplane_ctx *ctx)
1882 {
1883 DPLANE_CTX_VALID(ctx);
1884
1885 return &(ctx->u.intf.dest_prefix);
1886 }
1887
1888 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
1889 const struct prefix *p)
1890 {
1891 DPLANE_CTX_VALID(ctx);
1892
1893 prefix_copy(&(ctx->u.intf.dest_prefix), p);
1894 }
1895
1896 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
1897 {
1898 DPLANE_CTX_VALID(ctx);
1899
1900 return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
1901 }
1902
1903 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
1904 {
1905 DPLANE_CTX_VALID(ctx);
1906
1907 return ctx->u.intf.label;
1908 }
1909
1910 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label)
1911 {
1912 size_t len;
1913
1914 DPLANE_CTX_VALID(ctx);
1915
1916 if (ctx->u.intf.label && ctx->u.intf.label != ctx->u.intf.label_buf)
1917 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
1918
1919 ctx->u.intf.label = NULL;
1920
1921 if (label) {
1922 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
1923
1924 /* Use embedded buffer if it's adequate; else allocate. */
1925 len = strlen(label);
1926
1927 if (len < sizeof(ctx->u.intf.label_buf)) {
1928 strlcpy(ctx->u.intf.label_buf, label,
1929 sizeof(ctx->u.intf.label_buf));
1930 ctx->u.intf.label = ctx->u.intf.label_buf;
1931 } else {
1932 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, label);
1933 }
1934 } else {
1935 ctx->u.intf.flags &= ~DPLANE_INTF_HAS_LABEL;
1936 }
1937 }
1938
1939 /* Accessors for MAC information */
1940 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1941 {
1942 DPLANE_CTX_VALID(ctx);
1943 return ctx->u.macinfo.vid;
1944 }
1945
1946 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1947 {
1948 DPLANE_CTX_VALID(ctx);
1949 return ctx->u.macinfo.is_sticky;
1950 }
1951
1952 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1953 {
1954 DPLANE_CTX_VALID(ctx);
1955 return ctx->u.macinfo.nhg_id;
1956 }
1957
1958 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
1959 {
1960 DPLANE_CTX_VALID(ctx);
1961 return ctx->u.macinfo.update_flags;
1962 }
1963
1964 const struct ethaddr *dplane_ctx_mac_get_addr(
1965 const struct zebra_dplane_ctx *ctx)
1966 {
1967 DPLANE_CTX_VALID(ctx);
1968 return &(ctx->u.macinfo.mac);
1969 }
1970
1971 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1972 const struct zebra_dplane_ctx *ctx)
1973 {
1974 DPLANE_CTX_VALID(ctx);
1975 return &(ctx->u.macinfo.vtep_ip);
1976 }
1977
1978 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1979 {
1980 DPLANE_CTX_VALID(ctx);
1981 return ctx->u.macinfo.br_ifindex;
1982 }
1983
1984 /* Accessors for neighbor information */
1985 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1986 const struct zebra_dplane_ctx *ctx)
1987 {
1988 DPLANE_CTX_VALID(ctx);
1989 return &(ctx->u.neigh.ip_addr);
1990 }
1991
1992 const struct ipaddr *
1993 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
1994 {
1995 DPLANE_CTX_VALID(ctx);
1996 return &(ctx->u.neigh.link.ip_addr);
1997 }
1998
1999 const struct ethaddr *dplane_ctx_neigh_get_mac(
2000 const struct zebra_dplane_ctx *ctx)
2001 {
2002 DPLANE_CTX_VALID(ctx);
2003 return &(ctx->u.neigh.link.mac);
2004 }
2005
2006 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
2007 {
2008 DPLANE_CTX_VALID(ctx);
2009 return ctx->u.neigh.flags;
2010 }
2011
2012 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
2013 {
2014 DPLANE_CTX_VALID(ctx);
2015 return ctx->u.neigh.state;
2016 }
2017
2018 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
2019 {
2020 DPLANE_CTX_VALID(ctx);
2021 return ctx->u.neigh.update_flags;
2022 }
2023
2024 /* Accessor for GRE set */
2025 uint32_t
2026 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx)
2027 {
2028 DPLANE_CTX_VALID(ctx);
2029
2030 return ctx->u.gre.link_ifindex;
2031 }
2032
2033 unsigned int
2034 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx)
2035 {
2036 DPLANE_CTX_VALID(ctx);
2037
2038 return ctx->u.gre.mtu;
2039 }
2040
2041 const struct zebra_l2info_gre *
2042 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx)
2043 {
2044 DPLANE_CTX_VALID(ctx);
2045
2046 return &ctx->u.gre.info;
2047 }
2048
2049 /* Accessors for PBR rule information */
2050 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
2051 {
2052 DPLANE_CTX_VALID(ctx);
2053
2054 return ctx->u.rule.sock;
2055 }
2056
2057 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
2058 {
2059 DPLANE_CTX_VALID(ctx);
2060
2061 return ctx->u.rule.new.ifname;
2062 }
2063
2064 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
2065 {
2066 DPLANE_CTX_VALID(ctx);
2067
2068 return ctx->u.rule.unique;
2069 }
2070
2071 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
2072 {
2073 DPLANE_CTX_VALID(ctx);
2074
2075 return ctx->u.rule.seq;
2076 }
2077
2078 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
2079 {
2080 DPLANE_CTX_VALID(ctx);
2081
2082 return ctx->u.rule.new.priority;
2083 }
2084
2085 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
2086 {
2087 DPLANE_CTX_VALID(ctx);
2088
2089 return ctx->u.rule.old.priority;
2090 }
2091
2092 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
2093 {
2094 DPLANE_CTX_VALID(ctx);
2095
2096 return ctx->u.rule.new.table;
2097 }
2098
2099 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
2100 {
2101 DPLANE_CTX_VALID(ctx);
2102
2103 return ctx->u.rule.old.table;
2104 }
2105
2106 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
2107 {
2108 DPLANE_CTX_VALID(ctx);
2109
2110 return ctx->u.rule.new.filter_bm;
2111 }
2112
2113 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
2114 {
2115 DPLANE_CTX_VALID(ctx);
2116
2117 return ctx->u.rule.old.filter_bm;
2118 }
2119
2120 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
2121 {
2122 DPLANE_CTX_VALID(ctx);
2123
2124 return ctx->u.rule.new.fwmark;
2125 }
2126
2127 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
2128 {
2129 DPLANE_CTX_VALID(ctx);
2130
2131 return ctx->u.rule.old.fwmark;
2132 }
2133
2134 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
2135 {
2136 DPLANE_CTX_VALID(ctx);
2137
2138 return ctx->u.rule.new.ip_proto;
2139 }
2140
2141 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
2142 {
2143 DPLANE_CTX_VALID(ctx);
2144
2145 return ctx->u.rule.old.ip_proto;
2146 }
2147
2148 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
2149 {
2150 DPLANE_CTX_VALID(ctx);
2151
2152 return ctx->u.rule.new.dsfield;
2153 }
2154
2155 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
2156 {
2157 DPLANE_CTX_VALID(ctx);
2158
2159 return ctx->u.rule.old.dsfield;
2160 }
2161
2162 const struct prefix *
2163 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
2164 {
2165 DPLANE_CTX_VALID(ctx);
2166
2167 return &(ctx->u.rule.new.src_ip);
2168 }
2169
2170 const struct prefix *
2171 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
2172 {
2173 DPLANE_CTX_VALID(ctx);
2174
2175 return &(ctx->u.rule.old.src_ip);
2176 }
2177
2178 const struct prefix *
2179 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
2180 {
2181 DPLANE_CTX_VALID(ctx);
2182
2183 return &(ctx->u.rule.new.dst_ip);
2184 }
2185
2186 const struct prefix *
2187 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
2188 {
2189 DPLANE_CTX_VALID(ctx);
2190
2191 return &(ctx->u.rule.old.dst_ip);
2192 }
2193
2194 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx)
2195 {
2196 DPLANE_CTX_VALID(ctx);
2197
2198 return ctx->u.br_port.flags;
2199 }
2200
2201 uint32_t
2202 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx)
2203 {
2204 DPLANE_CTX_VALID(ctx);
2205
2206 return ctx->u.br_port.sph_filter_cnt;
2207 }
2208
2209 const struct in_addr *
2210 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx)
2211 {
2212 DPLANE_CTX_VALID(ctx);
2213
2214 return ctx->u.br_port.sph_filters;
2215 }
2216
2217 uint32_t
2218 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx)
2219 {
2220 DPLANE_CTX_VALID(ctx);
2221
2222 return ctx->u.br_port.backup_nhg_id;
2223 }
2224
2225 /* Accessors for PBR iptable information */
2226 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
2227 struct zebra_pbr_iptable *table)
2228 {
2229 DPLANE_CTX_VALID(ctx);
2230
2231 memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
2232 }
2233
2234 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
2235 struct zebra_pbr_ipset *ipset)
2236 {
2237 DPLANE_CTX_VALID(ctx);
2238
2239 assert(ipset);
2240
2241 if (ctx->zd_op == DPLANE_OP_IPSET_ENTRY_ADD ||
2242 ctx->zd_op == DPLANE_OP_IPSET_ENTRY_DELETE) {
2243 memset(ipset, 0, sizeof(struct zebra_pbr_ipset));
2244 ipset->type = ctx->u.ipset_entry.info.type;
2245 ipset->family = ctx->u.ipset_entry.info.family;
2246 memcpy(&ipset->ipset_name, &ctx->u.ipset_entry.info.ipset_name,
2247 ZEBRA_IPSET_NAME_SIZE);
2248 } else
2249 memcpy(ipset, &ctx->u.ipset, sizeof(struct zebra_pbr_ipset));
2250 }
2251
2252 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
2253 struct zebra_pbr_ipset_entry *entry)
2254 {
2255 DPLANE_CTX_VALID(ctx);
2256
2257 assert(entry);
2258
2259 memcpy(entry, &ctx->u.ipset_entry.entry, sizeof(struct zebra_pbr_ipset_entry));
2260 }
2261
2262 /*
2263 * End of dplane context accessors
2264 */
2265
2266 /* Optional extra info about interfaces in nexthops - a plugin must enable
2267 * this extra info.
2268 */
2269 const struct dplane_intf_extra *
2270 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
2271 {
2272 return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
2273 }
2274
2275 const struct dplane_intf_extra *
2276 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
2277 const struct dplane_intf_extra *ptr)
2278 {
2279 return TAILQ_NEXT(ptr, link);
2280 }
2281
2282 vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
2283 {
2284 return ptr->vrf_id;
2285 }
2286
2287 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr)
2288 {
2289 return ptr->ifindex;
2290 }
2291
2292 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr)
2293 {
2294 return ptr->flags;
2295 }
2296
2297 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr)
2298 {
2299 return ptr->status;
2300 }
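
/*
* A minimal usage sketch of the interface extra-info accessors above
* (hedged: the plugin callback name is hypothetical and not part of this
* file). A dataplane plugin that has enabled collection of this info can
* walk the per-route list when it dequeues a route update context:
*
*	static void example_plugin_route_cb(struct zebra_dplane_ctx *ctx)
*	{
*		const struct dplane_intf_extra *extra;
*
*		for (extra = dplane_ctx_get_intf_extra(ctx); extra;
*		     extra = dplane_ctx_intf_extra_next(ctx, extra))
*			zlog_debug("nh intf %u (vrf %u), flags 0x%x",
*				   dplane_intf_extra_get_ifindex(extra),
*				   dplane_intf_extra_get_vrfid(extra),
*				   dplane_intf_extra_get_flags(extra));
*	}
*/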
2301
2302 /*
2303 * End of interface extra info accessors
2304 */
2305
2306 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx)
2307 {
2308 DPLANE_CTX_VALID(ctx);
2309
2310 return ctx->u.neightable.family;
2311 }
2312
2313 uint32_t
2314 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx)
2315 {
2316 DPLANE_CTX_VALID(ctx);
2317
2318 return ctx->u.neightable.app_probes;
2319 }
2320
2321 uint32_t
2322 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx)
2323 {
2324 DPLANE_CTX_VALID(ctx);
2325
2326 return ctx->u.neightable.ucast_probes;
2327 }
2328
2329 uint32_t
2330 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
2331 {
2332 DPLANE_CTX_VALID(ctx);
2333
2334 return ctx->u.neightable.mcast_probes;
2335 }
2336
2337 ifindex_t dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx *ctx)
2338 {
2339 DPLANE_CTX_VALID(ctx);
2340
2341 return ctx->u.netconf.ifindex;
2342 }
2343
2344 ns_id_t dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx *ctx)
2345 {
2346 DPLANE_CTX_VALID(ctx);
2347
2348 return ctx->u.netconf.ns_id;
2349 }
2350
2351 void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx *ctx,
2352 ifindex_t ifindex)
2353 {
2354 DPLANE_CTX_VALID(ctx);
2355
2356 ctx->u.netconf.ifindex = ifindex;
2357 }
2358
2359 void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t ns_id)
2360 {
2361 DPLANE_CTX_VALID(ctx);
2362
2363 ctx->u.netconf.ns_id = ns_id;
2364 }
2365
2366 enum dplane_netconf_status_e
2367 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
2368 {
2369 DPLANE_CTX_VALID(ctx);
2370
2371 return ctx->u.netconf.mpls_val;
2372 }
2373
2374 enum dplane_netconf_status_e
2375 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
2376 {
2377 DPLANE_CTX_VALID(ctx);
2378
2379 return ctx->u.netconf.mcast_val;
2380 }
2381
2382 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
2383 enum dplane_netconf_status_e val)
2384 {
2385 DPLANE_CTX_VALID(ctx);
2386
2387 ctx->u.netconf.mpls_val = val;
2388 }
2389
2390 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
2391 enum dplane_netconf_status_e val)
2392 {
2393 DPLANE_CTX_VALID(ctx);
2394
2395 ctx->u.netconf.mcast_val = val;
2396 }
2397
2398 /*
2399 * Retrieve the limit on the number of pending, unprocessed updates.
2400 */
2401 uint32_t dplane_get_in_queue_limit(void)
2402 {
2403 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2404 memory_order_relaxed);
2405 }
2406
2407 /*
2408 * Configure limit on the number of pending, queued updates.
2409 */
2410 void dplane_set_in_queue_limit(uint32_t limit, bool set)
2411 {
2412 /* Reset to default on 'unset' */
2413 if (!set)
2414 limit = DPLANE_DEFAULT_MAX_QUEUED;
2415
2416 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
2417 memory_order_relaxed);
2418 }
2419
2420 /*
2421 * Retrieve the current queue depth of incoming, unprocessed updates
2422 */
2423 uint32_t dplane_get_in_queue_len(void)
2424 {
2425 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
2426 memory_order_seq_cst);
2427 }
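
/*
* A minimal sketch (hedged; the helper name is hypothetical) of how a
* caller in the zebra main pthread could use the two accessors above for
* simple back-pressure before producing more work:
*
*	static bool example_dplane_is_congested(void)
*	{
*		return dplane_get_in_queue_len() >=
*		       dplane_get_in_queue_limit();
*	}
*/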
2428
2429 /*
2430 * Internal helper that copies information from a zebra ns object; this is
2431 * called in the zebra main pthread context as part of dplane ctx init.
2432 */
2433 static void ctx_info_from_zns(struct zebra_dplane_info *ns_info,
2434 struct zebra_ns *zns)
2435 {
2436 ns_info->ns_id = zns->ns_id;
2437
2438 #if defined(HAVE_NETLINK)
2439 ns_info->is_cmd = true;
2440 ns_info->sock = zns->netlink_dplane_out.sock;
2441 ns_info->seq = zns->netlink_dplane_out.seq;
2442 #endif /* HAVE_NETLINK */
2443 }
2444
2445 /*
2446 * Common dataplane context init with zebra namespace info.
2447 */
2448 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
2449 struct zebra_ns *zns,
2450 bool is_update)
2451 {
2452 ctx_info_from_zns(&(ctx->zd_ns_info), zns);
2453
2454 ctx->zd_is_update = is_update;
2455
2456 #if defined(HAVE_NETLINK)
2457 /* Increment message counter after copying to context struct - may need
2458 * two messages in some 'update' cases.
2459 */
2460 if (is_update)
2461 zns->netlink_dplane_out.seq += 2;
2462 else
2463 zns->netlink_dplane_out.seq++;
2464 #endif /* HAVE_NETLINK */
2465
2466 return AOK;
2467 }
2468
2469 /*
2470 * Initialize a context block for a route update from zebra data structs.
2471 */
2472 int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2473 struct route_node *rn, struct route_entry *re)
2474 {
2475 int ret = EINVAL;
2476 const struct route_table *table = NULL;
2477 const struct rib_table_info *info;
2478 const struct prefix *p, *src_p;
2479 struct zebra_ns *zns;
2480 struct zebra_vrf *zvrf;
2481 struct nexthop *nexthop;
2482 struct zebra_l3vni *zl3vni;
2483 const struct interface *ifp;
2484 struct dplane_intf_extra *if_extra;
2485
2486 if (!ctx || !rn || !re)
2487 goto done;
2488
2489 TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
2490
2491 ctx->zd_op = op;
2492 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2493
2494 ctx->u.rinfo.zd_type = re->type;
2495 ctx->u.rinfo.zd_old_type = re->type;
2496
2497 /* Prefixes: dest, and optional source */
2498 srcdest_rnode_prefixes(rn, &p, &src_p);
2499
2500 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
2501
2502 if (src_p)
2503 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
2504 else
2505 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
2506
2507 ctx->zd_table_id = re->table;
2508
2509 ctx->u.rinfo.zd_metric = re->metric;
2510 ctx->u.rinfo.zd_old_metric = re->metric;
2511 ctx->zd_vrf_id = re->vrf_id;
2512 ctx->u.rinfo.zd_mtu = re->mtu;
2513 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
2514 ctx->u.rinfo.zd_instance = re->instance;
2515 ctx->u.rinfo.zd_tag = re->tag;
2516 ctx->u.rinfo.zd_old_tag = re->tag;
2517 ctx->u.rinfo.zd_distance = re->distance;
2518
2519 table = srcdest_rnode_table(rn);
2520 info = table->info;
2521
2522 ctx->u.rinfo.zd_afi = info->afi;
2523 ctx->u.rinfo.zd_safi = info->safi;
2524
2525 /* Copy nexthops; recursive info is included too */
2526 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
2527 re->nhe->nhg.nexthop, NULL);
2528 ctx->u.rinfo.zd_nhg_id = re->nhe->id;
2529
2530 /* Copy backup nexthop info, if present */
2531 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2532 copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
2533 re->nhe->backup_info->nhe->nhg.nexthop, NULL);
2534 }
2535
2536 /*
2537 * Ensure that the dplane nexthops' flags are clear and copy
2538 * encapsulation information.
2539 */
2540 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) {
2541 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2542
2543 /* Optionally capture extra interface info while we're in the
2544 * main zebra pthread - a plugin has to ask for this info.
2545 */
2546 if (dplane_collect_extra_intf_info) {
2547 ifp = if_lookup_by_index(nexthop->ifindex,
2548 nexthop->vrf_id);
2549
2550 if (ifp) {
2551 if_extra = XCALLOC(
2552 MTYPE_DP_INTF,
2553 sizeof(struct dplane_intf_extra));
2554 if_extra->vrf_id = nexthop->vrf_id;
2555 if_extra->ifindex = nexthop->ifindex;
2556 if_extra->flags = ifp->flags;
2557 if_extra->status = ifp->status;
2558
2559 TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
2560 if_extra, link);
2561 }
2562 }
2563
2564 /* Check for available evpn encapsulations. */
2565 if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE))
2566 continue;
2567
2568 zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
2569 if (zl3vni && is_l3vni_oper_up(zl3vni)) {
2570 nexthop->nh_encap_type = NET_VXLAN;
2571 nexthop->nh_encap.vni = zl3vni->vni;
2572 }
2573 }
2574
2575 /* Don't need some info when capturing a system notification */
2576 if (op == DPLANE_OP_SYS_ROUTE_ADD ||
2577 op == DPLANE_OP_SYS_ROUTE_DELETE) {
2578 ret = AOK;
2579 goto done;
2580 }
2581
2582 /* Extract ns info - can't use pointers to 'core' structs */
2583 zvrf = vrf_info_lookup(re->vrf_id);
2584 zns = zvrf->zns;
2585 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
2586
2587 #ifdef HAVE_NETLINK
2588 {
2589 struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
2590
2591 ctx->u.rinfo.nhe.id = nhe->id;
2592 ctx->u.rinfo.nhe.old_id = 0;
2593 /*
2594 * Check if the nhe is installed/queued before doing anything
2595 * with this route.
2596 *
2597 * If it's a delete we only use the prefix anyway, so this only
2598 * matters for INSTALL/UPDATE.
2599 */
2600 if (zebra_nhg_kernel_nexthops_enabled()
2601 && (((op == DPLANE_OP_ROUTE_INSTALL)
2602 || (op == DPLANE_OP_ROUTE_UPDATE))
2603 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2604 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
2605 ret = ENOENT;
2606 goto done;
2607 }
2608
2609 re->nhe_installed_id = nhe->id;
2610 }
2611 #endif /* HAVE_NETLINK */
2612
2613 /* Trying out the sequence number idea, so we can try to detect
2614 * when a result is stale.
2615 */
2616 re->dplane_sequence = zebra_router_get_next_sequence();
2617 ctx->zd_seq = re->dplane_sequence;
2618
2619 ret = AOK;
2620
2621 done:
2622 return ret;
2623 }
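
/*
* Hedged sketch of how the captured sequence number is meant to be used:
* result handling lives outside this file, and the accessor name below is
* an assumption. When a route result is dequeued, comparing the context's
* sequence against the route entry's current dplane_sequence detects a
* stale result, i.e. the route changed again while the update was in
* flight:
*
*	if (re && re->dplane_sequence != dplane_ctx_get_seq(ctx)) {
*		// Stale result - don't apply it to the route entry
*	}
*/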
2624
2625 /**
2626 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2627 *
2628 * @ctx: Dataplane context to init
2629 * @op: Operation being performed
2630 * @nhe: Nexthop group hash entry
2631 *
2632 * Return: Result status
2633 */
2634 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2635 struct nhg_hash_entry *nhe)
2636 {
2637 struct zebra_vrf *zvrf = NULL;
2638 struct zebra_ns *zns = NULL;
2639 int ret = EINVAL;
2640
2641 if (!ctx || !nhe)
2642 goto done;
2643
2644 ctx->zd_op = op;
2645 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2646
2647 /* Copy over nhe info */
2648 ctx->u.rinfo.nhe.id = nhe->id;
2649 ctx->u.rinfo.nhe.afi = nhe->afi;
2650 ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
2651 ctx->u.rinfo.nhe.type = nhe->type;
2652
2653 nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), &(nhe->nhg));
2654
2655 /* If this is a group, convert it to a grp array of ids */
2656 if (!zebra_nhg_depends_is_empty(nhe)
2657 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
2658 ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
2659 ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);
2660
2661 zvrf = vrf_info_lookup(nhe->vrf_id);
2662
2663 /*
2664 * Fallback to default namespace if the vrf got ripped out from under
2665 * us.
2666 */
2667 zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
2668
2669 /*
2670 * TODO: Might not need to mark this as an update, since
2671 * it probably won't require two messages
2672 */
2673 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));
2674
2675 ret = AOK;
2676
2677 done:
2678 return ret;
2679 }
2680
2681 /**
2682 * dplane_ctx_intf_init() - Initialize a context block for an interface update
2683 *
2684 * @ctx: Dataplane context to init
2685 * @op: Operation being performed
2686 * @ifp: Interface
2687 *
2688 * Return: Result status
2689 */
2690 int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2691 const struct interface *ifp)
2692 {
2693 struct zebra_ns *zns;
2694 struct zebra_if *zif;
2695 int ret = EINVAL;
2696 bool set_pdown, unset_pdown;
2697
2698 if (!ctx || !ifp)
2699 goto done;
2700
2701 ctx->zd_op = op;
2702 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2703 ctx->zd_vrf_id = ifp->vrf->vrf_id;
2704
2705 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2706 ctx->zd_ifindex = ifp->ifindex;
2707
2708 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
2709 dplane_ctx_ns_init(ctx, zns, false);
2710
2711
2712 /* Copy over ifp info */
2713 ctx->u.intf.metric = ifp->metric;
2714 ctx->u.intf.flags = ifp->flags;
2715
2716 /* Copy over extra zebra info, if available */
2717 zif = (struct zebra_if *)ifp->info;
2718
2719 if (zif) {
2720 set_pdown = !!(zif->flags & ZIF_FLAG_SET_PROTODOWN);
2721 unset_pdown = !!(zif->flags & ZIF_FLAG_UNSET_PROTODOWN);
2722
2723 if (zif->protodown_rc &&
2724 ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif) == false)
2725 ctx->u.intf.pd_reason_val = true;
2726
2727 /*
2728 * See if we have new protodown state to set, otherwise keep
2729 * current state
2730 */
2731 if (set_pdown)
2732 ctx->u.intf.protodown = true;
2733 else if (unset_pdown)
2734 ctx->u.intf.protodown = false;
2735 else
2736 ctx->u.intf.protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
2737 }
2738
2739 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_INTF_UPDATE));
2740 ctx->zd_is_update = (op == DPLANE_OP_INTF_UPDATE);
2741
2742 ret = AOK;
2743
2744 done:
2745 return ret;
2746 }
2747
2748 /*
2749 * Capture information for an LSP update in a dplane context.
2750 */
2751 int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2752 struct zebra_lsp *lsp)
2753 {
2754 int ret = AOK;
2755 struct zebra_nhlfe *nhlfe, *new_nhlfe;
2756
2757 ctx->zd_op = op;
2758 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2759
2760 /* Capture namespace info */
2761 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2762 (op == DPLANE_OP_LSP_UPDATE));
2763
2764 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
2765
2766 nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
2767 nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
2768
2769 /* This may be called to create/init a dplane context, not necessarily
2770 * to copy an lsp object.
2771 */
2772 if (lsp == NULL) {
2773 ret = AOK;
2774 goto done;
2775 }
2776
2777 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2778 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2779 dplane_op2str(op), lsp->ile.in_label,
2780 lsp->num_ecmp);
2781
2782 ctx->u.lsp.ile = lsp->ile;
2783 ctx->u.lsp.addr_family = lsp->addr_family;
2784 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
2785 ctx->u.lsp.flags = lsp->flags;
2786
2787 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2788 frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
2789 /* Not sure if this is meaningful... */
2790 if (nhlfe->nexthop == NULL)
2791 continue;
2792
2793 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
2794 nhlfe->nexthop);
2795 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2796 ret = ENOMEM;
2797 break;
2798 }
2799
2800 /* Need to copy flags and backup info too */
2801 new_nhlfe->flags = nhlfe->flags;
2802 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2803
2804 if (CHECK_FLAG(new_nhlfe->nexthop->flags,
2805 NEXTHOP_FLAG_HAS_BACKUP)) {
2806 new_nhlfe->nexthop->backup_num =
2807 nhlfe->nexthop->backup_num;
2808 memcpy(new_nhlfe->nexthop->backup_idx,
2809 nhlfe->nexthop->backup_idx,
2810 new_nhlfe->nexthop->backup_num);
2811 }
2812
2813 if (nhlfe == lsp->best_nhlfe)
2814 ctx->u.lsp.best_nhlfe = new_nhlfe;
2815 }
2816
2817 if (ret != AOK)
2818 goto done;
2819
2820 /* Capture backup nhlfes/nexthops */
2821 frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
2822 /* Not sure if this is meaningful... */
2823 if (nhlfe->nexthop == NULL)
2824 continue;
2825
2826 new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
2827 nhlfe->type,
2828 nhlfe->nexthop);
2829 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2830 ret = ENOMEM;
2831 break;
2832 }
2833
2834 /* Need to copy flags too */
2835 new_nhlfe->flags = nhlfe->flags;
2836 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2837 }
2838
2839 /* On error the ctx will be cleaned-up, so we don't need to
2840 * deal with any allocated nhlfe or nexthop structs here.
2841 */
2842 done:
2843
2844 return ret;
2845 }
2846
2847 /*
2848 * Capture information for a pseudowire update in a dplane context.
2849 */
2850 static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
2851 enum dplane_op_e op,
2852 struct zebra_pw *pw)
2853 {
2854 int ret = EINVAL;
2855 struct prefix p;
2856 afi_t afi;
2857 struct route_table *table;
2858 struct route_node *rn;
2859 struct route_entry *re;
2860 const struct nexthop_group *nhg;
2861 struct nexthop *nh, *newnh, *last_nh;
2862
2863 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2864 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2865 dplane_op2str(op), pw->ifname, pw->local_label,
2866 pw->remote_label);
2867
2868 ctx->zd_op = op;
2869 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2870
2871 /* Capture namespace info: no netlink support as of 12/18,
2872 * but just in case...
2873 */
2874 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2875
2876 memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
2877
2878 /* This name appears to be a C string, so we use a string copy. */
2879 strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));
2880
2881 ctx->zd_vrf_id = pw->vrf_id;
2882 ctx->zd_ifindex = pw->ifindex;
2883 ctx->u.pw.type = pw->type;
2884 ctx->u.pw.af = pw->af;
2885 ctx->u.pw.local_label = pw->local_label;
2886 ctx->u.pw.remote_label = pw->remote_label;
2887 ctx->u.pw.flags = pw->flags;
2888
2889 ctx->u.pw.dest = pw->nexthop;
2890
2891 ctx->u.pw.fields = pw->data;
2892
2893 /* Capture nexthop info for the pw destination. We need to look
2894 * up and use zebra datastructs, but we're running in the zebra
2895 * pthread here so that should be ok.
2896 */
2897 memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
2898 p.family = pw->af;
2899 p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
2900
2901 afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
2902 table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
2903 if (table == NULL)
2904 goto done;
2905
2906 rn = route_node_match(table, &p);
2907 if (rn == NULL)
2908 goto done;
2909
2910 re = NULL;
2911 RNODE_FOREACH_RE(rn, re) {
2912 if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
2913 break;
2914 }
2915
2916 if (re) {
2917 /* We'll capture a 'fib' list of nexthops that meet our
2918 * criteria: installed, and labelled.
2919 */
2920 nhg = rib_get_fib_nhg(re);
2921 last_nh = NULL;
2922
2923 if (nhg && nhg->nexthop) {
2924 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2925 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2926 || CHECK_FLAG(nh->flags,
2927 NEXTHOP_FLAG_RECURSIVE)
2928 || nh->nh_label == NULL)
2929 continue;
2930
2931 newnh = nexthop_dup(nh, NULL);
2932
2933 if (last_nh)
2934 NEXTHOP_APPEND(last_nh, newnh);
2935 else
2936 ctx->u.pw.fib_nhg.nexthop = newnh;
2937 last_nh = newnh;
2938 }
2939 }
2940
2941 /* Include any installed backup nexthops also. */
2942 nhg = rib_get_fib_backup_nhg(re);
2943 if (nhg && nhg->nexthop) {
2944 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2945 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2946 || CHECK_FLAG(nh->flags,
2947 NEXTHOP_FLAG_RECURSIVE)
2948 || nh->nh_label == NULL)
2949 continue;
2950
2951 newnh = nexthop_dup(nh, NULL);
2952
2953 if (last_nh)
2954 NEXTHOP_APPEND(last_nh, newnh);
2955 else
2956 ctx->u.pw.fib_nhg.nexthop = newnh;
2957 last_nh = newnh;
2958 }
2959 }
2960
2961 /* Copy primary nexthops; recursive info is included too */
2962 assert(re->nhe != NULL); /* SA warning */
2963 copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
2964 re->nhe->nhg.nexthop, NULL);
2965 ctx->u.pw.nhg_id = re->nhe->id;
2966
2967 /* Copy backup nexthop info, if present */
2968 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2969 copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
2970 re->nhe->backup_info->nhe->nhg.nexthop,
2971 NULL);
2972 }
2973 }
2974 route_unlock_node(rn);
2975
2976 ret = AOK;
2977
2978 done:
2979 return ret;
2980 }
2981
2982 /**
2983 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2984 * PBR rule.
2985 *
2986 * @dplane_rule: Dataplane internal representation of a rule
2987 * @rule: PBR rule
2988 */
2989 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
2990 struct zebra_pbr_rule *rule)
2991 {
2992 dplane_rule->priority = rule->rule.priority;
2993 dplane_rule->table = rule->rule.action.table;
2994
2995 dplane_rule->filter_bm = rule->rule.filter.filter_bm;
2996 dplane_rule->fwmark = rule->rule.filter.fwmark;
2997 dplane_rule->dsfield = rule->rule.filter.dsfield;
2998 dplane_rule->ip_proto = rule->rule.filter.ip_proto;
2999 prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
3000 prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
3001
3002 dplane_rule->action_pcp = rule->rule.action.pcp;
3003 dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
3004 dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
3005 dplane_rule->action_queue_id = rule->rule.action.queue_id;
3006
3007 strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
3008 }
3009
3010 /**
3011 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
3012 *
3013 * @ctx: Dataplane context to init
3014 * @op: Operation being performed
3015 * @new_rule: PBR rule
3016 *
3017 * Return: Result status
3018 */
3019 static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
3020 enum dplane_op_e op,
3021 struct zebra_pbr_rule *new_rule,
3022 struct zebra_pbr_rule *old_rule)
3023 {
3024 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3025 zlog_debug(
3026 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
3027 dplane_op2str(op), new_rule->ifname,
3028 new_rule->rule.priority, new_rule->rule.filter.fwmark,
3029 &new_rule->rule.filter.src_ip,
3030 &new_rule->rule.filter.dst_ip,
3031 new_rule->rule.action.table);
3032
3033 ctx->zd_op = op;
3034 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3035
3036 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
3037 op == DPLANE_OP_RULE_UPDATE);
3038
3039 ctx->zd_vrf_id = new_rule->vrf_id;
3040 strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
3041
3042 ctx->u.rule.sock = new_rule->sock;
3043 ctx->u.rule.unique = new_rule->rule.unique;
3044 ctx->u.rule.seq = new_rule->rule.seq;
3045
3046 dplane_ctx_rule_init_single(&ctx->u.rule.new, new_rule);
3047 if (op == DPLANE_OP_RULE_UPDATE)
3048 dplane_ctx_rule_init_single(&ctx->u.rule.old, old_rule);
3049
3050 return AOK;
3051 }
3052
3053 /**
3054 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
3055 * update.
3056 *
3057 * @ctx: Dataplane context to init
3058 * @op: Operation being performed
3059 * @iptable: PBR iptable
3060 *
3061 * Return: Result status
3062 */
3063 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx *ctx,
3064 enum dplane_op_e op,
3065 struct zebra_pbr_iptable *iptable)
3066 {
3067 char *ifname;
3068 struct listnode *node;
3069
3070 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3071 zlog_debug(
3072 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
3073 dplane_op2str(op), iptable->unique, iptable->fwmark,
3074 family2str(iptable->family),
3075 iptable->action == ZEBRA_IPTABLES_DROP ? "Drop"
3076 : "Forward");
3077 }
3078
3079 ctx->zd_op = op;
3080 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3081
3082 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3083
3084 ctx->zd_vrf_id = iptable->vrf_id;
3085 memcpy(&ctx->u.iptable, iptable, sizeof(struct zebra_pbr_iptable));
3086 ctx->u.iptable.interface_name_list = NULL;
3087 if (iptable->nb_interface > 0) {
3088 ctx->u.iptable.interface_name_list = list_new();
3089 for (ALL_LIST_ELEMENTS_RO(iptable->interface_name_list, node,
3090 ifname)) {
3091 listnode_add(ctx->u.iptable.interface_name_list,
3092 XSTRDUP(MTYPE_DP_NETFILTER, ifname));
3093 }
3094 }
3095 return AOK;
3096 }
3097
3098 /**
3099 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
3100 *
3101 * @ctx: Dataplane context to init
3102 * @op: Operation being performed
3103 * @ipset: PBR ipset
3104 *
3105 * Return: Result status
3106 */
3107 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx *ctx,
3108 enum dplane_op_e op,
3109 struct zebra_pbr_ipset *ipset)
3110 {
3111 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3112 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
3113 dplane_op2str(op), ipset->ipset_name, ipset->unique,
3114 family2str(ipset->family),
3115 zebra_pbr_ipset_type2str(ipset->type));
3116 }
3117
3118 ctx->zd_op = op;
3119 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3120
3121 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3122
3123 ctx->zd_vrf_id = ipset->vrf_id;
3124
3125 memcpy(&ctx->u.ipset, ipset, sizeof(struct zebra_pbr_ipset));
3126 return AOK;
3127 }
3128
3129 /**
3130 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3131 * entry update.
3132 *
3133 * @ctx: Dataplane context to init
3134 * @op: Operation being performed
3135 * @ipset_entry: PBR ipset entry
3136 *
3137 * Return: Result status
3138 */
3139 static int
3140 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
3141 struct zebra_pbr_ipset_entry *ipset_entry)
3142 {
3143 struct zebra_pbr_ipset *ipset;
3144
3145 ipset = ipset_entry->backpointer;
3146 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3147 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3148 dplane_op2str(op), ipset->ipset_name,
3149 ipset_entry->unique, ipset_entry->filter_bm);
3150 }
3151
3152 ctx->zd_op = op;
3153 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3154
3155 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3156
3157 ctx->zd_vrf_id = ipset->vrf_id;
3158
3159 memcpy(&ctx->u.ipset_entry.entry, ipset_entry,
3160 sizeof(struct zebra_pbr_ipset_entry));
3161 ctx->u.ipset_entry.entry.backpointer = NULL;
3162 ctx->u.ipset_entry.info.type = ipset->type;
3163 ctx->u.ipset_entry.info.family = ipset->family;
3164 memcpy(&ctx->u.ipset_entry.info.ipset_name, &ipset->ipset_name,
3165 ZEBRA_IPSET_NAME_SIZE);
3166
3167 return AOK;
3168 }
3169
3170
3171 /*
3172 * Enqueue a new update and ensure an event is active for the dataplane
3173 * pthread.
3174 */
3175 static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
3176 {
3177 int ret = EINVAL;
3178 uint32_t high, curr;
3179
3180 /* Enqueue for processing by the dataplane pthread */
3181 DPLANE_LOCK();
3182 {
3183 TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
3184 zd_q_entries);
3185 }
3186 DPLANE_UNLOCK();
3187
3188 curr = atomic_fetch_add_explicit(
3189 &(zdplane_info.dg_routes_queued),
3190 1, memory_order_seq_cst);
3191
3192 curr++; /* We got the pre-incremented value */
3193
3194 /* Maybe update high-water counter also */
3195 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
3196 memory_order_seq_cst);
3197 while (high < curr) {
3198 if (atomic_compare_exchange_weak_explicit(
3199 &zdplane_info.dg_routes_queued_max,
3200 &high, curr,
3201 memory_order_seq_cst,
3202 memory_order_seq_cst))
3203 break;
3204 }
3205
3206 /* Ensure that an event for the dataplane thread is active */
3207 ret = dplane_provider_work_ready();
3208
3209 return ret;
3210 }
3211
3212 /*
3213 * Utility that prepares a route update and enqueues it for processing
3214 */
3215 static enum zebra_dplane_result
3216 dplane_route_update_internal(struct route_node *rn,
3217 struct route_entry *re,
3218 struct route_entry *old_re,
3219 enum dplane_op_e op)
3220 {
3221 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3222 int ret = EINVAL;
3223 struct zebra_dplane_ctx *ctx = NULL;
3224
3225 /* Obtain context block */
3226 ctx = dplane_ctx_alloc();
3227
3228 /* Init context with info from zebra data structs */
3229 ret = dplane_ctx_route_init(ctx, op, rn, re);
3230 if (ret == AOK) {
3231 /* Capture some extra info for update case
3232 * where there's a different 'old' route.
3233 */
3234 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
3235 old_re && (old_re != re)) {
3236
3237 old_re->dplane_sequence =
3238 zebra_router_get_next_sequence();
3239 ctx->zd_old_seq = old_re->dplane_sequence;
3240
3241 ctx->u.rinfo.zd_old_tag = old_re->tag;
3242 ctx->u.rinfo.zd_old_type = old_re->type;
3243 ctx->u.rinfo.zd_old_instance = old_re->instance;
3244 ctx->u.rinfo.zd_old_distance = old_re->distance;
3245 ctx->u.rinfo.zd_old_metric = old_re->metric;
3246 ctx->u.rinfo.nhe.old_id = old_re->nhe->id;
3247
3248 #ifndef HAVE_NETLINK
3249 /* For bsd, capture previous re's nexthops too, sigh.
3250 * We'll need these to do per-nexthop deletes.
3251 */
3252 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
3253 old_re->nhe->nhg.nexthop, NULL);
3254
3255 if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
3256 struct nexthop_group *nhg;
3257 struct nexthop **nh;
3258
3259 nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
3260 nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
3261
3262 if (nhg->nexthop)
3263 copy_nexthops(nh, nhg->nexthop, NULL);
3264 }
3265 #endif /* !HAVE_NETLINK */
3266 }
3267
3268 /*
3269 * If the old and new context route type and nexthop group id are the
3270 * same, there is no need to send down a route replace: we know we have
3271 * already sent a nexthop group replace, or an upper-level protocol has
3272 * sent us the exact same route again.
3273 *
3274 */
3275 if ((dplane_ctx_get_type(ctx) == dplane_ctx_get_old_type(ctx))
3276 && (dplane_ctx_get_nhe_id(ctx)
3277 == dplane_ctx_get_old_nhe_id(ctx))
3278 && (dplane_ctx_get_nhe_id(ctx) >= ZEBRA_NHG_PROTO_LOWER)) {
3279 struct nexthop *nexthop;
3280
3281 if (IS_ZEBRA_DEBUG_DPLANE)
3282 zlog_debug(
3283 "%s: Ignoring Route exactly the same",
3284 __func__);
3285
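/* Mark the context's active, non-recursive nexthops as FIB-installed,
* mirroring what a successful install would report.
*/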
3286 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
3287 nexthop)) {
3288 if (CHECK_FLAG(nexthop->flags,
3289 NEXTHOP_FLAG_RECURSIVE))
3290 continue;
3291
3292 if (CHECK_FLAG(nexthop->flags,
3293 NEXTHOP_FLAG_ACTIVE))
3294 SET_FLAG(nexthop->flags,
3295 NEXTHOP_FLAG_FIB);
3296 }
3297
3298 dplane_ctx_free(&ctx);
3299 return ZEBRA_DPLANE_REQUEST_SUCCESS;
3300 }
3301
3302 /* Enqueue context for processing */
3303 ret = dplane_update_enqueue(ctx);
3304 }
3305
3306 /* Update counter */
3307 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
3308 memory_order_relaxed);
3309
3310 if (ret == AOK)
3311 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3312 else {
3313 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
3314 memory_order_relaxed);
3315 if (ctx)
3316 dplane_ctx_free(&ctx);
3317 }
3318
3319 return result;
3320 }
3321
3322 /**
3323 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3324 *
3325 * @nhe: Nexthop group hash entry where the change occurred
3326 * @op: The operation to be enqueued
3327 *
3328 * Return: Result of the change
3329 */
3330 static enum zebra_dplane_result
3331 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
3332 {
3333 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3334 int ret = EINVAL;
3335 struct zebra_dplane_ctx *ctx = NULL;
3336
3337 /* Obtain context block */
3338 ctx = dplane_ctx_alloc();
3339 if (!ctx) {
3340 ret = ENOMEM;
3341 goto done;
3342 }
3343
3344 ret = dplane_ctx_nexthop_init(ctx, op, nhe);
3345 if (ret == AOK)
3346 ret = dplane_update_enqueue(ctx);
3347
3348 done:
3349 /* Update counter */
3350 atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
3351 memory_order_relaxed);
3352
3353 if (ret == AOK)
3354 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3355 else {
3356 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3357 memory_order_relaxed);
3358 if (ctx)
3359 dplane_ctx_free(&ctx);
3360 }
3361
3362 return result;
3363 }
3364
3365 /*
3366 * Enqueue a route 'add' for the dataplane.
3367 */
3368 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
3369 struct route_entry *re)
3370 {
3371 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3372
3373 if (rn == NULL || re == NULL)
3374 goto done;
3375
3376 ret = dplane_route_update_internal(rn, re, NULL,
3377 DPLANE_OP_ROUTE_INSTALL);
3378
3379 done:
3380 return ret;
3381 }
3382
3383 /*
3384 * Enqueue a route update for the dataplane.
3385 */
3386 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
3387 struct route_entry *re,
3388 struct route_entry *old_re)
3389 {
3390 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3391
3392 if (rn == NULL || re == NULL)
3393 goto done;
3394
3395 ret = dplane_route_update_internal(rn, re, old_re,
3396 DPLANE_OP_ROUTE_UPDATE);
3397 done:
3398 return ret;
3399 }
3400
3401 /*
3402 * Enqueue a route removal for the dataplane.
3403 */
3404 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
3405 struct route_entry *re)
3406 {
3407 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3408
3409 if (rn == NULL || re == NULL)
3410 goto done;
3411
3412 ret = dplane_route_update_internal(rn, re, NULL,
3413 DPLANE_OP_ROUTE_DELETE);
3414
3415 done:
3416 return ret;
3417 }
3418
3419 /*
3420 * Notify the dplane when system/connected routes change.
3421 */
3422 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
3423 struct route_entry *re)
3424 {
3425 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3426
3427 /* Ignore this event unless a provider plugin has requested it. */
3428 if (!zdplane_info.dg_sys_route_notifs) {
3429 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3430 goto done;
3431 }
3432
3433 if (rn == NULL || re == NULL)
3434 goto done;
3435
3436 ret = dplane_route_update_internal(rn, re, NULL,
3437 DPLANE_OP_SYS_ROUTE_ADD);
3438
3439 done:
3440 return ret;
3441 }
3442
3443 /*
3444 * Notify the dplane when system/connected routes are deleted.
3445 */
3446 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
3447 struct route_entry *re)
3448 {
3449 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3450
3451 /* Ignore this event unless a provider plugin has requested it. */
3452 if (!zdplane_info.dg_sys_route_notifs) {
3453 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3454 goto done;
3455 }
3456
3457 if (rn == NULL || re == NULL)
3458 goto done;
3459
3460 ret = dplane_route_update_internal(rn, re, NULL,
3461 DPLANE_OP_SYS_ROUTE_DELETE);
3462
3463 done:
3464 return ret;
3465 }
3466
3467 /*
3468 * Update from an async notification, to bring other fibs up-to-date.
3469 */
3470 enum zebra_dplane_result
3471 dplane_route_notif_update(struct route_node *rn,
3472 struct route_entry *re,
3473 enum dplane_op_e op,
3474 struct zebra_dplane_ctx *ctx)
3475 {
3476 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3477 int ret = EINVAL;
3478 struct zebra_dplane_ctx *new_ctx = NULL;
3479 struct nexthop *nexthop;
3480 struct nexthop_group *nhg;
3481
3482 if (rn == NULL || re == NULL)
3483 goto done;
3484
3485 new_ctx = dplane_ctx_alloc();
3486 if (new_ctx == NULL)
3487 goto done;
3488
3489 /* Init context with info from zebra data structs */
3490 dplane_ctx_route_init(new_ctx, op, rn, re);
3491
3492 /* For add/update, need to adjust the nexthops so that we match
3493 * the notification state, which may not be the route-entry/RIB
3494 * state.
3495 */
3496 if (op == DPLANE_OP_ROUTE_UPDATE ||
3497 op == DPLANE_OP_ROUTE_INSTALL) {
3498
3499 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
3500 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
3501
3502 nhg = rib_get_fib_nhg(re);
3503 if (nhg && nhg->nexthop)
3504 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3505 nhg->nexthop, NULL);
3506
3507 /* Check for installed backup nexthops also */
3508 nhg = rib_get_fib_backup_nhg(re);
3509 if (nhg && nhg->nexthop) {
3510 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3511 nhg->nexthop, NULL);
3512 }
3513
3514 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
3515 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
3516
3517 }
3518
3519 /* Capture info about the source of the notification, in 'ctx' */
3520 dplane_ctx_set_notif_provider(new_ctx,
3521 dplane_ctx_get_notif_provider(ctx));
3522
3523 ret = dplane_update_enqueue(new_ctx);
3524
3525 done:
3526 if (ret == AOK)
3527 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3528 else if (new_ctx)
3529 dplane_ctx_free(&new_ctx);
3530
3531 return result;
3532 }
3533
3534 /*
3535 * Enqueue a nexthop add for the dataplane.
3536 */
3537 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
3538 {
3539 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3540
3541 if (nhe)
3542 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
3543 return ret;
3544 }
3545
3546 /*
3547 * Enqueue a nexthop update for the dataplane.
3548 *
3549 * Might not need this func since zebra's nexthop objects should be immutable?
3550 */
3551 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
3552 {
3553 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3554
3555 if (nhe)
3556 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
3557 return ret;
3558 }
3559
3560 /*
3561 * Enqueue a nexthop removal for the dataplane.
3562 */
3563 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
3564 {
3565 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3566
3567 if (nhe)
3568 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
3569
3570 return ret;
3571 }
3572
3573 /*
3574 * Enqueue LSP add for the dataplane.
3575 */
3576 enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
3577 {
3578 enum zebra_dplane_result ret =
3579 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
3580
3581 return ret;
3582 }
3583
3584 /*
3585 * Enqueue LSP update for the dataplane.
3586 */
3587 enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
3588 {
3589 enum zebra_dplane_result ret =
3590 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
3591
3592 return ret;
3593 }
3594
3595 /*
3596 * Enqueue LSP delete for the dataplane.
3597 */
3598 enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
3599 {
3600 enum zebra_dplane_result ret =
3601 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
3602
3603 return ret;
3604 }
3605
3606 /* Update or un-install resulting from an async notification */
3607 enum zebra_dplane_result
3608 dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
3609 struct zebra_dplane_ctx *notif_ctx)
3610 {
3611 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3612 int ret = EINVAL;
3613 struct zebra_dplane_ctx *ctx = NULL;
3614 struct nhlfe_list_head *head;
3615 struct zebra_nhlfe *nhlfe, *new_nhlfe;
3616
3617 /* Obtain context block */
3618 ctx = dplane_ctx_alloc();
3619 if (ctx == NULL) {
3620 ret = ENOMEM;
3621 goto done;
3622 }
3623
3624 /* Copy info from zebra LSP */
3625 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3626 if (ret != AOK)
3627 goto done;
3628
3629 /* Add any installed backup nhlfes */
3630 head = &(ctx->u.lsp.backup_nhlfe_list);
3631 frr_each(nhlfe_list, head, nhlfe) {
3632
3633 if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) &&
3634 CHECK_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_FIB)) {
3635 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp),
3636 nhlfe->type,
3637 nhlfe->nexthop);
3638
3639 /* Need to copy flags too */
3640 new_nhlfe->flags = nhlfe->flags;
3641 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
3642 }
3643 }
3644
3645 /* Capture info about the source of the notification */
3646 dplane_ctx_set_notif_provider(
3647 ctx,
3648 dplane_ctx_get_notif_provider(notif_ctx));
3649
3650 ret = dplane_update_enqueue(ctx);
3651
3652 done:
3653 /* Update counter */
3654 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3655 memory_order_relaxed);
3656
3657 if (ret == AOK)
3658 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3659 else {
3660 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3661 memory_order_relaxed);
3662 if (ctx)
3663 dplane_ctx_free(&ctx);
3664 }
3665 return result;
3666 }
3667
3668 /*
3669 * Enqueue pseudowire install for the dataplane.
3670 */
3671 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
3672 {
3673 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
3674 }
3675
3676 /*
3677 * Enqueue pseudowire un-install for the dataplane.
3678 */
3679 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
3680 {
3681 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
3682 }
3683
3684 /*
3685 * Common internal LSP update utility
3686 */
3687 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
3688 enum dplane_op_e op)
3689 {
3690 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3691 int ret = EINVAL;
3692 struct zebra_dplane_ctx *ctx = NULL;
3693
3694 /* Obtain context block */
3695 ctx = dplane_ctx_alloc();
3696
3697 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3698 if (ret != AOK)
3699 goto done;
3700
3701 ret = dplane_update_enqueue(ctx);
3702
3703 done:
3704 /* Update counter */
3705 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3706 memory_order_relaxed);
3707
3708 if (ret == AOK)
3709 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3710 else {
3711 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3712 memory_order_relaxed);
3713 dplane_ctx_free(&ctx);
3714 }
3715
3716 return result;
3717 }
3718
3719 /*
3720 * Internal, common handler for pseudowire updates.
3721 */
3722 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
3723 enum dplane_op_e op)
3724 {
3725 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3726 int ret;
3727 struct zebra_dplane_ctx *ctx = NULL;
3728
3729 ctx = dplane_ctx_alloc();
3730
3731 ret = dplane_ctx_pw_init(ctx, op, pw);
3732 if (ret != AOK)
3733 goto done;
3734
3735 ret = dplane_update_enqueue(ctx);
3736
3737 done:
3738 /* Update counter */
3739 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
3740 memory_order_relaxed);
3741
3742 if (ret == AOK)
3743 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3744 else {
3745 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
3746 memory_order_relaxed);
3747 dplane_ctx_free(&ctx);
3748 }
3749
3750 return result;
3751 }
3752
3753 /*
3754 * Enqueue access br_port update.
3755 */
3756 enum zebra_dplane_result
3757 dplane_br_port_update(const struct interface *ifp, bool non_df,
3758 uint32_t sph_filter_cnt,
3759 const struct in_addr *sph_filters, uint32_t backup_nhg_id)
3760 {
3761 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3762 uint32_t flags = 0;
3763 int ret;
3764 struct zebra_dplane_ctx *ctx = NULL;
3765 struct zebra_ns *zns;
3766 enum dplane_op_e op = DPLANE_OP_BR_PORT_UPDATE;
3767
3768 if (non_df)
3769 flags |= DPLANE_BR_PORT_NON_DF;
3770
3771 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_EVPN_MH_ES) {
3772 uint32_t i;
3773 char vtep_str[ES_VTEP_LIST_STR_SZ];
3774
3775 vtep_str[0] = '\0';
3776 for (i = 0; i < sph_filter_cnt; ++i) {
3777 snprintfrr(vtep_str + strlen(vtep_str),
3778 sizeof(vtep_str) - strlen(vtep_str), "%pI4 ",
3779 &sph_filters[i]);
3780 }
3781 zlog_debug(
3782 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3783 dplane_op2str(op), ifp->name, flags, backup_nhg_id,
3784 vtep_str);
3785 }
3786
3787 ctx = dplane_ctx_alloc();
3788
3789 ctx->zd_op = op;
3790 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3791 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3792
3793 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3794 dplane_ctx_ns_init(ctx, zns, false);
3795
3796 ctx->zd_ifindex = ifp->ifindex;
3797 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3798
3799 /* Init the br-port-specific data area */
3800 memset(&ctx->u.br_port, 0, sizeof(ctx->u.br_port));
3801
3802 ctx->u.br_port.flags = flags;
3803 ctx->u.br_port.backup_nhg_id = backup_nhg_id;
3804 ctx->u.br_port.sph_filter_cnt = sph_filter_cnt;
3805 memcpy(ctx->u.br_port.sph_filters, sph_filters,
3806 sizeof(ctx->u.br_port.sph_filters[0]) * sph_filter_cnt);
3807
3808 /* Enqueue for processing on the dplane pthread */
3809 ret = dplane_update_enqueue(ctx);
3810
3811 /* Increment counter */
3812 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_in, 1,
3813 memory_order_relaxed);
3814
3815 if (ret == AOK) {
3816 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3817 } else {
3818 /* Error counter */
3819 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_errors, 1,
3820 memory_order_relaxed);
3821 dplane_ctx_free(&ctx);
3822 }
3823
3824 return result;
3825 }
3826
3827 /*
3828 * Enqueue interface address add for the dataplane.
3829 */
3830 enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
3831 const struct connected *ifc)
3832 {
3833 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3834 /* Extra checks for this OS path. */
3835
3836 /* Don't configure PtP addresses on broadcast ifs or reverse */
3837 if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
3838 if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
3839 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3840
3841 return ZEBRA_DPLANE_REQUEST_FAILURE;
3842 }
3843 #endif
3844
3845 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
3846 }
3847
3848 /*
3849 * Enqueue interface address remove/uninstall for the dataplane.
3850 */
3851 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
3852 const struct connected *ifc)
3853 {
3854 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
3855 }
3856
3857 static enum zebra_dplane_result intf_addr_update_internal(
3858 const struct interface *ifp, const struct connected *ifc,
3859 enum dplane_op_e op)
3860 {
3861 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3862 int ret = EINVAL;
3863 struct zebra_dplane_ctx *ctx = NULL;
3864 struct zebra_ns *zns;
3865
3866 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3867 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3868 dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
3869 ifc->address);
3870
3871 ctx = dplane_ctx_alloc();
3872
3873 ctx->zd_op = op;
3874 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3875 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3876
3877 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3878 dplane_ctx_ns_init(ctx, zns, false);
3879
3880 /* Init the interface-addr-specific area */
3881 memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
3882
3883 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3884 ctx->zd_ifindex = ifp->ifindex;
3885 ctx->u.intf.prefix = *(ifc->address);
3886
3887 if (if_is_broadcast(ifp))
3888 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
3889
3890 if (CONNECTED_PEER(ifc)) {
3891 ctx->u.intf.dest_prefix = *(ifc->destination);
3892 ctx->u.intf.flags |=
3893 (DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
3894 }
3895
3896 if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
3897 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
3898
3899 if (ifc->label) {
3900 size_t len;
3901
3902 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
3903
3904 /* Use embedded buffer if it's adequate; else allocate. */
3905 len = strlen(ifc->label);
3906
3907 if (len < sizeof(ctx->u.intf.label_buf)) {
3908 strlcpy(ctx->u.intf.label_buf, ifc->label,
3909 sizeof(ctx->u.intf.label_buf));
3910 ctx->u.intf.label = ctx->u.intf.label_buf;
3911 } else {
3912 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, ifc->label);
3913 }
3914 }
3915
3916 ret = dplane_update_enqueue(ctx);
3917
3918 /* Increment counter */
3919 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
3920 memory_order_relaxed);
3921
3922 if (ret == AOK)
3923 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3924 else {
3925 /* Error counter */
3926 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3927 1, memory_order_relaxed);
3928 dplane_ctx_free(&ctx);
3929 }
3930
3931 return result;
3932 }
3933
3934 /**
3935 * dplane_intf_update_internal() - Helper for enqueuing interface changes
3936 *
3937 * @ifp: Interface where the change occurred
3938 * @op: The operation to be enqueued
3939 *
3940 * Return: Result of the change
3941 */
3942 static enum zebra_dplane_result
3943 dplane_intf_update_internal(const struct interface *ifp, enum dplane_op_e op)
3944 {
3945 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3946 int ret = EINVAL;
3947 struct zebra_dplane_ctx *ctx = NULL;
3948
3949 /* Obtain context block */
3950 ctx = dplane_ctx_alloc();
3951 if (!ctx) {
3952 ret = ENOMEM;
3953 goto done;
3954 }
3955
3956 ret = dplane_ctx_intf_init(ctx, op, ifp);
3957 if (ret == AOK)
3958 ret = dplane_update_enqueue(ctx);
3959
3960 done:
3961 /* Update counter */
3962 atomic_fetch_add_explicit(&zdplane_info.dg_intfs_in, 1,
3963 memory_order_relaxed);
3964
3965 if (ret == AOK)
3966 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3967 else {
3968 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors, 1,
3969 memory_order_relaxed);
3970 if (ctx)
3971 dplane_ctx_free(&ctx);
3972 }
3973
3974 return result;
3975 }
3976
3977 /*
3978 * Enqueue an interface add for the dataplane.
3979 */
3980 enum zebra_dplane_result dplane_intf_add(const struct interface *ifp)
3981 {
3982 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3983
3984 if (ifp)
3985 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_INSTALL);
3986 return ret;
3987 }
3988
3989 /*
3990 * Enqueue an interface update for the dataplane.
3991 */
3992 enum zebra_dplane_result dplane_intf_update(const struct interface *ifp)
3993 {
3994 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3995
3996 if (ifp)
3997 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_UPDATE);
3998 return ret;
3999 }
4000
4001 /*
4002 * Enqueue an interface delete for the dataplane.
4003 */
4004 enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp)
4005 {
4006 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
4007
4008 if (ifp)
4009 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_DELETE);
4010 return ret;
4011 }
4012
4013 /*
4014 * Enqueue vxlan/evpn mac add (or update).
4015 */
4016 enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
4017 const struct interface *bridge_ifp,
4018 vlanid_t vid,
4019 const struct ethaddr *mac,
4020 struct in_addr vtep_ip,
4021 bool sticky,
4022 uint32_t nhg_id,
4023 bool was_static)
4024 {
4025 enum zebra_dplane_result result;
4026 uint32_t update_flags = 0;
4027
4028 update_flags |= DPLANE_MAC_REMOTE;
4029 if (was_static)
4030 update_flags |= DPLANE_MAC_WAS_STATIC;
4031
4032 /* Use common helper api */
4033 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4034 vid, mac, vtep_ip, sticky, nhg_id, update_flags);
4035 return result;
4036 }
4037
4038 /*
4039 * Enqueue vxlan/evpn mac delete.
4040 */
4041 enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
4042 const struct interface *bridge_ifp,
4043 vlanid_t vid,
4044 const struct ethaddr *mac,
4045 struct in_addr vtep_ip)
4046 {
4047 enum zebra_dplane_result result;
4048 uint32_t update_flags = 0;
4049
4050 update_flags |= DPLANE_MAC_REMOTE;
4051
4052 /* Use common helper api */
4053 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
4054 vid, mac, vtep_ip, false, 0, update_flags);
4055 return result;
4056 }
4057
4058 /*
4059 * API to configure link local with either MAC address or IP information
4060 */
4061 enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
4062 const struct interface *ifp,
4063 struct ipaddr *link_ip,
4064 struct ipaddr *ip,
4065 uint32_t ndm_state, int protocol)
4066 {
4067 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4068 uint16_t state = 0;
4069 uint32_t update_flags;
4070
4071 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4072 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
4073 __func__, dplane_op2str(op), ifp->name, link_ip, ip);
4074
4075 if (ndm_state == ZEBRA_NEIGH_STATE_REACHABLE)
4076 state = DPLANE_NUD_REACHABLE;
4077 else if (ndm_state == ZEBRA_NEIGH_STATE_FAILED)
4078 state = DPLANE_NUD_FAILED;
4079
4080 update_flags = DPLANE_NEIGH_NO_EXTENSION;
4081
4082 result = neigh_update_internal(op, ifp, (const void *)link_ip,
4083 ipaddr_family(link_ip), ip, 0, state,
4084 update_flags, protocol);
4085
4086 return result;
4087 }
4088
4089 /*
4090 * Enqueue local mac add (or update).
4091 */
4092 enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
4093 const struct interface *bridge_ifp,
4094 vlanid_t vid,
4095 const struct ethaddr *mac,
4096 bool sticky,
4097 uint32_t set_static,
4098 uint32_t set_inactive)
4099 {
4100 enum zebra_dplane_result result;
4101 uint32_t update_flags = 0;
4102 struct in_addr vtep_ip;
4103
4104 if (set_static)
4105 update_flags |= DPLANE_MAC_SET_STATIC;
4106
4107 if (set_inactive)
4108 update_flags |= DPLANE_MAC_SET_INACTIVE;
4109
4110 vtep_ip.s_addr = 0;
4111
4112 /* Use common helper api */
4113 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4114 vid, mac, vtep_ip, sticky, 0,
4115 update_flags);
4116 return result;
4117 }
4118
4119 /*
4120 * Enqueue local mac del
4121 */
4122 enum zebra_dplane_result
4123 dplane_local_mac_del(const struct interface *ifp,
4124 const struct interface *bridge_ifp, vlanid_t vid,
4125 const struct ethaddr *mac)
4126 {
4127 enum zebra_dplane_result result;
4128 struct in_addr vtep_ip;
4129
4130 vtep_ip.s_addr = 0;
4131
4132 /* Use common helper api */
4133 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, vid,
4134 mac, vtep_ip, false, 0, 0);
4135 return result;
4136 }
4137 /*
4138 * Public api to init an empty context - either newly-allocated or
4139 * reset/cleared - for a MAC update.
4140 */
4141 void dplane_mac_init(struct zebra_dplane_ctx *ctx,
4142 const struct interface *ifp,
4143 const struct interface *br_ifp,
4144 vlanid_t vid,
4145 const struct ethaddr *mac,
4146 struct in_addr vtep_ip,
4147 bool sticky,
4148 uint32_t nhg_id,
4149 uint32_t update_flags)
4150 {
4151 struct zebra_ns *zns;
4152
4153 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4154 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4155
4156 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4157 dplane_ctx_ns_init(ctx, zns, false);
4158
4159 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4160 ctx->zd_ifindex = ifp->ifindex;
4161
4162 /* Init the mac-specific data area */
4163 memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
4164
4165 ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
4166 ctx->u.macinfo.vtep_ip = vtep_ip;
4167 ctx->u.macinfo.mac = *mac;
4168 ctx->u.macinfo.vid = vid;
4169 ctx->u.macinfo.is_sticky = sticky;
4170 ctx->u.macinfo.nhg_id = nhg_id;
4171 ctx->u.macinfo.update_flags = update_flags;
4172 }
4173
4174 /*
4175 * Common helper api for MAC address/vxlan updates
4176 */
4177 static enum zebra_dplane_result
4178 mac_update_common(enum dplane_op_e op,
4179 const struct interface *ifp,
4180 const struct interface *br_ifp,
4181 vlanid_t vid,
4182 const struct ethaddr *mac,
4183 struct in_addr vtep_ip,
4184 bool sticky,
4185 uint32_t nhg_id,
4186 uint32_t update_flags)
4187 {
4188 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4189 int ret;
4190 struct zebra_dplane_ctx *ctx = NULL;
4191
4192 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4193 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
4194 dplane_op2str(op), mac, ifp->name, &vtep_ip);
4195
4196 ctx = dplane_ctx_alloc();
4197 ctx->zd_op = op;
4198
4199 /* Common init for the ctx */
4200 dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky,
4201 nhg_id, update_flags);
4202
4203 /* Enqueue for processing on the dplane pthread */
4204 ret = dplane_update_enqueue(ctx);
4205
4206 /* Increment counter */
4207 atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
4208 memory_order_relaxed);
4209
4210 if (ret == AOK)
4211 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4212 else {
4213 /* Error counter */
4214 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
4215 memory_order_relaxed);
4216 dplane_ctx_free(&ctx);
4217 }
4218
4219 return result;
4220 }
4221
4222 /*
4223 * Enqueue evpn neighbor add for the dataplane.
4224 */
4225 enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
4226 const struct ipaddr *ip,
4227 const struct ethaddr *mac,
4228 uint32_t flags, bool was_static)
4229 {
4230 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4231 uint32_t update_flags = 0;
4232
4233 update_flags |= DPLANE_NEIGH_REMOTE;
4234
4235 if (was_static)
4236 update_flags |= DPLANE_NEIGH_WAS_STATIC;
4237
4238 result = neigh_update_internal(
4239 DPLANE_OP_NEIGH_INSTALL, ifp, (const void *)mac, AF_ETHERNET,
4240 ip, flags, DPLANE_NUD_NOARP, update_flags, 0);
4241
4242 return result;
4243 }
4244
4245 /*
4246 * Enqueue local neighbor add for the dataplane.
4247 */
4248 enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
4249 const struct ipaddr *ip,
4250 const struct ethaddr *mac,
4251 bool set_router, bool set_static,
4252 bool set_inactive)
4253 {
4254 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4255 uint32_t update_flags = 0;
4256 uint32_t ntf = 0;
4257 uint16_t state;
4258
4259 if (set_static)
4260 update_flags |= DPLANE_NEIGH_SET_STATIC;
4261
4262 if (set_inactive) {
4263 update_flags |= DPLANE_NEIGH_SET_INACTIVE;
4264 state = DPLANE_NUD_STALE;
4265 } else {
4266 state = DPLANE_NUD_REACHABLE;
4267 }
4268
4269 if (set_router)
4270 ntf |= DPLANE_NTF_ROUTER;
4271
4272 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL, ifp,
4273 (const void *)mac, AF_ETHERNET, ip, ntf,
4274 state, update_flags, 0);
4275
4276 return result;
4277 }
4278
4279 /*
4280 * Enqueue evpn neighbor delete for the dataplane.
4281 */
4282 enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
4283 const struct ipaddr *ip)
4284 {
4285 enum zebra_dplane_result result;
4286 uint32_t update_flags = 0;
4287
4288 update_flags |= DPLANE_NEIGH_REMOTE;
4289
4290 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE, ifp, NULL,
4291 AF_ETHERNET, ip, 0, 0, update_flags, 0);
4292
4293 return result;
4294 }
4295
4296 /*
4297 * Enqueue evpn VTEP add for the dataplane.
4298 */
4299 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
4300 const struct in_addr *ip,
4301 vni_t vni)
4302 {
4303 enum zebra_dplane_result result;
4304 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4305 struct ipaddr addr;
4306
4307 if (IS_ZEBRA_DEBUG_VXLAN)
4308 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4309 ip, vni, ifp->name, ifp->ifindex);
4310
4311 SET_IPADDR_V4(&addr);
4312 addr.ipaddr_v4 = *ip;
4313
4314 result = neigh_update_internal(DPLANE_OP_VTEP_ADD, ifp, &mac,
4315 AF_ETHERNET, &addr, 0, 0, 0, 0);
4316
4317 return result;
4318 }
4319
4320 /*
4321  * Enqueue evpn VTEP delete for the dataplane.
4322 */
4323 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
4324 const struct in_addr *ip,
4325 vni_t vni)
4326 {
4327 enum zebra_dplane_result result;
4328 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4329 struct ipaddr addr;
4330
4331 if (IS_ZEBRA_DEBUG_VXLAN)
4332 zlog_debug(
4333 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4334 ip, vni, ifp->name, ifp->ifindex);
4335
4336 SET_IPADDR_V4(&addr);
4337 addr.ipaddr_v4 = *ip;
4338
4339 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE, ifp,
4340 (const void *)&mac, AF_ETHERNET, &addr,
4341 0, 0, 0, 0);
4342
4343 return result;
4344 }
4345
4346 enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
4347 const struct ipaddr *ip)
4348 {
4349 enum zebra_dplane_result result;
4350
4351 result = neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER, ifp, NULL,
4352 AF_ETHERNET, ip, DPLANE_NTF_USE,
4353 DPLANE_NUD_INCOMPLETE, 0, 0);
4354
4355 return result;
4356 }
4357
4358 enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
4359 const uint8_t family,
4360 const uint32_t app_probes,
4361 const uint32_t ucast_probes,
4362 const uint32_t mcast_probes)
4363 {
4364 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4365 int ret;
4366 struct zebra_dplane_ctx *ctx = NULL;
4367 struct zebra_ns *zns;
4368 enum dplane_op_e op = DPLANE_OP_NEIGH_TABLE_UPDATE;
4369
4370 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4371 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4372 dplane_op2str(op), ifp->name, family2str(family));
4373 }
4374
4375 ctx = dplane_ctx_alloc();
4376
4377 ctx->zd_op = op;
4378 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4379 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4380
4381 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4382 dplane_ctx_ns_init(ctx, zns, false);
4383
4384 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4385 ctx->zd_ifindex = ifp->ifindex;
4386
4387 /* Init the neighbor-specific data area */
4388 memset(&ctx->u.neightable, 0, sizeof(ctx->u.neightable));
4389
4390 ctx->u.neightable.family = family;
4391 ctx->u.neightable.app_probes = app_probes;
4392 ctx->u.neightable.ucast_probes = ucast_probes;
4393 ctx->u.neightable.mcast_probes = mcast_probes;
4394
4395 /* Enqueue for processing on the dplane pthread */
4396 ret = dplane_update_enqueue(ctx);
4397
4398 /* Increment counter */
4399 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_in, 1,
4400 memory_order_relaxed);
4401
4402 if (ret == AOK)
4403 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4404 else {
4405 /* Error counter */
4406 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_errors, 1,
4407 memory_order_relaxed);
4408 dplane_ctx_free(&ctx);
4409 }
4410
4411 return result;
4412 }
4413
4414 /*
4415 * Common helper api for neighbor updates
4416 */
4417 static enum zebra_dplane_result
4418 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
4419 const void *link, const int link_family,
4420 const struct ipaddr *ip, uint32_t flags, uint16_t state,
4421 uint32_t update_flags, int protocol)
4422 {
4423 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4424 int ret;
4425 struct zebra_dplane_ctx *ctx = NULL;
4426 struct zebra_ns *zns;
4427 const struct ethaddr *mac = NULL;
4428 const struct ipaddr *link_ip = NULL;
4429
4430 if (link_family == AF_ETHERNET)
4431 mac = (const struct ethaddr *)link;
4432 else
4433 link_ip = (const struct ipaddr *)link;
4434
4435 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4436 char buf1[PREFIX_STRLEN];
4437
4438 buf1[0] = '\0';
4439 if (link_family == AF_ETHERNET)
4440 prefix_mac2str(mac, buf1, sizeof(buf1));
4441 else
4442 ipaddr2str(link_ip, buf1, sizeof(buf1));
4443 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4444 dplane_op2str(op), ifp->name,
4445 link_family == AF_ETHERNET ? "mac " : "link ",
4446 buf1, ip);
4447 }
4448
4449 ctx = dplane_ctx_alloc();
4450
4451 ctx->zd_op = op;
4452 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4453 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4454 dplane_ctx_set_type(ctx, protocol);
4455
4456 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4457 dplane_ctx_ns_init(ctx, zns, false);
4458
4459 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4460 ctx->zd_ifindex = ifp->ifindex;
4461
4462 /* Init the neighbor-specific data area */
4463 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
4464
4465 ctx->u.neigh.ip_addr = *ip;
4466 if (mac)
4467 ctx->u.neigh.link.mac = *mac;
4468 else if (link_ip)
4469 ctx->u.neigh.link.ip_addr = *link_ip;
4470
4471 ctx->u.neigh.flags = flags;
4472 ctx->u.neigh.state = state;
4473 ctx->u.neigh.update_flags = update_flags;
4474
4475 /* Enqueue for processing on the dplane pthread */
4476 ret = dplane_update_enqueue(ctx);
4477
4478 /* Increment counter */
4479 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
4480 memory_order_relaxed);
4481
4482 if (ret == AOK)
4483 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4484 else {
4485 /* Error counter */
4486 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
4487 memory_order_relaxed);
4488 dplane_ctx_free(&ctx);
4489 }
4490
4491 return result;
4492 }
4493
4494 /*
4495 * Common helper api for PBR rule updates
4496 */
4497 static enum zebra_dplane_result
4498 rule_update_internal(enum dplane_op_e op, struct zebra_pbr_rule *new_rule,
4499 struct zebra_pbr_rule *old_rule)
4500 {
4501 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4502 struct zebra_dplane_ctx *ctx;
4503 int ret;
4504
4505 ctx = dplane_ctx_alloc();
4506
4507 ret = dplane_ctx_rule_init(ctx, op, new_rule, old_rule);
4508 if (ret != AOK)
4509 goto done;
4510
4511 ret = dplane_update_enqueue(ctx);
4512
4513 done:
4514 atomic_fetch_add_explicit(&zdplane_info.dg_rules_in, 1,
4515 memory_order_relaxed);
4516
4517 if (ret == AOK)
4518 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4519 else {
4520 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors, 1,
4521 memory_order_relaxed);
4522 dplane_ctx_free(&ctx);
4523 }
4524
4525 return result;
4526 }
4527
4528 enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule)
4529 {
4530 return rule_update_internal(DPLANE_OP_RULE_ADD, rule, NULL);
4531 }
4532
4533 enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule)
4534 {
4535 return rule_update_internal(DPLANE_OP_RULE_DELETE, rule, NULL);
4536 }
4537
4538 enum zebra_dplane_result dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
4539 struct zebra_pbr_rule *new_rule)
4540 {
4541 return rule_update_internal(DPLANE_OP_RULE_UPDATE, new_rule, old_rule);
4542 }
4543 /*
4544 * Common helper api for iptable updates
4545 */
4546 static enum zebra_dplane_result
4547 iptable_update_internal(enum dplane_op_e op, struct zebra_pbr_iptable *iptable)
4548 {
4549 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4550 struct zebra_dplane_ctx *ctx;
4551 int ret;
4552
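/* Avoid queueing a duplicate update: if the same install or uninstall
 * for this iptable has already been enqueued (the corresponding
 * *_QUEUED flag is set below on a successful enqueue), skip it.
 */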
4553 if ((op == DPLANE_OP_IPTABLE_ADD &&
4554 CHECK_FLAG(iptable->internal_flags, IPTABLE_INSTALL_QUEUED)) ||
4555 (op == DPLANE_OP_IPTABLE_DELETE &&
4556 CHECK_FLAG(iptable->internal_flags, IPTABLE_UNINSTALL_QUEUED))) {
4557 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4558 zlog_debug(
4559 "update dplane ctx %s: iptable %s already in progress",
4560 dplane_op2str(op), iptable->ipset_name);
4561 return result;
4562 }
4563
4564 ctx = dplane_ctx_alloc();
4565
4566 ret = dplane_ctx_iptable_init(ctx, op, iptable);
4567 if (ret != AOK)
4568 goto done;
4569
4570 ret = dplane_update_enqueue(ctx);
4571
4572 done:
4573 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_in, 1,
4574 memory_order_relaxed);
4575
4576 if (ret == AOK) {
4577 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4578 if (op == DPLANE_OP_IPTABLE_ADD)
4579 SET_FLAG(iptable->internal_flags,
4580 IPTABLE_INSTALL_QUEUED);
4581 else
4582 SET_FLAG(iptable->internal_flags,
4583 IPTABLE_UNINSTALL_QUEUED);
4584 } else {
4585 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_errors, 1,
4586 memory_order_relaxed);
4587 dplane_ctx_free(&ctx);
4588 }
4589 return result;
4590 }
4591
4592 enum zebra_dplane_result
4593 dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable)
4594 {
4595 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD, iptable);
4596 }
4597
4598 enum zebra_dplane_result
4599 dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable)
4600 {
4601 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE, iptable);
4602 }
4603
4604 /*
4605 * Common helper api for ipset updates
4606 */
4607 static enum zebra_dplane_result
4608 ipset_update_internal(enum dplane_op_e op, struct zebra_pbr_ipset *ipset)
4609 {
4610 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4611 struct zebra_dplane_ctx *ctx;
4612 int ret;
4613
4614 ctx = dplane_ctx_alloc();
4615
4616 ret = dplane_ctx_ipset_init(ctx, op, ipset);
4617 if (ret != AOK)
4618 goto done;
4619
4620 ret = dplane_update_enqueue(ctx);
4621
4622 done:
4623 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_in, 1,
4624 memory_order_relaxed);
4625
4626 if (ret == AOK)
4627 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4628 else {
4629 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors, 1,
4630 memory_order_relaxed);
4631 dplane_ctx_free(&ctx);
4632 }
4633
4634 return result;
4635 }
4636
4637 enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset)
4638 {
4639 return ipset_update_internal(DPLANE_OP_IPSET_ADD, ipset);
4640 }
4641
4642 enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset)
4643 {
4644 return ipset_update_internal(DPLANE_OP_IPSET_DELETE, ipset);
4645 }
4646
4647 /*
4648  * Common helper api for ipset entry updates
4649 */
4650 static enum zebra_dplane_result
4651 ipset_entry_update_internal(enum dplane_op_e op,
4652 struct zebra_pbr_ipset_entry *ipset_entry)
4653 {
4654 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4655 struct zebra_dplane_ctx *ctx;
4656 int ret;
4657
4658 ctx = dplane_ctx_alloc();
4659
4660 ret = dplane_ctx_ipset_entry_init(ctx, op, ipset_entry);
4661 if (ret != AOK)
4662 goto done;
4663
4664 ret = dplane_update_enqueue(ctx);
4665
4666 done:
4667 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_in, 1,
4668 memory_order_relaxed);
4669
4670 if (ret == AOK)
4671 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4672 else {
4673 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_errors,
4674 1, memory_order_relaxed);
4675 dplane_ctx_free(&ctx);
4676 }
4677
4678 return result;
4679 }
4680
4681 enum zebra_dplane_result
4682 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset)
4683 {
4684 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD, ipset);
4685 }
4686
4687 enum zebra_dplane_result
4688 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset)
4689 {
4690 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE, ipset);
4691 }
4692
4693 /*
4694 * Common helper api for GRE set
4695 */
4696 enum zebra_dplane_result
4697 dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
4698 unsigned int mtu, const struct zebra_l2info_gre *gre_info)
4699 {
4700 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4701 struct zebra_dplane_ctx *ctx;
4702 enum dplane_op_e op = DPLANE_OP_GRE_SET;
4703 int ret;
4704 struct zebra_ns *zns;
4705
4706 	if (!ifp)
4707 		return result;
4708 
4709 	zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4710 	if (!zns)
4711 		return result;
4712 
4713 	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4714 		zlog_debug("init dplane ctx %s: if %s link %s%s",
4715 			   dplane_op2str(op), ifp->name,
4716 			   ifp_link ? "set" : "unset", ifp_link ?
4717 			   ifp_link->name : "");
4718 	}
4719 
4720 	ctx = dplane_ctx_alloc();
4721 	ctx->zd_op = op;
4722 	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4723 dplane_ctx_ns_init(ctx, zns, false);
4724
4725 dplane_ctx_set_ifname(ctx, ifp->name);
4726 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4727 ctx->zd_ifindex = ifp->ifindex;
4728 if (ifp_link)
4729 ctx->u.gre.link_ifindex = ifp_link->ifindex;
4730 else
4731 ctx->u.gre.link_ifindex = 0;
4732 if (gre_info)
4733 memcpy(&ctx->u.gre.info, gre_info, sizeof(ctx->u.gre.info));
4734 ctx->u.gre.mtu = mtu;
4735
4736 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4737
4738 /* Enqueue context for processing */
4739 ret = dplane_update_enqueue(ctx);
4740
4741 /* Update counter */
4742 atomic_fetch_add_explicit(&zdplane_info.dg_gre_set_in, 1,
4743 memory_order_relaxed);
4744
4745 if (ret == AOK)
4746 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4747 else {
4748 atomic_fetch_add_explicit(
4749 &zdplane_info.dg_gre_set_errors, 1,
4750 memory_order_relaxed);
4751 if (ctx)
4752 dplane_ctx_free(&ctx);
4753 result = ZEBRA_DPLANE_REQUEST_FAILURE;
4754 }
4755 return result;
4756 }
4757
4758 /*
4759 * Handler for 'show dplane'
4760 */
4761 int dplane_show_helper(struct vty *vty, bool detailed)
4762 {
4763 uint64_t queued, queue_max, limit, errs, incoming, yields,
4764 other_errs;
4765
4766 /* Using atomics because counters are being changed in different
4767 * pthread contexts.
4768 */
4769 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
4770 memory_order_relaxed);
4771 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
4772 memory_order_relaxed);
4773 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
4774 memory_order_relaxed);
4775 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4776 memory_order_relaxed);
4777 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
4778 memory_order_relaxed);
4779 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
4780 memory_order_relaxed);
4781 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
4782 memory_order_relaxed);
4783
4784 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
4785 incoming);
4786 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
4787 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
4788 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
4789 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4790 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
4791 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
4792
4793 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
4794 memory_order_relaxed);
4795 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
4796 memory_order_relaxed);
4797 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
4798 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
4799
4800 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
4801 memory_order_relaxed);
4802 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
4803 memory_order_relaxed);
4804 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
4805 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
4806
4807 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
4808 memory_order_relaxed);
4809 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
4810 memory_order_relaxed);
4811 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
4812 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
4813
4814 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
4815 memory_order_relaxed);
4816 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
4817 memory_order_relaxed);
4818 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
4819 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
4820
4821 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
4822 memory_order_relaxed);
4823 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
4824 memory_order_relaxed);
4825 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
4826 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
4827
4828 incoming = atomic_load_explicit(&zdplane_info.dg_rules_in,
4829 memory_order_relaxed);
4830 errs = atomic_load_explicit(&zdplane_info.dg_rule_errors,
4831 memory_order_relaxed);
4832 vty_out(vty, "Rule updates: %" PRIu64 "\n", incoming);
4833 vty_out(vty, "Rule errors: %" PRIu64 "\n", errs);
4834
4835 incoming = atomic_load_explicit(&zdplane_info.dg_br_port_in,
4836 memory_order_relaxed);
4837 errs = atomic_load_explicit(&zdplane_info.dg_br_port_errors,
4838 memory_order_relaxed);
4839 vty_out(vty, "Bridge port updates: %" PRIu64 "\n", incoming);
4840 vty_out(vty, "Bridge port errors: %" PRIu64 "\n", errs);
4841
4842 incoming = atomic_load_explicit(&zdplane_info.dg_iptable_in,
4843 memory_order_relaxed);
4844 errs = atomic_load_explicit(&zdplane_info.dg_iptable_errors,
4845 memory_order_relaxed);
4846 vty_out(vty, "IPtable updates: %" PRIu64 "\n", incoming);
4847 vty_out(vty, "IPtable errors: %" PRIu64 "\n", errs);
4848 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_in,
4849 memory_order_relaxed);
4850 errs = atomic_load_explicit(&zdplane_info.dg_ipset_errors,
4851 memory_order_relaxed);
4852 vty_out(vty, "IPset updates: %" PRIu64 "\n", incoming);
4853 vty_out(vty, "IPset errors: %" PRIu64 "\n", errs);
4854 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_entry_in,
4855 memory_order_relaxed);
4856 errs = atomic_load_explicit(&zdplane_info.dg_ipset_entry_errors,
4857 memory_order_relaxed);
4858 vty_out(vty, "IPset entry updates: %" PRIu64 "\n", incoming);
4859 vty_out(vty, "IPset entry errors: %" PRIu64 "\n", errs);
4860
4861 incoming = atomic_load_explicit(&zdplane_info.dg_neightable_in,
4862 memory_order_relaxed);
4863 errs = atomic_load_explicit(&zdplane_info.dg_neightable_errors,
4864 memory_order_relaxed);
4865 vty_out(vty, "Neighbor Table updates: %"PRIu64"\n", incoming);
4866 vty_out(vty, "Neighbor Table errors: %"PRIu64"\n", errs);
4867
4868 incoming = atomic_load_explicit(&zdplane_info.dg_gre_set_in,
4869 memory_order_relaxed);
4870 errs = atomic_load_explicit(&zdplane_info.dg_gre_set_errors,
4871 memory_order_relaxed);
4872 vty_out(vty, "GRE set updates: %"PRIu64"\n", incoming);
4873 vty_out(vty, "GRE set errors: %"PRIu64"\n", errs);
4874 return CMD_SUCCESS;
4875 }
4876
4877 /*
4878 * Handler for 'show dplane providers'
4879 */
4880 int dplane_show_provs_helper(struct vty *vty, bool detailed)
4881 {
4882 struct zebra_dplane_provider *prov;
4883 uint64_t in, in_q, in_max, out, out_q, out_max;
4884
4885 vty_out(vty, "Zebra dataplane providers:\n");
4886
4887 DPLANE_LOCK();
4888 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4889 DPLANE_UNLOCK();
4890
4891 /* Show counters, useful info from each registered provider */
4892 while (prov) {
4893
4894 in = atomic_load_explicit(&prov->dp_in_counter,
4895 memory_order_relaxed);
4896 in_q = atomic_load_explicit(&prov->dp_in_queued,
4897 memory_order_relaxed);
4898 in_max = atomic_load_explicit(&prov->dp_in_max,
4899 memory_order_relaxed);
4900 out = atomic_load_explicit(&prov->dp_out_counter,
4901 memory_order_relaxed);
4902 out_q = atomic_load_explicit(&prov->dp_out_queued,
4903 memory_order_relaxed);
4904 out_max = atomic_load_explicit(&prov->dp_out_max,
4905 memory_order_relaxed);
4906
4907 vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n",
4908 prov->dp_name, prov->dp_id, in, in_q, in_max,
4909 out, out_q, out_max);
4910
4911 DPLANE_LOCK();
4912 prov = TAILQ_NEXT(prov, dp_prov_link);
4913 DPLANE_UNLOCK();
4914 }
4915
4916 return CMD_SUCCESS;
4917 }
4918
4919 /*
4920 * Helper for 'show run' etc.
4921 */
4922 int dplane_config_write_helper(struct vty *vty)
4923 {
4924 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
4925 vty_out(vty, "zebra dplane limit %u\n",
4926 zdplane_info.dg_max_queued_updates);
4927
4928 return 0;
4929 }
4930
4931 /*
4932 * Provider registration
4933 */
4934 int dplane_provider_register(const char *name,
4935 enum dplane_provider_prio prio,
4936 int flags,
4937 int (*start_fp)(struct zebra_dplane_provider *),
4938 int (*fp)(struct zebra_dplane_provider *),
4939 int (*fini_fp)(struct zebra_dplane_provider *,
4940 bool early),
4941 void *data,
4942 struct zebra_dplane_provider **prov_p)
4943 {
4944 int ret = 0;
4945 struct zebra_dplane_provider *p = NULL, *last;
4946
4947 /* Validate */
4948 if (fp == NULL) {
4949 ret = EINVAL;
4950 goto done;
4951 }
4952
4953 if (prio <= DPLANE_PRIO_NONE ||
4954 prio > DPLANE_PRIO_LAST) {
4955 ret = EINVAL;
4956 goto done;
4957 }
4958
4959 /* Allocate and init new provider struct */
4960 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
4961
4962 pthread_mutex_init(&(p->dp_mutex), NULL);
4963 TAILQ_INIT(&(p->dp_ctx_in_q));
4964 TAILQ_INIT(&(p->dp_ctx_out_q));
4965
4966 p->dp_flags = flags;
4967 p->dp_priority = prio;
4968 p->dp_fp = fp;
4969 p->dp_start = start_fp;
4970 p->dp_fini = fini_fp;
4971 p->dp_data = data;
4972
4973 /* Lock - the dplane pthread may be running */
4974 DPLANE_LOCK();
4975
4976 p->dp_id = ++zdplane_info.dg_provider_id;
4977
4978 if (name)
4979 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
4980 else
4981 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
4982 "provider-%u", p->dp_id);
4983
4984 /* Insert into list ordered by priority */
4985 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
4986 if (last->dp_priority > p->dp_priority)
4987 break;
4988 }
4989
4990 if (last)
4991 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
4992 else
4993 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
4994 dp_prov_link);
4995
4996 /* And unlock */
4997 DPLANE_UNLOCK();
4998
4999 if (IS_ZEBRA_DEBUG_DPLANE)
5000 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
5001 p->dp_name, p->dp_id, p->dp_priority);
5002
5003 done:
5004 if (prov_p)
5005 *prov_p = p;
5006
5007 return ret;
5008 }
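/* Usage sketch (hypothetical provider name and callback): an external
 * plugin registers its processing callback and receives a provider
 * handle, e.g.:
 *
 *   struct zebra_dplane_provider *prov = NULL;
 *
 *   dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
 *                            DPLANE_PROV_FLAGS_DEFAULT,
 *                            NULL, sample_process_func, NULL,
 *                            NULL, &prov);
 */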
5009
5010 /* Accessors for provider attributes */
5011 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
5012 {
5013 return prov->dp_name;
5014 }
5015
5016 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
5017 {
5018 return prov->dp_id;
5019 }
5020
5021 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
5022 {
5023 return prov->dp_data;
5024 }
5025
5026 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
5027 {
5028 return zdplane_info.dg_updates_per_cycle;
5029 }
5030
5031 /* Lock/unlock a provider's mutex - iff the provider was registered with
5032 * the THREADED flag.
5033 */
5034 void dplane_provider_lock(struct zebra_dplane_provider *prov)
5035 {
5036 if (dplane_provider_is_threaded(prov))
5037 DPLANE_PROV_LOCK(prov);
5038 }
5039
5040 void dplane_provider_unlock(struct zebra_dplane_provider *prov)
5041 {
5042 if (dplane_provider_is_threaded(prov))
5043 DPLANE_PROV_UNLOCK(prov);
5044 }
5045
5046 /*
5047 * Dequeue and maintain associated counter
5048 */
5049 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
5050 struct zebra_dplane_provider *prov)
5051 {
5052 struct zebra_dplane_ctx *ctx = NULL;
5053
5054 dplane_provider_lock(prov);
5055
5056 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5057 if (ctx) {
5058 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5059
5060 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
5061 memory_order_relaxed);
5062 }
5063
5064 dplane_provider_unlock(prov);
5065
5066 return ctx;
5067 }
5068
5069 /*
5070 * Dequeue work to a list, return count
5071 */
5072 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
5073 struct dplane_ctx_q *listp)
5074 {
5075 int limit, ret;
5076 struct zebra_dplane_ctx *ctx;
5077
5078 limit = zdplane_info.dg_updates_per_cycle;
5079
5080 dplane_provider_lock(prov);
5081
5082 for (ret = 0; ret < limit; ret++) {
5083 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5084 if (ctx) {
5085 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5086
5087 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
5088 } else {
5089 break;
5090 }
5091 }
5092
5093 if (ret > 0)
5094 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
5095 memory_order_relaxed);
5096
5097 dplane_provider_unlock(prov);
5098
5099 return ret;
5100 }
5101
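/* Note: this returns the provider's cumulative out-context counter
 * (dp_out_counter, incremented on every enqueue), not the current
 * depth of the out queue.
 */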
5102 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov)
5103 {
5104 return atomic_load_explicit(&(prov->dp_out_counter),
5105 memory_order_relaxed);
5106 }
5107
5108 /*
5109 * Enqueue and maintain associated counter
5110 */
5111 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
5112 struct zebra_dplane_ctx *ctx)
5113 {
5114 uint64_t curr, high;
5115
5116 dplane_provider_lock(prov);
5117
5118 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
5119 zd_q_entries);
5120
5121 /* Maintain out-queue counters */
5122 atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
5123 memory_order_relaxed);
5124 curr = atomic_load_explicit(&prov->dp_out_queued,
5125 memory_order_relaxed);
5126 high = atomic_load_explicit(&prov->dp_out_max,
5127 memory_order_relaxed);
5128 if (curr > high)
5129 atomic_store_explicit(&prov->dp_out_max, curr,
5130 memory_order_relaxed);
5131
5132 dplane_provider_unlock(prov);
5133
5134 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
5135 memory_order_relaxed);
5136 }
5137
5138 /*
5139 * Accessor for provider object
5140 */
5141 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
5142 {
5143 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
5144 }
5145
5146 #ifdef HAVE_NETLINK
5147 /*
5148 * Callback when an OS (netlink) incoming event read is ready. This runs
5149 * in the dplane pthread.
5150 */
5151 static void dplane_incoming_read(struct thread *event)
5152 {
5153 struct dplane_zns_info *zi = THREAD_ARG(event);
5154
5155 kernel_dplane_read(&zi->info);
5156
5157 /* Re-start read task */
5158 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5159 zi->info.sock, &zi->t_read);
5160 }
5161
5162 /*
5163 * Callback in the dataplane pthread that requests info from the OS and
5164 * initiates netlink reads.
5165 */
5166 static void dplane_incoming_request(struct thread *event)
5167 {
5168 struct dplane_zns_info *zi = THREAD_ARG(event);
5169
5170 /* Start read task */
5171 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5172 zi->info.sock, &zi->t_read);
5173
5174 /* Send requests */
5175 netlink_request_netconf(zi->info.sock);
5176 }
5177
5178 /*
5179 * Initiate requests for existing info from the OS. This is called by the
5180 * main pthread, but we want all activity on the dplane netlink socket to
5181 * take place on the dplane pthread, so we schedule an event to accomplish
5182 * that.
5183 */
5184 static void dplane_kernel_info_request(struct dplane_zns_info *zi)
5185 {
5186 /* If we happen to encounter an enabled zns before the dplane
5187 * pthread is running, we'll initiate this later on.
5188 */
5189 if (zdplane_info.dg_master)
5190 thread_add_event(zdplane_info.dg_master,
5191 dplane_incoming_request, zi, 0,
5192 &zi->t_request);
5193 }
5194
5195 #endif /* HAVE_NETLINK */
5196
5197 /*
5198 * Notify dplane when namespaces are enabled and disabled. The dplane
5199 * needs to start and stop reading incoming events from the zns. In the
5200 * common case where vrfs are _not_ namespaces, there will only be one
5201 * of these.
5202 *
5203 * This is called in the main pthread.
5204 */
5205 void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
5206 {
5207 struct dplane_zns_info *zi;
5208
5209 if (IS_ZEBRA_DEBUG_DPLANE)
5210 zlog_debug("%s: %s for nsid %u", __func__,
5211 (enabled ? "ENABLED" : "DISABLED"), zns->ns_id);
5212
5213 /* Search for an existing zns info entry */
5214 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5215 if (zi->info.ns_id == zns->ns_id)
5216 break;
5217 }
5218
5219 if (enabled) {
5220 /* Create a new entry if necessary; start reading. */
5221 if (zi == NULL) {
5222 zi = XCALLOC(MTYPE_DP_NS, sizeof(*zi));
5223
5224 zi->info.ns_id = zns->ns_id;
5225
5226 zns_info_list_add_tail(&zdplane_info.dg_zns_list, zi);
5227
5228 if (IS_ZEBRA_DEBUG_DPLANE)
5229 zlog_debug("%s: nsid %u, new zi %p", __func__,
5230 zns->ns_id, zi);
5231 }
5232
5233 /* Make sure we're up-to-date with the zns object */
5234 #if defined(HAVE_NETLINK)
5235 zi->info.is_cmd = false;
5236 zi->info.sock = zns->netlink_dplane_in.sock;
5237
5238 /* Initiate requests for existing info from the OS, and
5239 * begin reading from the netlink socket.
5240 */
5241 dplane_kernel_info_request(zi);
5242 #endif
5243 } else if (zi) {
5244 if (IS_ZEBRA_DEBUG_DPLANE)
5245 zlog_debug("%s: nsid %u, deleting zi %p", __func__,
5246 zns->ns_id, zi);
5247
5248 /* Stop reading, free memory */
5249 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5250
5251 /* Stop any outstanding tasks */
5252 if (zdplane_info.dg_master) {
5253 thread_cancel_async(zdplane_info.dg_master,
5254 &zi->t_request, NULL);
5255
5256 thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
5257 NULL);
5258 }
5259
5260 XFREE(MTYPE_DP_NS, zi);
5261 }
5262 }
5263
5264 /*
5265 * Provider api to signal that work/events are available
5266 * for the dataplane pthread.
5267 */
5268 int dplane_provider_work_ready(void)
5269 {
5270 /* Note that during zebra startup, we may be offered work before
5271 * the dataplane pthread (and thread-master) are ready. We want to
5272 * enqueue the work, but the event-scheduling machinery may not be
5273 * available.
5274 */
5275 if (zdplane_info.dg_run) {
5276 thread_add_event(zdplane_info.dg_master,
5277 dplane_thread_loop, NULL, 0,
5278 &zdplane_info.dg_t_update);
5279 }
5280
5281 return AOK;
5282 }
5283
5284 /*
5285 * Enqueue a context directly to zebra main.
5286 */
5287 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
5288 {
5289 struct dplane_ctx_q temp_list;
5290
5291 /* Zebra's api takes a list, so we need to use a temporary list */
5292 TAILQ_INIT(&temp_list);
5293
5294 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
5295 (zdplane_info.dg_results_cb)(&temp_list);
5296 }
5297
5298 /*
5299 * Kernel dataplane provider
5300 */
5301
5302 static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
5303 {
5304 char buf[PREFIX_STRLEN];
5305
5306 switch (dplane_ctx_get_op(ctx)) {
5307
5308 case DPLANE_OP_ROUTE_INSTALL:
5309 case DPLANE_OP_ROUTE_UPDATE:
5310 case DPLANE_OP_ROUTE_DELETE:
5311 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5312 dplane_ctx_get_vrf(ctx), dplane_ctx_get_dest(ctx),
5313 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
5314 break;
5315
5316 case DPLANE_OP_NH_INSTALL:
5317 case DPLANE_OP_NH_UPDATE:
5318 case DPLANE_OP_NH_DELETE:
5319 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5320 dplane_ctx_get_nhe_id(ctx), ctx,
5321 dplane_op2str(dplane_ctx_get_op(ctx)));
5322 break;
5323
5324 case DPLANE_OP_LSP_INSTALL:
5325 case DPLANE_OP_LSP_UPDATE:
5326 case DPLANE_OP_LSP_DELETE:
5327 break;
5328
5329 case DPLANE_OP_PW_INSTALL:
5330 case DPLANE_OP_PW_UNINSTALL:
5331 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5332 dplane_ctx_get_ifname(ctx),
5333 dplane_op2str(ctx->zd_op), dplane_ctx_get_pw_af(ctx),
5334 dplane_ctx_get_pw_local_label(ctx),
5335 dplane_ctx_get_pw_remote_label(ctx));
5336 break;
5337
5338 case DPLANE_OP_ADDR_INSTALL:
5339 case DPLANE_OP_ADDR_UNINSTALL:
5340 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5341 dplane_op2str(dplane_ctx_get_op(ctx)),
5342 dplane_ctx_get_ifindex(ctx),
5343 dplane_ctx_get_intf_addr(ctx));
5344 break;
5345
5346 case DPLANE_OP_MAC_INSTALL:
5347 case DPLANE_OP_MAC_DELETE:
5348 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
5349 sizeof(buf));
5350
5351 zlog_debug("Dplane %s, mac %s, ifindex %u",
5352 dplane_op2str(dplane_ctx_get_op(ctx)),
5353 buf, dplane_ctx_get_ifindex(ctx));
5354 break;
5355
5356 case DPLANE_OP_NEIGH_INSTALL:
5357 case DPLANE_OP_NEIGH_UPDATE:
5358 case DPLANE_OP_NEIGH_DELETE:
5359 case DPLANE_OP_VTEP_ADD:
5360 case DPLANE_OP_VTEP_DELETE:
5361 case DPLANE_OP_NEIGH_DISCOVER:
5362 case DPLANE_OP_NEIGH_IP_INSTALL:
5363 case DPLANE_OP_NEIGH_IP_DELETE:
5364 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
5365 sizeof(buf));
5366
5367 zlog_debug("Dplane %s, ip %s, ifindex %u",
5368 dplane_op2str(dplane_ctx_get_op(ctx)),
5369 buf, dplane_ctx_get_ifindex(ctx));
5370 break;
5371
5372 case DPLANE_OP_RULE_ADD:
5373 case DPLANE_OP_RULE_DELETE:
5374 case DPLANE_OP_RULE_UPDATE:
5375 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5376 dplane_op2str(dplane_ctx_get_op(ctx)),
5377 dplane_ctx_get_ifname(ctx),
5378 dplane_ctx_get_ifindex(ctx), ctx);
5379 break;
5380
5381 case DPLANE_OP_SYS_ROUTE_ADD:
5382 case DPLANE_OP_SYS_ROUTE_DELETE:
5383 case DPLANE_OP_ROUTE_NOTIFY:
5384 case DPLANE_OP_LSP_NOTIFY:
5385 case DPLANE_OP_BR_PORT_UPDATE:
5386
5387 case DPLANE_OP_NONE:
5388 break;
5389
5390 case DPLANE_OP_IPTABLE_ADD:
5391 case DPLANE_OP_IPTABLE_DELETE: {
5392 struct zebra_pbr_iptable ipt;
5393
5394 dplane_ctx_get_pbr_iptable(ctx, &ipt);
5395 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5396 dplane_op2str(dplane_ctx_get_op(ctx)), ipt.unique,
5397 ctx);
5398 } break;
5399 case DPLANE_OP_IPSET_ADD:
5400 case DPLANE_OP_IPSET_DELETE: {
5401 struct zebra_pbr_ipset ipset;
5402
5403 dplane_ctx_get_pbr_ipset(ctx, &ipset);
5404 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5405 dplane_op2str(dplane_ctx_get_op(ctx)), ipset.unique,
5406 ctx);
5407 } break;
5408 case DPLANE_OP_IPSET_ENTRY_ADD:
5409 case DPLANE_OP_IPSET_ENTRY_DELETE: {
5410 struct zebra_pbr_ipset_entry ipent;
5411
5412 dplane_ctx_get_pbr_ipset_entry(ctx, &ipent);
5413 zlog_debug(
5414 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5415 dplane_op2str(dplane_ctx_get_op(ctx)), ipent.unique,
5416 ctx);
5417 } break;
5418 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5419 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5420 dplane_op2str(dplane_ctx_get_op(ctx)),
5421 dplane_ctx_get_ifname(ctx),
5422 family2str(dplane_ctx_neightable_get_family(ctx)));
5423 break;
5424 case DPLANE_OP_GRE_SET:
5425 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5426 dplane_op2str(dplane_ctx_get_op(ctx)),
5427 dplane_ctx_get_ifname(ctx),
5428 ctx->u.gre.link_ifindex);
5429 break;
5430
5431 case DPLANE_OP_INTF_ADDR_ADD:
5432 case DPLANE_OP_INTF_ADDR_DEL:
5433 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5434 dplane_op2str(dplane_ctx_get_op(ctx)),
5435 dplane_ctx_get_ifname(ctx),
5436 dplane_ctx_get_intf_addr(ctx));
5437 break;
5438
5439 case DPLANE_OP_INTF_NETCONFIG:
5440 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5441 dplane_op2str(dplane_ctx_get_op(ctx)),
5442 dplane_ctx_get_netconf_ifindex(ctx),
5443 dplane_ctx_get_netconf_mpls(ctx),
5444 dplane_ctx_get_netconf_mcast(ctx));
5445 break;
5446
5447 case DPLANE_OP_INTF_INSTALL:
5448 case DPLANE_OP_INTF_UPDATE:
5449 case DPLANE_OP_INTF_DELETE:
5450 zlog_debug("Dplane intf %s, idx %u, protodown %d",
5451 dplane_op2str(dplane_ctx_get_op(ctx)),
5452 dplane_ctx_get_ifindex(ctx),
5453 dplane_ctx_intf_is_protodown(ctx));
5454 break;
5455 }
5456 }
5457
5458 static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
5459 {
5460 enum zebra_dplane_result res = dplane_ctx_get_status(ctx);
5461
5462 switch (dplane_ctx_get_op(ctx)) {
5463
5464 case DPLANE_OP_ROUTE_INSTALL:
5465 case DPLANE_OP_ROUTE_UPDATE:
5466 case DPLANE_OP_ROUTE_DELETE:
5467 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5468 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
5469 1, memory_order_relaxed);
5470
5471 if ((dplane_ctx_get_op(ctx) != DPLANE_OP_ROUTE_DELETE)
5472 && (res == ZEBRA_DPLANE_REQUEST_SUCCESS)) {
5473 struct nexthop *nexthop;
5474
5475 /* Update installed nexthops to signal which have been
5476 * installed.
5477 */
5478 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
5479 nexthop)) {
5480 if (CHECK_FLAG(nexthop->flags,
5481 NEXTHOP_FLAG_RECURSIVE))
5482 continue;
5483
5484 if (CHECK_FLAG(nexthop->flags,
5485 NEXTHOP_FLAG_ACTIVE)) {
5486 SET_FLAG(nexthop->flags,
5487 NEXTHOP_FLAG_FIB);
5488 }
5489 }
5490 }
5491 break;
5492
5493 case DPLANE_OP_NH_INSTALL:
5494 case DPLANE_OP_NH_UPDATE:
5495 case DPLANE_OP_NH_DELETE:
5496 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5497 atomic_fetch_add_explicit(
5498 &zdplane_info.dg_nexthop_errors, 1,
5499 memory_order_relaxed);
5500 break;
5501
5502 case DPLANE_OP_LSP_INSTALL:
5503 case DPLANE_OP_LSP_UPDATE:
5504 case DPLANE_OP_LSP_DELETE:
5505 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5506 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors,
5507 1, memory_order_relaxed);
5508 break;
5509
5510 case DPLANE_OP_PW_INSTALL:
5511 case DPLANE_OP_PW_UNINSTALL:
5512 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5513 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
5514 memory_order_relaxed);
5515 break;
5516
5517 case DPLANE_OP_ADDR_INSTALL:
5518 case DPLANE_OP_ADDR_UNINSTALL:
5519 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5520 atomic_fetch_add_explicit(
5521 &zdplane_info.dg_intf_addr_errors, 1,
5522 memory_order_relaxed);
5523 break;
5524
5525 case DPLANE_OP_MAC_INSTALL:
5526 case DPLANE_OP_MAC_DELETE:
5527 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5528 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
5529 1, memory_order_relaxed);
5530 break;
5531
5532 case DPLANE_OP_NEIGH_INSTALL:
5533 case DPLANE_OP_NEIGH_UPDATE:
5534 case DPLANE_OP_NEIGH_DELETE:
5535 case DPLANE_OP_VTEP_ADD:
5536 case DPLANE_OP_VTEP_DELETE:
5537 case DPLANE_OP_NEIGH_DISCOVER:
5538 case DPLANE_OP_NEIGH_IP_INSTALL:
5539 case DPLANE_OP_NEIGH_IP_DELETE:
5540 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5541 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
5542 1, memory_order_relaxed);
5543 break;
5544
5545 case DPLANE_OP_RULE_ADD:
5546 case DPLANE_OP_RULE_DELETE:
5547 case DPLANE_OP_RULE_UPDATE:
5548 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5549 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors,
5550 1, memory_order_relaxed);
5551 break;
5552
5553 case DPLANE_OP_IPTABLE_ADD:
5554 case DPLANE_OP_IPTABLE_DELETE:
5555 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5556 atomic_fetch_add_explicit(
5557 &zdplane_info.dg_iptable_errors, 1,
5558 memory_order_relaxed);
5559 break;
5560
5561 case DPLANE_OP_IPSET_ADD:
5562 case DPLANE_OP_IPSET_DELETE:
5563 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5564 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors,
5565 1, memory_order_relaxed);
5566 break;
5567
5568 case DPLANE_OP_IPSET_ENTRY_ADD:
5569 case DPLANE_OP_IPSET_ENTRY_DELETE:
5570 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5571 atomic_fetch_add_explicit(
5572 &zdplane_info.dg_ipset_entry_errors, 1,
5573 memory_order_relaxed);
5574 break;
5575
5576 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5577 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5578 atomic_fetch_add_explicit(
5579 &zdplane_info.dg_neightable_errors, 1,
5580 memory_order_relaxed);
5581 break;
5582
5583 case DPLANE_OP_GRE_SET:
5584 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5585 atomic_fetch_add_explicit(
5586 &zdplane_info.dg_gre_set_errors, 1,
5587 memory_order_relaxed);
5588 break;
5589
5590 case DPLANE_OP_INTF_INSTALL:
5591 case DPLANE_OP_INTF_UPDATE:
5592 case DPLANE_OP_INTF_DELETE:
5593 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5594 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors,
5595 1, memory_order_relaxed);
5596 break;
5597
5598 /* Ignore 'notifications' - no-op */
5599 case DPLANE_OP_SYS_ROUTE_ADD:
5600 case DPLANE_OP_SYS_ROUTE_DELETE:
5601 case DPLANE_OP_ROUTE_NOTIFY:
5602 case DPLANE_OP_LSP_NOTIFY:
5603 case DPLANE_OP_BR_PORT_UPDATE:
5604 break;
5605
5606 /* TODO -- error counters for incoming events? */
5607 case DPLANE_OP_INTF_ADDR_ADD:
5608 case DPLANE_OP_INTF_ADDR_DEL:
5609 case DPLANE_OP_INTF_NETCONFIG:
5610 break;
5611
5612 case DPLANE_OP_NONE:
5613 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5614 atomic_fetch_add_explicit(&zdplane_info.dg_other_errors,
5615 1, memory_order_relaxed);
5616 break;
5617 }
5618 }
5619
5620 static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
5621 struct zebra_dplane_ctx *ctx)
5622 {
5623 zebra_pbr_process_iptable(ctx);
5624 dplane_provider_enqueue_out_ctx(prov, ctx);
5625 }
5626
5627 static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
5628 struct zebra_dplane_ctx *ctx)
5629 {
5630 zebra_pbr_process_ipset(ctx);
5631 dplane_provider_enqueue_out_ctx(prov, ctx);
5632 }
5633
5634 static void
5635 kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
5636 struct zebra_dplane_ctx *ctx)
5637 {
5638 zebra_pbr_process_ipset_entry(ctx);
5639 dplane_provider_enqueue_out_ctx(prov, ctx);
5640 }
5641
5642 /*
5643 * Kernel provider callback
5644 */
5645 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
5646 {
5647 struct zebra_dplane_ctx *ctx, *tctx;
5648 struct dplane_ctx_q work_list;
5649 int counter, limit;
5650
5651 TAILQ_INIT(&work_list);
5652
5653 limit = dplane_provider_get_work_limit(prov);
5654
5655 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5656 zlog_debug("dplane provider '%s': processing",
5657 dplane_provider_get_name(prov));
5658
5659 for (counter = 0; counter < limit; counter++) {
5660 ctx = dplane_provider_dequeue_in_ctx(prov);
5661 if (ctx == NULL)
5662 break;
5663 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5664 kernel_dplane_log_detail(ctx);
5665
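/* PBR iptable/ipset updates are handed to the zebra_pbr handlers
 * directly; all other contexts are collected on the work list and
 * passed to the kernel in one batch via kernel_update_multi() below.
 */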
5666 if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
5667 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
5668 kernel_dplane_process_iptable(prov, ctx);
5669 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
5670 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
5671 kernel_dplane_process_ipset(prov, ctx);
5672 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
5673 || dplane_ctx_get_op(ctx)
5674 == DPLANE_OP_IPSET_ENTRY_DELETE))
5675 kernel_dplane_process_ipset_entry(prov, ctx);
5676 else
5677 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5678 }
5679
5680 kernel_update_multi(&work_list);
5681
5682 TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
5683 kernel_dplane_handle_result(ctx);
5684
5685 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5686 dplane_provider_enqueue_out_ctx(prov, ctx);
5687 }
5688
5689 /* Ensure that we'll run the work loop again if there's still
5690 * more work to do.
5691 */
5692 if (counter >= limit) {
5693 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5694 zlog_debug("dplane provider '%s' reached max updates %d",
5695 dplane_provider_get_name(prov), counter);
5696
5697 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
5698 1, memory_order_relaxed);
5699
5700 dplane_provider_work_ready();
5701 }
5702
5703 return 0;
5704 }
5705
5706 #ifdef DPLANE_TEST_PROVIDER
5707
5708 /*
5709 * Test dataplane provider plugin
5710 */
5711
5712 /*
5713 * Test provider process callback
5714 */
5715 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
5716 {
5717 struct zebra_dplane_ctx *ctx;
5718 int counter, limit;
5719
5720 /* Just moving from 'in' queue to 'out' queue */
5721
5722 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5723 zlog_debug("dplane provider '%s': processing",
5724 dplane_provider_get_name(prov));
5725
5726 limit = dplane_provider_get_work_limit(prov);
5727
5728 for (counter = 0; counter < limit; counter++) {
5729 ctx = dplane_provider_dequeue_in_ctx(prov);
5730 if (ctx == NULL)
5731 break;
5732
5733 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5734 zlog_debug("dplane provider '%s': op %s",
5735 dplane_provider_get_name(prov),
5736 dplane_op2str(dplane_ctx_get_op(ctx)));
5737
5738 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
5739
5740 dplane_provider_enqueue_out_ctx(prov, ctx);
5741 }
5742
5743 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5744 zlog_debug("dplane provider '%s': processed %d",
5745 dplane_provider_get_name(prov), counter);
5746
5747 /* Ensure that we'll run the work loop again if there's still
5748 * more work to do.
5749 */
5750 if (counter >= limit)
5751 dplane_provider_work_ready();
5752
5753 return 0;
5754 }
5755
5756 /*
5757 * Test provider shutdown/fini callback
5758 */
5759 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
5760 bool early)
5761 {
5762 if (IS_ZEBRA_DEBUG_DPLANE)
5763 zlog_debug("dplane provider '%s': %sshutdown",
5764 dplane_provider_get_name(prov),
5765 early ? "early " : "");
5766
5767 return 0;
5768 }
5769 #endif /* DPLANE_TEST_PROVIDER */
5770
5771 /*
5772 * Register default kernel provider
5773 */
5774 static void dplane_provider_init(void)
5775 {
5776 int ret;
5777
5778 ret = dplane_provider_register("Kernel",
5779 DPLANE_PRIO_KERNEL,
5780 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5781 kernel_dplane_process_func,
5782 NULL,
5783 NULL, NULL);
5784
5785 if (ret != AOK)
5786 zlog_err("Unable to register kernel dplane provider: %d",
5787 ret);
5788
5789 #ifdef DPLANE_TEST_PROVIDER
5790 /* Optional test provider ... */
5791 ret = dplane_provider_register("Test",
5792 DPLANE_PRIO_PRE_KERNEL,
5793 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5794 test_dplane_process_func,
5795 test_dplane_shutdown_func,
5796 NULL /* data */, NULL);
5797
5798 if (ret != AOK)
5799 zlog_err("Unable to register test dplane provider: %d",
5800 ret);
5801 #endif /* DPLANE_TEST_PROVIDER */
5802 }
5803
5804 /*
5805 * Allow zebra code to walk the queue of pending contexts, evaluate each one
5806 * using a callback function. If the function returns 'true', the context
5807 * will be dequeued and freed without being processed.
5808 */
5809 int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
5810 void *arg), void *val)
5811 {
5812 struct zebra_dplane_ctx *ctx, *temp;
5813 struct dplane_ctx_q work_list;
5814
5815 TAILQ_INIT(&work_list);
5816
5817 if (context_cb == NULL)
5818 goto done;
5819
5820 /* Walk the pending context queue under the dplane lock. */
5821 DPLANE_LOCK();
5822
5823 TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
5824 temp) {
5825 if (context_cb(ctx, val)) {
5826 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5827 zd_q_entries);
5828 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5829 }
5830 }
5831
5832 DPLANE_UNLOCK();
5833
5834 /* Now free any contexts selected by the caller, without holding
5835 * the lock.
5836 */
5837 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
5838 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5839 dplane_ctx_fini(&ctx);
5840 }
5841
5842 done:
5843
5844 return 0;
5845 }
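/* A minimal sketch of a callback for dplane_clean_ctx_queue() above
 * (hypothetical names): select contexts belonging to a particular vrf
 * so they are dequeued and freed without being processed.
 *
 *   static bool drop_ctx_for_vrf(struct zebra_dplane_ctx *ctx, void *arg)
 *   {
 *           vrf_id_t *vrf_id = arg;
 *
 *           return dplane_ctx_get_vrf(ctx) == *vrf_id;
 *   }
 *
 *   dplane_clean_ctx_queue(drop_ctx_for_vrf, &vrf_id);
 */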
5846
5847 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5848 * simplified or skipped during shutdown processing.
5849 */
5850 bool dplane_is_in_shutdown(void)
5851 {
5852 return zdplane_info.dg_is_shutdown;
5853 }
5854
5855 /*
5856 * Enable collection of extra info about interfaces in route updates.
5857 */
5858 void dplane_enable_intf_extra_info(void)
5859 {
5860 dplane_collect_extra_intf_info = true;
5861 }
5862
5863 /*
5864 * Early or pre-shutdown, de-init notification api. This runs pretty
5865 * early during zebra shutdown, as a signal to stop new work and prepare
5866 * for updates generated by shutdown/cleanup activity, as zebra tries to
5867 * remove everything it's responsible for.
5868 * NB: This runs in the main zebra pthread context.
5869 */
5870 void zebra_dplane_pre_finish(void)
5871 {
5872 struct zebra_dplane_provider *prov;
5873
5874 if (IS_ZEBRA_DEBUG_DPLANE)
5875 zlog_debug("Zebra dataplane pre-finish called");
5876
5877 zdplane_info.dg_is_shutdown = true;
5878
5879 /* Notify provider(s) of pending shutdown. */
5880 TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
5881 if (prov->dp_fini == NULL)
5882 continue;
5883
5884 prov->dp_fini(prov, true /* early */);
5885 }
5886 }
5887
5888 /*
5889 * Utility to determine whether work remains enqueued within the dplane;
5890 * used during system shutdown processing.
5891 */
5892 static bool dplane_work_pending(void)
5893 {
5894 bool ret = false;
5895 struct zebra_dplane_ctx *ctx;
5896 struct zebra_dplane_provider *prov;
5897
5898 /* TODO -- just checking incoming/pending work for now, must check
5899 * providers
5900 */
5901 DPLANE_LOCK();
5902 {
5903 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5904 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5905 }
5906 DPLANE_UNLOCK();
5907
5908 if (ctx != NULL) {
5909 ret = true;
5910 goto done;
5911 }
5912
5913 while (prov) {
5914
5915 dplane_provider_lock(prov);
5916
5917 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5918 if (ctx == NULL)
5919 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5920
5921 dplane_provider_unlock(prov);
5922
5923 if (ctx != NULL)
5924 break;
5925
5926 DPLANE_LOCK();
5927 prov = TAILQ_NEXT(prov, dp_prov_link);
5928 DPLANE_UNLOCK();
5929 }
5930
5931 if (ctx != NULL)
5932 ret = true;
5933
5934 done:
5935 return ret;
5936 }
5937
5938 /*
5939 * Shutdown-time intermediate callback, used to determine when all pending
5940 * in-flight updates are done. If there's still work to do, reschedules itself.
5941 * If all work is done, schedules an event to the main zebra thread for
5942 * final zebra shutdown.
5943 * This runs in the dplane pthread context.
5944 */
5945 static void dplane_check_shutdown_status(struct thread *event)
5946 {
5947 struct dplane_zns_info *zi;
5948
5949 if (IS_ZEBRA_DEBUG_DPLANE)
5950 zlog_debug("Zebra dataplane shutdown status check called");
5951
5952 /* Remove any zns info entries as we stop the dplane pthread. */
5953 frr_each_safe (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5954 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5955
5956 if (zdplane_info.dg_master) {
5957 thread_cancel(&zi->t_read);
5958 thread_cancel(&zi->t_request);
5959 }
5960
5961 XFREE(MTYPE_DP_NS, zi);
5962 }
5963
5964 if (dplane_work_pending()) {
5965 /* Reschedule dplane check on a short timer */
5966 thread_add_timer_msec(zdplane_info.dg_master,
5967 dplane_check_shutdown_status,
5968 NULL, 100,
5969 &zdplane_info.dg_t_shutdown_check);
5970
5971 /* TODO - give up and stop waiting after a short time? */
5972
5973 } else {
5974 /* We appear to be done - schedule a final callback event
5975 * for the zebra main pthread.
5976 */
5977 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
5978 }
5979 }
5980
5981 /*
5982 * Shutdown, de-init api. This runs pretty late during shutdown,
5983 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5984 * At this point, dplane work may still remain to be done, so we can't just
5985 * blindly terminate. If there's still work to do, we'll periodically check
5986 * and when done, we'll enqueue a task to the zebra main thread for final
5987 * termination processing.
5988 *
5989 * NB: This runs in the main zebra thread context.
5990 */
5991 void zebra_dplane_finish(void)
5992 {
5993 if (IS_ZEBRA_DEBUG_DPLANE)
5994 zlog_debug("Zebra dataplane fini called");
5995
5996 thread_add_event(zdplane_info.dg_master,
5997 dplane_check_shutdown_status, NULL, 0,
5998 &zdplane_info.dg_t_shutdown_check);
5999 }
6000
6001 /*
6002 * Main dataplane pthread event loop. The thread takes new incoming work
6003 * and offers it to the first provider. It then iterates through the
6004 * providers, taking complete work from each one and offering it
6005 * to the next in order. At each step, a limited number of updates are
6006 * processed during a cycle in order to provide some fairness.
6007 *
6008 * This loop through the providers is only run once, so that the dataplane
6009 * pthread can look for other pending work - such as i/o work on behalf of
6010 * providers.
6011 */
6012 static void dplane_thread_loop(struct thread *event)
6013 {
6014 struct dplane_ctx_q work_list;
6015 struct dplane_ctx_q error_list;
6016 struct zebra_dplane_provider *prov;
6017 struct zebra_dplane_ctx *ctx, *tctx;
6018 int limit, counter, error_counter;
6019 uint64_t curr, high;
6020 bool reschedule = false;
6021
6022 /* Capture work limit per cycle */
6023 limit = zdplane_info.dg_updates_per_cycle;
6024
6025 /* Init temporary lists used to move contexts among providers */
6026 TAILQ_INIT(&work_list);
6027 TAILQ_INIT(&error_list);
6028 error_counter = 0;
6029
6030 /* Check for zebra shutdown */
6031 if (!zdplane_info.dg_run)
6032 return;
6033
6034 /* Dequeue some incoming work from zebra (if any) onto the temporary
6035 * working list.
6036 */
6037 DPLANE_LOCK();
6038
6039 /* Locate initial registered provider */
6040 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6041
6042 /* Move new work from incoming list to temp list */
6043 for (counter = 0; counter < limit; counter++) {
6044 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
6045 if (ctx) {
6046 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
6047 zd_q_entries);
6048
6049 ctx->zd_provider = prov->dp_id;
6050
6051 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
6052 } else {
6053 break;
6054 }
6055 }
6056
6057 DPLANE_UNLOCK();
6058
6059 atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
6060 memory_order_relaxed);
6061
6062 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6063 zlog_debug("dplane: incoming new work counter: %d", counter);
6064
6065 /* Iterate through the registered providers, offering new incoming
6066 * work. If the provider has outgoing work in its queue, take that
6067 * work for the next provider
6068 */
6069 while (prov) {
6070
6071 /* At each iteration, the temporary work list has 'counter'
6072 * items.
6073 */
6074 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6075 zlog_debug("dplane enqueues %d new work to provider '%s'",
6076 counter, dplane_provider_get_name(prov));
6077
6078 /* Capture current provider id in each context; check for
6079 * error status.
6080 */
6081 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
6082 if (dplane_ctx_get_status(ctx) ==
6083 ZEBRA_DPLANE_REQUEST_SUCCESS) {
6084 ctx->zd_provider = prov->dp_id;
6085 } else {
6086 /*
6087 * TODO -- improve error-handling: recirc
6088 * errors backwards so that providers can
6089 * 'undo' their work (if they want to)
6090 */
6091
6092 /* Move to error list; will be returned
6093 			 * to zebra main.
6094 */
6095 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
6096 TAILQ_INSERT_TAIL(&error_list,
6097 ctx, zd_q_entries);
6098 error_counter++;
6099 }
6100 }
6101
6102 /* Enqueue new work to the provider */
6103 dplane_provider_lock(prov);
6104
6105 if (TAILQ_FIRST(&work_list))
6106 TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
6107 zd_q_entries);
6108
6109 atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
6110 memory_order_relaxed);
6111 atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
6112 memory_order_relaxed);
6113 curr = atomic_load_explicit(&prov->dp_in_queued,
6114 memory_order_relaxed);
6115 high = atomic_load_explicit(&prov->dp_in_max,
6116 memory_order_relaxed);
6117 if (curr > high)
6118 atomic_store_explicit(&prov->dp_in_max, curr,
6119 memory_order_relaxed);
6120
6121 dplane_provider_unlock(prov);
6122
6123 /* Reset the temp list (though the 'concat' may have done this
6124 * already), and the counter
6125 */
6126 TAILQ_INIT(&work_list);
6127 counter = 0;
6128
6129 /* Call into the provider code. Note that this is
6130 * unconditional: we offer to do work even if we don't enqueue
6131 * any _new_ work.
6132 */
6133 (*prov->dp_fp)(prov);
6134
6135 /* Check for zebra shutdown */
6136 if (!zdplane_info.dg_run)
6137 break;
6138
6139 /* Dequeue completed work from the provider */
6140 dplane_provider_lock(prov);
6141
6142 while (counter < limit) {
6143 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
6144 if (ctx) {
6145 TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
6146 zd_q_entries);
6147
6148 TAILQ_INSERT_TAIL(&work_list,
6149 ctx, zd_q_entries);
6150 counter++;
6151 } else
6152 break;
6153 }
6154
6155 dplane_provider_unlock(prov);
6156
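/* If we hit the work limit while draining this provider's output queue, remember to reschedule so the remainder is picked up on a later pass */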
6157 if (counter >= limit)
6158 reschedule = true;
6159
6160 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6161 zlog_debug("dplane dequeues %d completed work from provider %s",
6162 counter, dplane_provider_get_name(prov));
6163
6164 /* Locate next provider */
6165 DPLANE_LOCK();
6166 prov = TAILQ_NEXT(prov, dp_prov_link);
6167 DPLANE_UNLOCK();
6168 }
6169
6170 /*
6171 * We hit the work limit while processing at least one provider's
6172 * output queue - ensure we come back and finish it.
6173 */
6174 if (reschedule)
6175 dplane_provider_work_ready();
6176
6177 /* After all providers have been serviced, enqueue any completed
6178 * work and any errors back to zebra so it can process the results.
6179 */
6180 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6181 zlog_debug("dplane has %d completed, %d errors, for zebra main",
6182 counter, error_counter);
6183
6184 /*
6185 * Hand the lists through the API to zebra main,
6186 * to reduce the number of lock/unlock cycles.
6187 */
6188
6189 /* Call through to zebra main with the errored contexts first */
6190 (zdplane_info.dg_results_cb)(&error_list);
6191
6192 TAILQ_INIT(&error_list);
6193
6194 /* Then call through to zebra main with the completed work */
6195 (zdplane_info.dg_results_cb)(&work_list);
6196
6197 TAILQ_INIT(&work_list);
6198 }
6199
6200 /*
6201 * Final phase of shutdown, after all work enqueued to dplane has been
6202 * processed. This is called from the zebra main pthread context.
6203 */
6204 void zebra_dplane_shutdown(void)
6205 {
6206 struct zebra_dplane_provider *dp;
6207
6208 if (IS_ZEBRA_DEBUG_DPLANE)
6209 zlog_debug("Zebra dataplane shutdown called");
6210
6211 /* Stop dplane thread, if it's running */
6212
6213 zdplane_info.dg_run = false;
6214
6215 if (zdplane_info.dg_t_update)
6216 thread_cancel_async(zdplane_info.dg_t_update->master,
6217 &zdplane_info.dg_t_update, NULL);
6218
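/* Stop the dplane pthread and wait for it to exit */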
6219 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
6220
6221 /* Destroy pthread */
6222 frr_pthread_destroy(zdplane_info.dg_pthread);
6223 zdplane_info.dg_pthread = NULL;
6224 zdplane_info.dg_master = NULL;
6225
6226 /* Notify provider(s) of final shutdown.
6227 * Note that this call is in the main pthread, so providers must
6228 * be prepared for that.
6229 */
6230 TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
6231 if (dp->dp_fini == NULL)
6232 continue;
6233
6234 dp->dp_fini(dp, false);
6235 }
6236
6237 /* TODO -- Clean-up provider objects */
6238
6239 /* TODO -- Clean queue(s), free memory */
6240 }
6241
6242 /*
6243 * Initialize the dataplane module during startup, internal/private version
6244 */
6245 static void zebra_dplane_init_internal(void)
6246 {
6247 memset(&zdplane_info, 0, sizeof(zdplane_info));
6248
6249 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
6250
6251 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
6252 TAILQ_INIT(&zdplane_info.dg_providers_q);
6253 zns_info_list_init(&zdplane_info.dg_zns_list);
6254
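/* Set default work limits: new work accepted per cycle and max queued updates */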
6255 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
6256
6257 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
6258
6259 /* Register default kernel 'provider' during init */
6260 dplane_provider_init();
6261 }
6262
6263 /*
6264 * Start the dataplane pthread. This step needs to be run later than the
6265 * 'init' step, in case zebra has forked.
6266 */
6267 void zebra_dplane_start(void)
6268 {
6269 struct dplane_zns_info *zi;
6270 struct zebra_dplane_provider *prov;
6271 struct frr_pthread_attr pattr = {
6272 .start = frr_pthread_attr_default.start,
6273 .stop = frr_pthread_attr_default.stop
6274 };
6275
6276 /* Start dataplane pthread */
6277
6278 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
6279 "zebra_dplane");
6280
6281 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
6282
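/* Mark the dplane as running before the first event is enqueued */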
6283 zdplane_info.dg_run = true;
6284
6285 /* Enqueue an initial event for the dataplane pthread */
6286 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
6287 &zdplane_info.dg_t_update);
6288
6289 /* Enqueue requests and reads if necessary */
6290 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
6291 #if defined(HAVE_NETLINK)
6292 thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
6293 zi, zi->info.sock, &zi->t_read);
6294 dplane_kernel_info_request(zi);
6295 #endif
6296 }
6297
6298 /* Call start callbacks for registered providers */
6299
6300 DPLANE_LOCK();
6301 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6302 DPLANE_UNLOCK();
6303
6304 while (prov) {
6305
6306 if (prov->dp_start)
6307 (prov->dp_start)(prov);
6308
6309 /* Locate next provider */
6310 DPLANE_LOCK();
6311 prov = TAILQ_NEXT(prov, dp_prov_link);
6312 DPLANE_UNLOCK();
6313 }
6314
6315 frr_pthread_run(zdplane_info.dg_pthread, NULL);
6316 }
6317
6318 /*
6319 * Initialize the dataplane module at startup; called by zebra rib_init()
6320 */
6321 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
6322 {
6323 zebra_dplane_init_internal();
6324 zdplane_info.dg_results_cb = results_fp;
6325 }