1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/netconf_netlink.h"
32 #include "zebra/zebra_router.h"
33 #include "zebra/zebra_dplane.h"
34 #include "zebra/zebra_vxlan_private.h"
35 #include "zebra/zebra_mpls.h"
36 #include "zebra/rt.h"
37 #include "zebra/debug.h"
38 #include "zebra/zebra_pbr.h"
39 #include "printfrr.h"
40
41 /* Memory types */
42 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx");
43 DEFINE_MTYPE_STATIC(ZEBRA, DP_INTF, "Zebra DPlane Intf");
44 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider");
45 DEFINE_MTYPE_STATIC(ZEBRA, DP_NETFILTER, "Zebra Netfilter Internal Object");
46 DEFINE_MTYPE_STATIC(ZEBRA, DP_NS, "DPlane NSes");
47
48 #ifndef AOK
49 # define AOK 0
50 #endif
51
52 /* Control for collection of extra interface info with route updates; a plugin
53 * can enable the extra info via a dplane API.
54 */
55 static bool dplane_collect_extra_intf_info;
56
57 /* Enable test dataplane provider */
58 /*#define DPLANE_TEST_PROVIDER 1 */
59
60 /* Default value for max queued incoming updates */
61 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
62
63 /* Default value for new work per cycle */
64 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
65
66 /* Validation check macro for context blocks */
67 /* #define DPLANE_DEBUG 1 */
68
69 #ifdef DPLANE_DEBUG
70
71 # define DPLANE_CTX_VALID(p) \
72 assert((p) != NULL)
73
74 #else
75
76 # define DPLANE_CTX_VALID(p)
77
78 #endif /* DPLANE_DEBUG */
79
80 /*
81 * Nexthop information captured for nexthop/nexthop group updates
82 */
83 struct dplane_nexthop_info {
84 uint32_t id;
85 uint32_t old_id;
86 afi_t afi;
87 vrf_id_t vrf_id;
88 int type;
89
90 struct nexthop_group ng;
91 struct nh_grp nh_grp[MULTIPATH_NUM];
92 uint8_t nh_grp_count;
93 };
94
95 /*
96 * Optional extra info about interfaces used in route updates' nexthops.
97 */
98 struct dplane_intf_extra {
99 vrf_id_t vrf_id;
100 uint32_t ifindex;
101 uint32_t flags;
102 uint32_t status;
103
104 TAILQ_ENTRY(dplane_intf_extra) link;
105 };
106
107 /*
108 * Route information captured for route updates.
109 */
110 struct dplane_route_info {
111
112 /* Dest and (optional) source prefixes */
113 struct prefix zd_dest;
114 struct prefix zd_src;
115
116 afi_t zd_afi;
117 safi_t zd_safi;
118
119 int zd_type;
120 int zd_old_type;
121
122 route_tag_t zd_tag;
123 route_tag_t zd_old_tag;
124 uint32_t zd_metric;
125 uint32_t zd_old_metric;
126
127 uint16_t zd_instance;
128 uint16_t zd_old_instance;
129
130 uint8_t zd_distance;
131 uint8_t zd_old_distance;
132
133 uint32_t zd_mtu;
134 uint32_t zd_nexthop_mtu;
135
136 /* Nexthop hash entry info */
137 struct dplane_nexthop_info nhe;
138
139 /* Nexthops */
140 uint32_t zd_nhg_id;
141 struct nexthop_group zd_ng;
142
143 /* Backup nexthops (if present) */
144 struct nexthop_group backup_ng;
145
146 /* "Previous" nexthops, used only in route updates without netlink */
147 struct nexthop_group zd_old_ng;
148 struct nexthop_group old_backup_ng;
149
150 /* Optional list of extra interface info */
151 TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
152 };
153
154 /*
155 * Pseudowire info for the dataplane
156 */
157 struct dplane_pw_info {
158 int type;
159 int af;
160 int status;
161 uint32_t flags;
162 uint32_t nhg_id;
163 union g_addr dest;
164 mpls_label_t local_label;
165 mpls_label_t remote_label;
166
167 /* Nexthops that are valid and installed */
168 struct nexthop_group fib_nhg;
169
170 /* Primary and backup nexthop sets, copied from the resolving route. */
171 struct nexthop_group primary_nhg;
172 struct nexthop_group backup_nhg;
173
174 union pw_protocol_fields fields;
175 };
176
177 /*
178 * Bridge port info for the dataplane
179 */
180 struct dplane_br_port_info {
181 uint32_t sph_filter_cnt;
182 struct in_addr sph_filters[ES_VTEP_MAX_CNT];
183 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h */
184 uint32_t flags;
185 uint32_t backup_nhg_id;
186 };
187
188 /*
189 * Interface/prefix info for the dataplane
190 */
191 struct dplane_intf_info {
192
193 uint32_t metric;
194 uint32_t flags;
195
196 bool protodown;
197 bool pd_reason_val;
198
199 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
200 #define DPLANE_INTF_SECONDARY (1 << 1)
201 #define DPLANE_INTF_BROADCAST (1 << 2)
202 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
203 #define DPLANE_INTF_HAS_LABEL (1 << 4)
204
205 /* Interface address/prefix */
206 struct prefix prefix;
207
208 /* Dest address, for p2p, or broadcast prefix */
209 struct prefix dest_prefix;
210
211 char *label;
212 char label_buf[32];
213 };
214
215 /*
216 * EVPN MAC address info for the dataplane.
217 */
218 struct dplane_mac_info {
219 vlanid_t vid;
220 ifindex_t br_ifindex;
221 struct ethaddr mac;
222 struct in_addr vtep_ip;
223 bool is_sticky;
224 uint32_t nhg_id;
225 uint32_t update_flags;
226 };
227
228 /*
229 * Neighbor info for the dataplane
230 */
231 struct dplane_neigh_info {
232 struct ipaddr ip_addr;
233 union {
234 struct ethaddr mac;
235 struct ipaddr ip_addr;
236 } link;
237 uint32_t flags;
238 uint16_t state;
239 uint32_t update_flags;
240 };
241
242 /*
243 * Neighbor Table
244 */
245 struct dplane_neigh_table {
246 uint8_t family;
247 uint32_t app_probes;
248 uint32_t ucast_probes;
249 uint32_t mcast_probes;
250 };
251
252 /*
253 * Policy based routing rule info for the dataplane
254 */
255 struct dplane_ctx_rule {
256 uint32_t priority;
257
258 /* The route table pointed to by this rule */
259 uint32_t table;
260
261 /* Filter criteria */
262 uint32_t filter_bm;
263 uint32_t fwmark;
264 uint8_t dsfield;
265 struct prefix src_ip;
266 struct prefix dst_ip;
267 uint8_t ip_proto;
268
269 uint8_t action_pcp;
270 uint16_t action_vlan_id;
271 uint16_t action_vlan_flags;
272
273 uint32_t action_queue_id;
274
275 char ifname[INTERFACE_NAMSIZ + 1];
276 };
277
278 struct dplane_rule_info {
279 /*
280 * Originating zclient sock fd, so we know whom to send
281 * results back to.
282 */
283 int sock;
284
285 int unique;
286 int seq;
287
288 struct dplane_ctx_rule new;
289 struct dplane_ctx_rule old;
290 };
291
292 struct dplane_gre_ctx {
293 uint32_t link_ifindex;
294 unsigned int mtu;
295 struct zebra_l2info_gre info;
296 };
297
298
299 /*
300 * Network interface configuration info - aligned with netlink's NETCONF
301 * info. The flag values are public, in the dplane.h file.
302 */
303 struct dplane_netconf_info {
304 enum dplane_netconf_status_e mpls_val;
305 enum dplane_netconf_status_e mcast_val;
306 };
307
308 /*
309 * The context block used to exchange info about route updates across
310 * the boundary between the zebra main context (and pthread) and the
311 * dataplane layer (and pthread).
312 */
313 struct zebra_dplane_ctx {
314
315 /* Operation code */
316 enum dplane_op_e zd_op;
317
318 /* Status on return */
319 enum zebra_dplane_result zd_status;
320
321 /* Dplane provider id */
322 uint32_t zd_provider;
323
324 /* Flags - used by providers, for example */
325 int zd_flags;
326
327 bool zd_is_update;
328
329 uint32_t zd_seq;
330 uint32_t zd_old_seq;
331
332 /* Some updates may be generated by notifications: allow the
333 * plugin to notice and ignore results from its own notifications.
334 */
335 uint32_t zd_notif_provider;
336
337 /* TODO -- internal/sub-operation status? */
338 enum zebra_dplane_result zd_remote_status;
339 enum zebra_dplane_result zd_kernel_status;
340
341 vrf_id_t zd_vrf_id;
342 uint32_t zd_table_id;
343
344 char zd_ifname[INTERFACE_NAMSIZ];
345 ifindex_t zd_ifindex;
346
347 /* Support info for different kinds of updates */
348 union {
349 struct dplane_route_info rinfo;
350 struct zebra_lsp lsp;
351 struct dplane_pw_info pw;
352 struct dplane_br_port_info br_port;
353 struct dplane_intf_info intf;
354 struct dplane_mac_info macinfo;
355 struct dplane_neigh_info neigh;
356 struct dplane_rule_info rule;
357 struct zebra_pbr_iptable iptable;
358 struct zebra_pbr_ipset ipset;
359 struct {
360 struct zebra_pbr_ipset_entry entry;
361 struct zebra_pbr_ipset_info info;
362 } ipset_entry;
363 struct dplane_neigh_table neightable;
364 struct dplane_gre_ctx gre;
365 struct dplane_netconf_info netconf;
366 } u;
367
368 /* Namespace info, used especially for netlink kernel communication */
369 struct zebra_dplane_info zd_ns_info;
370
371 /* Embedded list linkage */
372 TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
373 };
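/*
 * Illustrative sketch (not part of the original code): which union member
 * is valid in a ctx depends on zd_op.  Route operations use u.rinfo, LSP
 * operations use u.lsp, and so on; a hypothetical consumer might dispatch
 * on the op before touching the union:
 *
 *     switch (dplane_ctx_get_op(ctx)) {
 *     case DPLANE_OP_ROUTE_INSTALL:
 *         p = dplane_ctx_get_dest(ctx);         // reads u.rinfo
 *         break;
 *     case DPLANE_OP_LSP_INSTALL:
 *         label = dplane_ctx_get_in_label(ctx); // reads u.lsp
 *         break;
 *     default:
 *         break;
 *     }
 */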
374
375 /* Flag that can be set by a pre-kernel provider as a signal that an update
376 * should bypass the kernel.
377 */
378 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
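/*
 * Illustrative sketch (assumption, not from the original source): a
 * provider that runs before the kernel provider could mark an update so
 * the kernel step is skipped, e.g. inside its dp_fp callback:
 *
 *     if (handled_in_hardware(ctx))             // hypothetical helper
 *         dplane_ctx_set_skip_kernel(ctx);
 *
 * The kernel-level code can then test dplane_ctx_is_skip_kernel(ctx) and
 * bypass the OS update for that context.
 */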
379
380
381 /*
382 * Registration block for one dataplane provider.
383 */
384 struct zebra_dplane_provider {
385 /* Name */
386 char dp_name[DPLANE_PROVIDER_NAMELEN + 1];
387
388 /* Priority, for ordering among providers */
389 uint8_t dp_priority;
390
391 /* Id value */
392 uint32_t dp_id;
393
394 /* Mutex */
395 pthread_mutex_t dp_mutex;
396
397 /* Plugin-provided extra data */
398 void *dp_data;
399
400 /* Flags */
401 int dp_flags;
402
403 int (*dp_start)(struct zebra_dplane_provider *prov);
404
405 int (*dp_fp)(struct zebra_dplane_provider *prov);
406
407 int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
408
409 _Atomic uint32_t dp_in_counter;
410 _Atomic uint32_t dp_in_queued;
411 _Atomic uint32_t dp_in_max;
412 _Atomic uint32_t dp_out_counter;
413 _Atomic uint32_t dp_out_queued;
414 _Atomic uint32_t dp_out_max;
415 _Atomic uint32_t dp_error_counter;
416
417 /* Queue of contexts inbound to the provider */
418 struct dplane_ctx_q dp_ctx_in_q;
419
420 /* Queue of completed contexts outbound from the provider back
421 * towards the dataplane module.
422 */
423 struct dplane_ctx_q dp_ctx_out_q;
424
425 /* Embedded list linkage for provider objects */
426 TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
427 };
428
429 /* Declare types for list of zns info objects */
430 PREDECL_DLIST(zns_info_list);
431
432 struct dplane_zns_info {
433 struct zebra_dplane_info info;
434
435 /* Request data from the OS */
436 struct thread *t_request;
437
438 /* Read event */
439 struct thread *t_read;
440
441 /* List linkage */
442 struct zns_info_list_item link;
443 };
444
445 /*
446 * Globals
447 */
448 static struct zebra_dplane_globals {
449 /* Mutex to control access to dataplane components */
450 pthread_mutex_t dg_mutex;
451
452 /* Results callback registered by zebra 'core' */
453 int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
454
455 /* Sentinel for beginning of shutdown */
456 volatile bool dg_is_shutdown;
457
458 /* Sentinel for end of shutdown */
459 volatile bool dg_run;
460
461 /* Update context queue inbound to the dataplane */
462 TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
463
464 /* Ordered list of providers */
465 TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
466
467 /* List of info about each zns */
468 struct zns_info_list_head dg_zns_list;
469
470 /* Counter used to assign internal ids to providers */
471 uint32_t dg_provider_id;
472
473 /* Limit number of pending, unprocessed updates */
474 _Atomic uint32_t dg_max_queued_updates;
475
476 /* Control whether system route notifications should be produced. */
477 bool dg_sys_route_notifs;
478
479 /* Limit number of new updates dequeued at once, to pace an
480 * incoming burst.
481 */
482 uint32_t dg_updates_per_cycle;
483
484 _Atomic uint32_t dg_routes_in;
485 _Atomic uint32_t dg_routes_queued;
486 _Atomic uint32_t dg_routes_queued_max;
487 _Atomic uint32_t dg_route_errors;
488 _Atomic uint32_t dg_other_errors;
489
490 _Atomic uint32_t dg_nexthops_in;
491 _Atomic uint32_t dg_nexthop_errors;
492
493 _Atomic uint32_t dg_lsps_in;
494 _Atomic uint32_t dg_lsp_errors;
495
496 _Atomic uint32_t dg_pws_in;
497 _Atomic uint32_t dg_pw_errors;
498
499 _Atomic uint32_t dg_br_port_in;
500 _Atomic uint32_t dg_br_port_errors;
501
502 _Atomic uint32_t dg_intf_addrs_in;
503 _Atomic uint32_t dg_intf_addr_errors;
504
505 _Atomic uint32_t dg_macs_in;
506 _Atomic uint32_t dg_mac_errors;
507
508 _Atomic uint32_t dg_neighs_in;
509 _Atomic uint32_t dg_neigh_errors;
510
511 _Atomic uint32_t dg_rules_in;
512 _Atomic uint32_t dg_rule_errors;
513
514 _Atomic uint32_t dg_update_yields;
515
516 _Atomic uint32_t dg_iptable_in;
517 _Atomic uint32_t dg_iptable_errors;
518
519 _Atomic uint32_t dg_ipset_in;
520 _Atomic uint32_t dg_ipset_errors;
521 _Atomic uint32_t dg_ipset_entry_in;
522 _Atomic uint32_t dg_ipset_entry_errors;
523
524 _Atomic uint32_t dg_neightable_in;
525 _Atomic uint32_t dg_neightable_errors;
526
527 _Atomic uint32_t dg_gre_set_in;
528 _Atomic uint32_t dg_gre_set_errors;
529
530 _Atomic uint32_t dg_intfs_in;
531 _Atomic uint32_t dg_intf_errors;
532
533 /* Dataplane pthread */
534 struct frr_pthread *dg_pthread;
535
536 /* Event-delivery context 'master' for the dplane */
537 struct thread_master *dg_master;
538
539 /* Event/'thread' pointer for queued updates */
540 struct thread *dg_t_update;
541
542 /* Event pointer for pending shutdown check loop */
543 struct thread *dg_t_shutdown_check;
544
545 } zdplane_info;
546
547 /* Instantiate zns list type */
548 DECLARE_DLIST(zns_info_list, struct dplane_zns_info, link);
549
550 /*
551 * Lock and unlock for interactions with the zebra 'core' pthread
552 */
553 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
554 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
555
556
557 /*
558 * Lock and unlock for individual providers
559 */
560 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
561 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
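/*
 * Illustrative sketch (assumption): a provider's in/out queues are only
 * touched while holding that provider's mutex, for example:
 *
 *     DPLANE_PROV_LOCK(prov);
 *     ctx = TAILQ_FIRST(&prov->dp_ctx_in_q);
 *     if (ctx)
 *         TAILQ_REMOVE(&prov->dp_ctx_in_q, ctx, zd_q_entries);
 *     DPLANE_PROV_UNLOCK(prov);
 */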
562
563 /* Prototypes */
564 static void dplane_thread_loop(struct thread *event);
565 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
566 enum dplane_op_e op);
567 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
568 enum dplane_op_e op);
569 static enum zebra_dplane_result intf_addr_update_internal(
570 const struct interface *ifp, const struct connected *ifc,
571 enum dplane_op_e op);
572 static enum zebra_dplane_result mac_update_common(
573 enum dplane_op_e op, const struct interface *ifp,
574 const struct interface *br_ifp,
575 vlanid_t vid, const struct ethaddr *mac,
576 struct in_addr vtep_ip, bool sticky, uint32_t nhg_id,
577 uint32_t update_flags);
578 static enum zebra_dplane_result
579 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
580 const void *link, int link_family,
581 const struct ipaddr *ip, uint32_t flags, uint16_t state,
582 uint32_t update_flags, int protocol);
583
584 /*
585 * Public APIs
586 */
587
588 /* Obtain thread_master for dataplane thread */
589 struct thread_master *dplane_get_thread_master(void)
590 {
591 return zdplane_info.dg_master;
592 }
593
594 /*
595 * Allocate a dataplane update context
596 */
597 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
598 {
599 struct zebra_dplane_ctx *p;
600
601 /* TODO -- just alloc'ing memory, but would like to maintain
602 * a pool
603 */
604 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
605
606 return p;
607 }
608
609 /* Enable system route notifications */
610 void dplane_enable_sys_route_notifs(void)
611 {
612 zdplane_info.dg_sys_route_notifs = true;
613 }
614
615 /*
616 * Clean up dependent/internal allocations inside a context object
617 */
618 static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
619 {
620 struct dplane_intf_extra *if_extra, *if_tmp;
621
622 /*
623 * Some internal allocations may need to be freed, depending on
624 * the type of info captured in the ctx.
625 */
626 switch (ctx->zd_op) {
627 case DPLANE_OP_ROUTE_INSTALL:
628 case DPLANE_OP_ROUTE_UPDATE:
629 case DPLANE_OP_ROUTE_DELETE:
630 case DPLANE_OP_SYS_ROUTE_ADD:
631 case DPLANE_OP_SYS_ROUTE_DELETE:
632 case DPLANE_OP_ROUTE_NOTIFY:
633
634 /* Free allocated nexthops */
635 if (ctx->u.rinfo.zd_ng.nexthop) {
636 /* This deals with recursive nexthops too */
637 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
638
639 ctx->u.rinfo.zd_ng.nexthop = NULL;
640 }
641
642 /* Free backup info also (if present) */
643 if (ctx->u.rinfo.backup_ng.nexthop) {
644 /* This deals with recursive nexthops too */
645 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
646
647 ctx->u.rinfo.backup_ng.nexthop = NULL;
648 }
649
650 if (ctx->u.rinfo.zd_old_ng.nexthop) {
651 /* This deals with recursive nexthops too */
652 nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop);
653
654 ctx->u.rinfo.zd_old_ng.nexthop = NULL;
655 }
656
657 if (ctx->u.rinfo.old_backup_ng.nexthop) {
658 /* This deals with recursive nexthops too */
659 nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop);
660
661 ctx->u.rinfo.old_backup_ng.nexthop = NULL;
662 }
663
664 /* Optional extra interface info */
665 TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
666 link, if_tmp) {
667 TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
668 link);
669 XFREE(MTYPE_DP_INTF, if_extra);
670 }
671
672 break;
673
674 case DPLANE_OP_NH_INSTALL:
675 case DPLANE_OP_NH_UPDATE:
676 case DPLANE_OP_NH_DELETE: {
677 if (ctx->u.rinfo.nhe.ng.nexthop) {
678 /* This deals with recursive nexthops too */
679 nexthops_free(ctx->u.rinfo.nhe.ng.nexthop);
680
681 ctx->u.rinfo.nhe.ng.nexthop = NULL;
682 }
683 break;
684 }
685
686 case DPLANE_OP_LSP_INSTALL:
687 case DPLANE_OP_LSP_UPDATE:
688 case DPLANE_OP_LSP_DELETE:
689 case DPLANE_OP_LSP_NOTIFY:
690 {
691 struct zebra_nhlfe *nhlfe;
692
693 /* Unlink and free allocated NHLFEs */
694 frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
695 nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
696 zebra_mpls_nhlfe_free(nhlfe);
697 }
698
699 /* Unlink and free allocated backup NHLFEs, if present */
700 frr_each_safe(nhlfe_list,
701 &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
702 nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
703 nhlfe);
704 zebra_mpls_nhlfe_free(nhlfe);
705 }
706
707 /* Clear pointers in lsp struct, in case we're caching
708 * free context structs.
709 */
710 nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
711 ctx->u.lsp.best_nhlfe = NULL;
712 nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
713
714 break;
715 }
716
717 case DPLANE_OP_PW_INSTALL:
718 case DPLANE_OP_PW_UNINSTALL:
719 /* Free allocated nexthops */
720 if (ctx->u.pw.fib_nhg.nexthop) {
721 /* This deals with recursive nexthops too */
722 nexthops_free(ctx->u.pw.fib_nhg.nexthop);
723
724 ctx->u.pw.fib_nhg.nexthop = NULL;
725 }
726 if (ctx->u.pw.primary_nhg.nexthop) {
727 nexthops_free(ctx->u.pw.primary_nhg.nexthop);
728
729 ctx->u.pw.primary_nhg.nexthop = NULL;
730 }
731 if (ctx->u.pw.backup_nhg.nexthop) {
732 nexthops_free(ctx->u.pw.backup_nhg.nexthop);
733
734 ctx->u.pw.backup_nhg.nexthop = NULL;
735 }
736 break;
737
738 case DPLANE_OP_ADDR_INSTALL:
739 case DPLANE_OP_ADDR_UNINSTALL:
740 case DPLANE_OP_INTF_ADDR_ADD:
741 case DPLANE_OP_INTF_ADDR_DEL:
742 /* Maybe free label string, if allocated */
743 if (ctx->u.intf.label != NULL &&
744 ctx->u.intf.label != ctx->u.intf.label_buf) {
745 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
746 ctx->u.intf.label = NULL;
747 }
748 break;
749
750 case DPLANE_OP_MAC_INSTALL:
751 case DPLANE_OP_MAC_DELETE:
752 case DPLANE_OP_NEIGH_INSTALL:
753 case DPLANE_OP_NEIGH_UPDATE:
754 case DPLANE_OP_NEIGH_DELETE:
755 case DPLANE_OP_VTEP_ADD:
756 case DPLANE_OP_VTEP_DELETE:
757 case DPLANE_OP_RULE_ADD:
758 case DPLANE_OP_RULE_DELETE:
759 case DPLANE_OP_RULE_UPDATE:
760 case DPLANE_OP_NEIGH_DISCOVER:
761 case DPLANE_OP_BR_PORT_UPDATE:
762 case DPLANE_OP_NEIGH_IP_INSTALL:
763 case DPLANE_OP_NEIGH_IP_DELETE:
764 case DPLANE_OP_NONE:
765 case DPLANE_OP_IPSET_ADD:
766 case DPLANE_OP_IPSET_DELETE:
767 case DPLANE_OP_INTF_INSTALL:
768 case DPLANE_OP_INTF_UPDATE:
769 case DPLANE_OP_INTF_DELETE:
770 break;
771
772 case DPLANE_OP_IPSET_ENTRY_ADD:
773 case DPLANE_OP_IPSET_ENTRY_DELETE:
774 break;
775 case DPLANE_OP_NEIGH_TABLE_UPDATE:
776 break;
777 case DPLANE_OP_IPTABLE_ADD:
778 case DPLANE_OP_IPTABLE_DELETE:
779 if (ctx->u.iptable.interface_name_list) {
780 struct listnode *node, *nnode;
781 char *ifname;
782
783 for (ALL_LIST_ELEMENTS(
784 ctx->u.iptable.interface_name_list, node,
785 nnode, ifname)) {
786 LISTNODE_DETACH(
787 ctx->u.iptable.interface_name_list,
788 node);
789 XFREE(MTYPE_DP_NETFILTER, ifname);
790 }
791 list_delete(&ctx->u.iptable.interface_name_list);
792 }
793 break;
794 case DPLANE_OP_GRE_SET:
795 case DPLANE_OP_INTF_NETCONFIG:
796 break;
797 }
798 }
799
800 /*
801 * Free a dataplane results context.
802 */
803 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
804 {
805 if (pctx == NULL)
806 return;
807
808 DPLANE_CTX_VALID(*pctx);
809
810 /* TODO -- just freeing memory, but would like to maintain
811 * a pool
812 */
813
814 /* Some internal allocations may need to be freed, depending on
815 * the type of info captured in the ctx.
816 */
817 dplane_ctx_free_internal(*pctx);
818
819 XFREE(MTYPE_DP_CTX, *pctx);
820 }
821
822 /*
823 * Reset an allocated context object for re-use. All internal allocations are
824 * freed and the context is memset.
825 */
826 void dplane_ctx_reset(struct zebra_dplane_ctx *ctx)
827 {
828 dplane_ctx_free_internal(ctx);
829 memset(ctx, 0, sizeof(*ctx));
830 }
831
832 /*
833 * Return a context block to the dplane module after processing
834 */
835 void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
836 {
837 /* TODO -- maintain pool; for now, just free */
838 dplane_ctx_free(pctx);
839 }
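/*
 * Illustrative sketch (assumption): a typical context lifecycle as seen by
 * a caller inside zebra; the "populate" step stands in for the op-specific
 * setters defined later in this file.
 *
 *     struct zebra_dplane_ctx *ctx = dplane_ctx_alloc();
 *
 *     dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_INSTALL);
 *     ...populate route info with the accessors below...
 *
 *     ...hand the ctx to the dplane for processing...
 *
 *     dplane_ctx_fini(&ctx);   // return it once results are consumed
 */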
840
841 /* Enqueue a context block */
842 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
843 const struct zebra_dplane_ctx *ctx)
844 {
845 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
846 }
847
848 /* Append a list of context blocks to another list */
849 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
850 struct dplane_ctx_q *from_list)
851 {
852 if (TAILQ_FIRST(from_list)) {
853 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
854
855 /* And clear 'from' list */
856 TAILQ_INIT(from_list);
857 }
858 }
859
860 struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
861 {
862 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
863
864 return ctx;
865 }
866
867 /* Dequeue a context block from the head of a list */
868 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
869 {
870 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
871
872 if (ctx)
873 TAILQ_REMOVE(q, ctx, zd_q_entries);
874
875 return ctx;
876 }
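/*
 * Illustrative sketch (assumption): a results callback registered by the
 * zebra core typically drains its list with dplane_ctx_dequeue() and
 * returns each block with dplane_ctx_fini():
 *
 *     struct zebra_dplane_ctx *ctx;
 *
 *     while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
 *         ...examine dplane_ctx_get_op(ctx), dplane_ctx_get_status(ctx)...
 *         dplane_ctx_fini(&ctx);
 *     }
 */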
877
878 /*
879 * Accessors for information from the context object
880 */
881 enum zebra_dplane_result dplane_ctx_get_status(
882 const struct zebra_dplane_ctx *ctx)
883 {
884 DPLANE_CTX_VALID(ctx);
885
886 return ctx->zd_status;
887 }
888
889 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
890 enum zebra_dplane_result status)
891 {
892 DPLANE_CTX_VALID(ctx);
893
894 ctx->zd_status = status;
895 }
896
897 /* Retrieve last/current provider id */
898 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
899 {
900 DPLANE_CTX_VALID(ctx);
901 return ctx->zd_provider;
902 }
903
904 /* Providers that run before the kernel provider can control whether a
905 * kernel update should be performed.
906 */
907 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
908 {
909 DPLANE_CTX_VALID(ctx);
910
911 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
912 }
913
914 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
915 {
916 DPLANE_CTX_VALID(ctx);
917
918 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
919 }
920
921 void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
922 {
923 DPLANE_CTX_VALID(ctx);
924 ctx->zd_op = op;
925 }
926
927 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
928 {
929 DPLANE_CTX_VALID(ctx);
930
931 return ctx->zd_op;
932 }
933
934 const char *dplane_op2str(enum dplane_op_e op)
935 {
936 const char *ret = "UNKNOWN";
937
938 switch (op) {
939 case DPLANE_OP_NONE:
940 ret = "NONE";
941 break;
942
943 /* Route update */
944 case DPLANE_OP_ROUTE_INSTALL:
945 ret = "ROUTE_INSTALL";
946 break;
947 case DPLANE_OP_ROUTE_UPDATE:
948 ret = "ROUTE_UPDATE";
949 break;
950 case DPLANE_OP_ROUTE_DELETE:
951 ret = "ROUTE_DELETE";
952 break;
953 case DPLANE_OP_ROUTE_NOTIFY:
954 ret = "ROUTE_NOTIFY";
955 break;
956
957 /* Nexthop update */
958 case DPLANE_OP_NH_INSTALL:
959 ret = "NH_INSTALL";
960 break;
961 case DPLANE_OP_NH_UPDATE:
962 ret = "NH_UPDATE";
963 break;
964 case DPLANE_OP_NH_DELETE:
965 ret = "NH_DELETE";
966 break;
967
968 case DPLANE_OP_LSP_INSTALL:
969 ret = "LSP_INSTALL";
970 break;
971 case DPLANE_OP_LSP_UPDATE:
972 ret = "LSP_UPDATE";
973 break;
974 case DPLANE_OP_LSP_DELETE:
975 ret = "LSP_DELETE";
976 break;
977 case DPLANE_OP_LSP_NOTIFY:
978 ret = "LSP_NOTIFY";
979 break;
980
981 case DPLANE_OP_PW_INSTALL:
982 ret = "PW_INSTALL";
983 break;
984 case DPLANE_OP_PW_UNINSTALL:
985 ret = "PW_UNINSTALL";
986 break;
987
988 case DPLANE_OP_SYS_ROUTE_ADD:
989 ret = "SYS_ROUTE_ADD";
990 break;
991 case DPLANE_OP_SYS_ROUTE_DELETE:
992 ret = "SYS_ROUTE_DEL";
993 break;
994
995 case DPLANE_OP_BR_PORT_UPDATE:
996 ret = "BR_PORT_UPDATE";
997 break;
998
999 case DPLANE_OP_ADDR_INSTALL:
1000 ret = "ADDR_INSTALL";
1001 break;
1002 case DPLANE_OP_ADDR_UNINSTALL:
1003 ret = "ADDR_UNINSTALL";
1004 break;
1005
1006 case DPLANE_OP_MAC_INSTALL:
1007 ret = "MAC_INSTALL";
1008 break;
1009 case DPLANE_OP_MAC_DELETE:
1010 ret = "MAC_DELETE";
1011 break;
1012
1013 case DPLANE_OP_NEIGH_INSTALL:
1014 ret = "NEIGH_INSTALL";
1015 break;
1016 case DPLANE_OP_NEIGH_UPDATE:
1017 ret = "NEIGH_UPDATE";
1018 break;
1019 case DPLANE_OP_NEIGH_DELETE:
1020 ret = "NEIGH_DELETE";
1021 break;
1022 case DPLANE_OP_VTEP_ADD:
1023 ret = "VTEP_ADD";
1024 break;
1025 case DPLANE_OP_VTEP_DELETE:
1026 ret = "VTEP_DELETE";
1027 break;
1028
1029 case DPLANE_OP_RULE_ADD:
1030 ret = "RULE_ADD";
1031 break;
1032 case DPLANE_OP_RULE_DELETE:
1033 ret = "RULE_DELETE";
1034 break;
1035 case DPLANE_OP_RULE_UPDATE:
1036 ret = "RULE_UPDATE";
1037 break;
1038
1039 case DPLANE_OP_NEIGH_DISCOVER:
1040 ret = "NEIGH_DISCOVER";
1041 break;
1042
1043 case DPLANE_OP_IPTABLE_ADD:
1044 ret = "IPTABLE_ADD";
1045 break;
1046 case DPLANE_OP_IPTABLE_DELETE:
1047 ret = "IPTABLE_DELETE";
1048 break;
1049 case DPLANE_OP_IPSET_ADD:
1050 ret = "IPSET_ADD";
1051 break;
1052 case DPLANE_OP_IPSET_DELETE:
1053 ret = "IPSET_DELETE";
1054 break;
1055 case DPLANE_OP_IPSET_ENTRY_ADD:
1056 ret = "IPSET_ENTRY_ADD";
1057 break;
1058 case DPLANE_OP_IPSET_ENTRY_DELETE:
1059 ret = "IPSET_ENTRY_DELETE";
1060 break;
1061 case DPLANE_OP_NEIGH_IP_INSTALL:
1062 ret = "NEIGH_IP_INSTALL";
1063 break;
1064 case DPLANE_OP_NEIGH_IP_DELETE:
1065 ret = "NEIGH_IP_DELETE";
1066 break;
1067 case DPLANE_OP_NEIGH_TABLE_UPDATE:
1068 ret = "NEIGH_TABLE_UPDATE";
1069 break;
1070
1071 case DPLANE_OP_GRE_SET:
1072 ret = "GRE_SET";
1073 break;
1074
1075 case DPLANE_OP_INTF_ADDR_ADD:
1076 return "INTF_ADDR_ADD";
1077
1078 case DPLANE_OP_INTF_ADDR_DEL:
1079 return "INTF_ADDR_DEL";
1080
1081 case DPLANE_OP_INTF_NETCONFIG:
1082 return "INTF_NETCONFIG";
1083
1084 case DPLANE_OP_INTF_INSTALL:
1085 ret = "INTF_INSTALL";
1086 break;
1087 case DPLANE_OP_INTF_UPDATE:
1088 ret = "INTF_UPDATE";
1089 break;
1090 case DPLANE_OP_INTF_DELETE:
1091 ret = "INTF_DELETE";
1092 break;
1093 }
1094
1095 return ret;
1096 }
1097
1098 const char *dplane_res2str(enum zebra_dplane_result res)
1099 {
1100 const char *ret = "<Unknown>";
1101
1102 switch (res) {
1103 case ZEBRA_DPLANE_REQUEST_FAILURE:
1104 ret = "FAILURE";
1105 break;
1106 case ZEBRA_DPLANE_REQUEST_QUEUED:
1107 ret = "QUEUED";
1108 break;
1109 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1110 ret = "SUCCESS";
1111 break;
1112 }
1113
1114 return ret;
1115 }
1116
1117 void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
1118 const struct prefix *dest)
1119 {
1120 DPLANE_CTX_VALID(ctx);
1121
1122 prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
1123 }
1124
1125 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
1126 {
1127 DPLANE_CTX_VALID(ctx);
1128
1129 return &(ctx->u.rinfo.zd_dest);
1130 }
1131
1132 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
1133 {
1134 DPLANE_CTX_VALID(ctx);
1135
1136 if (src)
1137 prefix_copy(&(ctx->u.rinfo.zd_src), src);
1138 else
1139 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
1140 }
1141
1142 /* Source prefix is a little special - return NULL for "no src prefix" */
1143 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
1144 {
1145 DPLANE_CTX_VALID(ctx);
1146
1147 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
1148 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
1149 return NULL;
1150 } else {
1151 return &(ctx->u.rinfo.zd_src);
1152 }
1153 }
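/*
 * Illustrative sketch (assumption): callers should treat a NULL return as
 * "no source prefix present":
 *
 *     const struct prefix *src_p = dplane_ctx_get_src(ctx);
 *
 *     if (src_p)
 *         ...typically an IPv6 src-dest route...
 */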
1154
1155 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
1156 {
1157 DPLANE_CTX_VALID(ctx);
1158
1159 return ctx->zd_is_update;
1160 }
1161
1162 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
1163 {
1164 DPLANE_CTX_VALID(ctx);
1165
1166 return ctx->zd_seq;
1167 }
1168
1169 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
1170 {
1171 DPLANE_CTX_VALID(ctx);
1172
1173 return ctx->zd_old_seq;
1174 }
1175
1176 void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
1177 {
1178 DPLANE_CTX_VALID(ctx);
1179
1180 ctx->zd_vrf_id = vrf;
1181 }
1182
1183 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
1184 {
1185 DPLANE_CTX_VALID(ctx);
1186
1187 return ctx->zd_vrf_id;
1188 }
1189
1190 /* In some paths we have only a namespace id */
1191 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid)
1192 {
1193 DPLANE_CTX_VALID(ctx);
1194
1195 ctx->zd_ns_info.ns_id = nsid;
1196 }
1197
1198 ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx)
1199 {
1200 DPLANE_CTX_VALID(ctx);
1201
1202 return ctx->zd_ns_info.ns_id;
1203 }
1204
1205 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
1206 {
1207 DPLANE_CTX_VALID(ctx);
1208
1209 return (ctx->zd_notif_provider != 0);
1210 }
1211
1212 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
1213 {
1214 DPLANE_CTX_VALID(ctx);
1215
1216 return ctx->zd_notif_provider;
1217 }
1218
1219 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
1220 uint32_t id)
1221 {
1222 DPLANE_CTX_VALID(ctx);
1223
1224 ctx->zd_notif_provider = id;
1225 }
1226
1227 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
1228 {
1229 DPLANE_CTX_VALID(ctx);
1230
1231 return ctx->zd_ifname;
1232 }
1233
1234 void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname)
1235 {
1236 DPLANE_CTX_VALID(ctx);
1237
1238 if (!ifname)
1239 return;
1240
1241 strlcpy(ctx->zd_ifname, ifname, sizeof(ctx->zd_ifname));
1242 }
1243
1244 ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
1245 {
1246 DPLANE_CTX_VALID(ctx);
1247
1248 return ctx->zd_ifindex;
1249 }
1250
1251 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex)
1252 {
1253 DPLANE_CTX_VALID(ctx);
1254
1255 ctx->zd_ifindex = ifindex;
1256 }
1257
1258 void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
1259 {
1260 DPLANE_CTX_VALID(ctx);
1261
1262 ctx->u.rinfo.zd_type = type;
1263 }
1264
1265 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
1266 {
1267 DPLANE_CTX_VALID(ctx);
1268
1269 return ctx->u.rinfo.zd_type;
1270 }
1271
1272 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
1273 {
1274 DPLANE_CTX_VALID(ctx);
1275
1276 return ctx->u.rinfo.zd_old_type;
1277 }
1278
1279 void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
1280 {
1281 DPLANE_CTX_VALID(ctx);
1282
1283 ctx->u.rinfo.zd_afi = afi;
1284 }
1285
1286 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
1287 {
1288 DPLANE_CTX_VALID(ctx);
1289
1290 return ctx->u.rinfo.zd_afi;
1291 }
1292
1293 void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
1294 {
1295 DPLANE_CTX_VALID(ctx);
1296
1297 ctx->u.rinfo.zd_safi = safi;
1298 }
1299
1300 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
1301 {
1302 DPLANE_CTX_VALID(ctx);
1303
1304 return ctx->u.rinfo.zd_safi;
1305 }
1306
1307 void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
1308 {
1309 DPLANE_CTX_VALID(ctx);
1310
1311 ctx->zd_table_id = table;
1312 }
1313
1314 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
1315 {
1316 DPLANE_CTX_VALID(ctx);
1317
1318 return ctx->zd_table_id;
1319 }
1320
1321 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
1322 {
1323 DPLANE_CTX_VALID(ctx);
1324
1325 return ctx->u.rinfo.zd_tag;
1326 }
1327
1328 void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
1329 {
1330 DPLANE_CTX_VALID(ctx);
1331
1332 ctx->u.rinfo.zd_tag = tag;
1333 }
1334
1335 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
1336 {
1337 DPLANE_CTX_VALID(ctx);
1338
1339 return ctx->u.rinfo.zd_old_tag;
1340 }
1341
1342 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
1343 {
1344 DPLANE_CTX_VALID(ctx);
1345
1346 return ctx->u.rinfo.zd_instance;
1347 }
1348
1349 void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
1350 {
1351 DPLANE_CTX_VALID(ctx);
1352
1353 ctx->u.rinfo.zd_instance = instance;
1354 }
1355
1356 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
1357 {
1358 DPLANE_CTX_VALID(ctx);
1359
1360 return ctx->u.rinfo.zd_old_instance;
1361 }
1362
1363 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
1364 {
1365 DPLANE_CTX_VALID(ctx);
1366
1367 return ctx->u.rinfo.zd_metric;
1368 }
1369
1370 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
1371 {
1372 DPLANE_CTX_VALID(ctx);
1373
1374 return ctx->u.rinfo.zd_old_metric;
1375 }
1376
1377 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
1378 {
1379 DPLANE_CTX_VALID(ctx);
1380
1381 return ctx->u.rinfo.zd_mtu;
1382 }
1383
1384 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
1385 {
1386 DPLANE_CTX_VALID(ctx);
1387
1388 return ctx->u.rinfo.zd_nexthop_mtu;
1389 }
1390
1391 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
1392 {
1393 DPLANE_CTX_VALID(ctx);
1394
1395 return ctx->u.rinfo.zd_distance;
1396 }
1397
1398 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
1399 {
1400 DPLANE_CTX_VALID(ctx);
1401
1402 ctx->u.rinfo.zd_distance = distance;
1403 }
1404
1405 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
1406 {
1407 DPLANE_CTX_VALID(ctx);
1408
1409 return ctx->u.rinfo.zd_old_distance;
1410 }
1411
1412 /*
1413 * Set the nexthops associated with a context: note that processing code
1414 * may well expect that nexthops are in canonical (sorted) order, so we
1415 * will enforce that here.
1416 */
1417 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1418 {
1419 DPLANE_CTX_VALID(ctx);
1420
1421 if (ctx->u.rinfo.zd_ng.nexthop) {
1422 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1423 ctx->u.rinfo.zd_ng.nexthop = NULL;
1424 }
1425 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1426 }
1427
1428 /*
1429 * Set the list of backup nexthops; their ordering is preserved (they're not
1430 * re-sorted).
1431 */
1432 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
1433 const struct nexthop_group *nhg)
1434 {
1435 struct nexthop *nh, *last_nh, *nexthop;
1436
1437 DPLANE_CTX_VALID(ctx);
1438
1439 if (ctx->u.rinfo.backup_ng.nexthop) {
1440 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
1441 ctx->u.rinfo.backup_ng.nexthop = NULL;
1442 }
1443
1444 last_nh = NULL;
1445
1446 /* Be careful to preserve the order of the backup list */
1447 for (nh = nhg->nexthop; nh; nh = nh->next) {
1448 nexthop = nexthop_dup(nh, NULL);
1449
1450 if (last_nh)
1451 NEXTHOP_APPEND(last_nh, nexthop);
1452 else
1453 ctx->u.rinfo.backup_ng.nexthop = nexthop;
1454
1455 last_nh = nexthop;
1456 }
1457 }
1458
1459 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1460 {
1461 DPLANE_CTX_VALID(ctx);
1462 return ctx->u.rinfo.zd_nhg_id;
1463 }
1464
1465 const struct nexthop_group *dplane_ctx_get_ng(
1466 const struct zebra_dplane_ctx *ctx)
1467 {
1468 DPLANE_CTX_VALID(ctx);
1469
1470 return &(ctx->u.rinfo.zd_ng);
1471 }
1472
1473 const struct nexthop_group *
1474 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
1475 {
1476 DPLANE_CTX_VALID(ctx);
1477
1478 return &(ctx->u.rinfo.backup_ng);
1479 }
1480
1481 const struct nexthop_group *
1482 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
1483 {
1484 DPLANE_CTX_VALID(ctx);
1485
1486 return &(ctx->u.rinfo.zd_old_ng);
1487 }
1488
1489 const struct nexthop_group *
1490 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
1491 {
1492 DPLANE_CTX_VALID(ctx);
1493
1494 return &(ctx->u.rinfo.old_backup_ng);
1495 }
1496
1497 const struct zebra_dplane_info *dplane_ctx_get_ns(
1498 const struct zebra_dplane_ctx *ctx)
1499 {
1500 DPLANE_CTX_VALID(ctx);
1501
1502 return &(ctx->zd_ns_info);
1503 }
1504
1505 int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx *ctx)
1506 {
1507 DPLANE_CTX_VALID(ctx);
1508
1509 #ifdef HAVE_NETLINK
1510 return ctx->zd_ns_info.sock;
1511 #else
1512 return -1;
1513 #endif
1514 }
1515
1516 /* Accessors for nexthop information */
1517 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
1518 {
1519 DPLANE_CTX_VALID(ctx);
1520 return ctx->u.rinfo.nhe.id;
1521 }
1522
1523 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
1524 {
1525 DPLANE_CTX_VALID(ctx);
1526 return ctx->u.rinfo.nhe.old_id;
1527 }
1528
1529 afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
1530 {
1531 DPLANE_CTX_VALID(ctx);
1532 return ctx->u.rinfo.nhe.afi;
1533 }
1534
1535 vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
1536 {
1537 DPLANE_CTX_VALID(ctx);
1538 return ctx->u.rinfo.nhe.vrf_id;
1539 }
1540
1541 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
1542 {
1543 DPLANE_CTX_VALID(ctx);
1544 return ctx->u.rinfo.nhe.type;
1545 }
1546
1547 const struct nexthop_group *
1548 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
1549 {
1550 DPLANE_CTX_VALID(ctx);
1551 return &(ctx->u.rinfo.nhe.ng);
1552 }
1553
1554 const struct nh_grp *
1555 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
1556 {
1557 DPLANE_CTX_VALID(ctx);
1558 return ctx->u.rinfo.nhe.nh_grp;
1559 }
1560
1561 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
1562 {
1563 DPLANE_CTX_VALID(ctx);
1564 return ctx->u.rinfo.nhe.nh_grp_count;
1565 }
1566
1567 /* Accessors for LSP information */
1568
1569 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
1570 {
1571 DPLANE_CTX_VALID(ctx);
1572
1573 return ctx->u.lsp.ile.in_label;
1574 }
1575
1576 void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
1577 {
1578 DPLANE_CTX_VALID(ctx);
1579
1580 ctx->u.lsp.ile.in_label = label;
1581 }
1582
1583 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
1584 {
1585 DPLANE_CTX_VALID(ctx);
1586
1587 return ctx->u.lsp.addr_family;
1588 }
1589
1590 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
1591 uint8_t family)
1592 {
1593 DPLANE_CTX_VALID(ctx);
1594
1595 ctx->u.lsp.addr_family = family;
1596 }
1597
1598 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
1599 {
1600 DPLANE_CTX_VALID(ctx);
1601
1602 return ctx->u.lsp.flags;
1603 }
1604
1605 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
1606 uint32_t flags)
1607 {
1608 DPLANE_CTX_VALID(ctx);
1609
1610 ctx->u.lsp.flags = flags;
1611 }
1612
1613 const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
1614 const struct zebra_dplane_ctx *ctx)
1615 {
1616 DPLANE_CTX_VALID(ctx);
1617 return &(ctx->u.lsp.nhlfe_list);
1618 }
1619
1620 const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
1621 const struct zebra_dplane_ctx *ctx)
1622 {
1623 DPLANE_CTX_VALID(ctx);
1624 return &(ctx->u.lsp.backup_nhlfe_list);
1625 }
1626
1627 struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1628 enum lsp_types_t lsp_type,
1629 enum nexthop_types_t nh_type,
1630 const union g_addr *gate,
1631 ifindex_t ifindex, uint8_t num_labels,
1632 mpls_label_t *out_labels)
1633 {
1634 struct zebra_nhlfe *nhlfe;
1635
1636 DPLANE_CTX_VALID(ctx);
1637
1638 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1639 lsp_type, nh_type, gate,
1640 ifindex, num_labels, out_labels);
1641
1642 return nhlfe;
1643 }
1644
1645 struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
1646 struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
1647 enum nexthop_types_t nh_type, const union g_addr *gate,
1648 ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
1649 {
1650 struct zebra_nhlfe *nhlfe;
1651
1652 DPLANE_CTX_VALID(ctx);
1653
1654 nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
1655 lsp_type, nh_type, gate,
1656 ifindex, num_labels,
1657 out_labels);
1658
1659 return nhlfe;
1660 }
1661
1662 const struct zebra_nhlfe *
1663 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
1664 {
1665 DPLANE_CTX_VALID(ctx);
1666
1667 return ctx->u.lsp.best_nhlfe;
1668 }
1669
1670 const struct zebra_nhlfe *
1671 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
1672 struct zebra_nhlfe *nhlfe)
1673 {
1674 DPLANE_CTX_VALID(ctx);
1675
1676 ctx->u.lsp.best_nhlfe = nhlfe;
1677 return ctx->u.lsp.best_nhlfe;
1678 }
1679
1680 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
1681 {
1682 DPLANE_CTX_VALID(ctx);
1683
1684 return ctx->u.lsp.num_ecmp;
1685 }
1686
1687 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
1688 {
1689 DPLANE_CTX_VALID(ctx);
1690
1691 return ctx->u.pw.local_label;
1692 }
1693
1694 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
1695 {
1696 DPLANE_CTX_VALID(ctx);
1697
1698 return ctx->u.pw.remote_label;
1699 }
1700
1701 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
1702 {
1703 DPLANE_CTX_VALID(ctx);
1704
1705 return ctx->u.pw.type;
1706 }
1707
1708 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
1709 {
1710 DPLANE_CTX_VALID(ctx);
1711
1712 return ctx->u.pw.af;
1713 }
1714
1715 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
1716 {
1717 DPLANE_CTX_VALID(ctx);
1718
1719 return ctx->u.pw.flags;
1720 }
1721
1722 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
1723 {
1724 DPLANE_CTX_VALID(ctx);
1725
1726 return ctx->u.pw.status;
1727 }
1728
1729 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
1730 {
1731 DPLANE_CTX_VALID(ctx);
1732
1733 ctx->u.pw.status = status;
1734 }
1735
1736 const union g_addr *dplane_ctx_get_pw_dest(
1737 const struct zebra_dplane_ctx *ctx)
1738 {
1739 DPLANE_CTX_VALID(ctx);
1740
1741 return &(ctx->u.pw.dest);
1742 }
1743
1744 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
1745 const struct zebra_dplane_ctx *ctx)
1746 {
1747 DPLANE_CTX_VALID(ctx);
1748
1749 return &(ctx->u.pw.fields);
1750 }
1751
1752 const struct nexthop_group *
1753 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
1754 {
1755 DPLANE_CTX_VALID(ctx);
1756
1757 return &(ctx->u.pw.fib_nhg);
1758 }
1759
1760 const struct nexthop_group *
1761 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
1762 {
1763 DPLANE_CTX_VALID(ctx);
1764
1765 return &(ctx->u.pw.primary_nhg);
1766 }
1767
1768 const struct nexthop_group *
1769 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
1770 {
1771 DPLANE_CTX_VALID(ctx);
1772
1773 return &(ctx->u.pw.backup_nhg);
1774 }
1775
1776 /* Accessors for interface information */
1777 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
1778 {
1779 DPLANE_CTX_VALID(ctx);
1780
1781 return ctx->u.intf.metric;
1782 }
1783
1784 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
1785 {
1786 DPLANE_CTX_VALID(ctx);
1787
1788 ctx->u.intf.metric = metric;
1789 }
1790
1791 uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx *ctx)
1792 {
1793 DPLANE_CTX_VALID(ctx);
1794
1795 return ctx->u.intf.pd_reason_val;
1796 }
1797
1798 void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx *ctx, bool val)
1799 {
1800 DPLANE_CTX_VALID(ctx);
1801
1802 ctx->u.intf.pd_reason_val = val;
1803 }
1804
1805 bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx *ctx)
1806 {
1807 DPLANE_CTX_VALID(ctx);
1808
1809 return ctx->u.intf.protodown;
1810 }
1811
1812 /* Is interface addr p2p? */
1813 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
1814 {
1815 DPLANE_CTX_VALID(ctx);
1816
1817 return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
1818 }
1819
1820 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
1821 {
1822 DPLANE_CTX_VALID(ctx);
1823
1824 return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
1825 }
1826
1827 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
1828 {
1829 DPLANE_CTX_VALID(ctx);
1830
1831 return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
1832 }
1833
1834 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
1835 {
1836 DPLANE_CTX_VALID(ctx);
1837
1838 ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
1839 }
1840
1841 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
1842 {
1843 DPLANE_CTX_VALID(ctx);
1844
1845 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
1846 }
1847
1848 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
1849 {
1850 DPLANE_CTX_VALID(ctx);
1851
1852 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
1853 }
1854
1855 const struct prefix *dplane_ctx_get_intf_addr(
1856 const struct zebra_dplane_ctx *ctx)
1857 {
1858 DPLANE_CTX_VALID(ctx);
1859
1860 return &(ctx->u.intf.prefix);
1861 }
1862
1863 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
1864 const struct prefix *p)
1865 {
1866 DPLANE_CTX_VALID(ctx);
1867
1868 prefix_copy(&(ctx->u.intf.prefix), p);
1869 }
1870
1871 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
1872 {
1873 DPLANE_CTX_VALID(ctx);
1874
1875 return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
1876 }
1877
1878 const struct prefix *dplane_ctx_get_intf_dest(
1879 const struct zebra_dplane_ctx *ctx)
1880 {
1881 DPLANE_CTX_VALID(ctx);
1882
1883 return &(ctx->u.intf.dest_prefix);
1884 }
1885
1886 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
1887 const struct prefix *p)
1888 {
1889 DPLANE_CTX_VALID(ctx);
1890
1891 prefix_copy(&(ctx->u.intf.dest_prefix), p);
1892 }
1893
1894 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
1895 {
1896 DPLANE_CTX_VALID(ctx);
1897
1898 return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
1899 }
1900
1901 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
1902 {
1903 DPLANE_CTX_VALID(ctx);
1904
1905 return ctx->u.intf.label;
1906 }
1907
1908 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label)
1909 {
1910 size_t len;
1911
1912 DPLANE_CTX_VALID(ctx);
1913
1914 if (ctx->u.intf.label && ctx->u.intf.label != ctx->u.intf.label_buf)
1915 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
1916
1917 ctx->u.intf.label = NULL;
1918
1919 if (label) {
1920 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
1921
1922 /* Use embedded buffer if it's adequate; else allocate. */
1923 len = strlen(label);
1924
1925 if (len < sizeof(ctx->u.intf.label_buf)) {
1926 strlcpy(ctx->u.intf.label_buf, label,
1927 sizeof(ctx->u.intf.label_buf));
1928 ctx->u.intf.label = ctx->u.intf.label_buf;
1929 } else {
1930 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, label);
1931 }
1932 } else {
1933 ctx->u.intf.flags &= ~DPLANE_INTF_HAS_LABEL;
1934 }
1935 }
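/*
 * Illustrative sketch (assumption): short labels land in the embedded
 * label_buf, long ones are heap-allocated and later released by
 * dplane_ctx_free_internal():
 *
 *     dplane_ctx_set_intf_label(ctx, "mgmt");      // fits label_buf
 *     dplane_ctx_set_intf_label(ctx, long_name);   // hypothetical long
 *                                                  // string, XSTRDUP'd
 */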
1936
1937 /* Accessors for MAC information */
1938 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1939 {
1940 DPLANE_CTX_VALID(ctx);
1941 return ctx->u.macinfo.vid;
1942 }
1943
1944 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1945 {
1946 DPLANE_CTX_VALID(ctx);
1947 return ctx->u.macinfo.is_sticky;
1948 }
1949
1950 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1951 {
1952 DPLANE_CTX_VALID(ctx);
1953 return ctx->u.macinfo.nhg_id;
1954 }
1955
1956 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
1957 {
1958 DPLANE_CTX_VALID(ctx);
1959 return ctx->u.macinfo.update_flags;
1960 }
1961
1962 const struct ethaddr *dplane_ctx_mac_get_addr(
1963 const struct zebra_dplane_ctx *ctx)
1964 {
1965 DPLANE_CTX_VALID(ctx);
1966 return &(ctx->u.macinfo.mac);
1967 }
1968
1969 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1970 const struct zebra_dplane_ctx *ctx)
1971 {
1972 DPLANE_CTX_VALID(ctx);
1973 return &(ctx->u.macinfo.vtep_ip);
1974 }
1975
1976 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1977 {
1978 DPLANE_CTX_VALID(ctx);
1979 return ctx->u.macinfo.br_ifindex;
1980 }
1981
1982 /* Accessors for neighbor information */
1983 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1984 const struct zebra_dplane_ctx *ctx)
1985 {
1986 DPLANE_CTX_VALID(ctx);
1987 return &(ctx->u.neigh.ip_addr);
1988 }
1989
1990 const struct ipaddr *
1991 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
1992 {
1993 DPLANE_CTX_VALID(ctx);
1994 return &(ctx->u.neigh.link.ip_addr);
1995 }
1996
1997 const struct ethaddr *dplane_ctx_neigh_get_mac(
1998 const struct zebra_dplane_ctx *ctx)
1999 {
2000 DPLANE_CTX_VALID(ctx);
2001 return &(ctx->u.neigh.link.mac);
2002 }
2003
2004 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
2005 {
2006 DPLANE_CTX_VALID(ctx);
2007 return ctx->u.neigh.flags;
2008 }
2009
2010 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
2011 {
2012 DPLANE_CTX_VALID(ctx);
2013 return ctx->u.neigh.state;
2014 }
2015
2016 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
2017 {
2018 DPLANE_CTX_VALID(ctx);
2019 return ctx->u.neigh.update_flags;
2020 }
2021
2022 /* Accessors for GRE set info */
2023 uint32_t
2024 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx)
2025 {
2026 DPLANE_CTX_VALID(ctx);
2027
2028 return ctx->u.gre.link_ifindex;
2029 }
2030
2031 unsigned int
2032 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx)
2033 {
2034 DPLANE_CTX_VALID(ctx);
2035
2036 return ctx->u.gre.mtu;
2037 }
2038
2039 const struct zebra_l2info_gre *
2040 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx)
2041 {
2042 DPLANE_CTX_VALID(ctx);
2043
2044 return &ctx->u.gre.info;
2045 }
2046
2047 /* Accessors for PBR rule information */
2048 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
2049 {
2050 DPLANE_CTX_VALID(ctx);
2051
2052 return ctx->u.rule.sock;
2053 }
2054
2055 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
2056 {
2057 DPLANE_CTX_VALID(ctx);
2058
2059 return ctx->u.rule.new.ifname;
2060 }
2061
2062 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
2063 {
2064 DPLANE_CTX_VALID(ctx);
2065
2066 return ctx->u.rule.unique;
2067 }
2068
2069 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
2070 {
2071 DPLANE_CTX_VALID(ctx);
2072
2073 return ctx->u.rule.seq;
2074 }
2075
2076 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
2077 {
2078 DPLANE_CTX_VALID(ctx);
2079
2080 return ctx->u.rule.new.priority;
2081 }
2082
2083 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
2084 {
2085 DPLANE_CTX_VALID(ctx);
2086
2087 return ctx->u.rule.old.priority;
2088 }
2089
2090 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
2091 {
2092 DPLANE_CTX_VALID(ctx);
2093
2094 return ctx->u.rule.new.table;
2095 }
2096
2097 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
2098 {
2099 DPLANE_CTX_VALID(ctx);
2100
2101 return ctx->u.rule.old.table;
2102 }
2103
2104 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
2105 {
2106 DPLANE_CTX_VALID(ctx);
2107
2108 return ctx->u.rule.new.filter_bm;
2109 }
2110
2111 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
2112 {
2113 DPLANE_CTX_VALID(ctx);
2114
2115 return ctx->u.rule.old.filter_bm;
2116 }
2117
2118 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
2119 {
2120 DPLANE_CTX_VALID(ctx);
2121
2122 return ctx->u.rule.new.fwmark;
2123 }
2124
2125 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
2126 {
2127 DPLANE_CTX_VALID(ctx);
2128
2129 return ctx->u.rule.old.fwmark;
2130 }
2131
2132 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
2133 {
2134 DPLANE_CTX_VALID(ctx);
2135
2136 return ctx->u.rule.new.ip_proto;
2137 }
2138
2139 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
2140 {
2141 DPLANE_CTX_VALID(ctx);
2142
2143 return ctx->u.rule.old.ip_proto;
2144 }
2145
2146 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
2147 {
2148 DPLANE_CTX_VALID(ctx);
2149
2150 return ctx->u.rule.new.dsfield;
2151 }
2152
2153 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
2154 {
2155 DPLANE_CTX_VALID(ctx);
2156
2157 return ctx->u.rule.old.dsfield;
2158 }
2159
2160 const struct prefix *
2161 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
2162 {
2163 DPLANE_CTX_VALID(ctx);
2164
2165 return &(ctx->u.rule.new.src_ip);
2166 }
2167
2168 const struct prefix *
2169 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
2170 {
2171 DPLANE_CTX_VALID(ctx);
2172
2173 return &(ctx->u.rule.old.src_ip);
2174 }
2175
2176 const struct prefix *
2177 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
2178 {
2179 DPLANE_CTX_VALID(ctx);
2180
2181 return &(ctx->u.rule.new.dst_ip);
2182 }
2183
2184 const struct prefix *
2185 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
2186 {
2187 DPLANE_CTX_VALID(ctx);
2188
2189 return &(ctx->u.rule.old.dst_ip);
2190 }
2191
2192 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx)
2193 {
2194 DPLANE_CTX_VALID(ctx);
2195
2196 return ctx->u.br_port.flags;
2197 }
2198
2199 uint32_t
2200 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx)
2201 {
2202 DPLANE_CTX_VALID(ctx);
2203
2204 return ctx->u.br_port.sph_filter_cnt;
2205 }
2206
2207 const struct in_addr *
2208 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx)
2209 {
2210 DPLANE_CTX_VALID(ctx);
2211
2212 return ctx->u.br_port.sph_filters;
2213 }
2214
2215 uint32_t
2216 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx)
2217 {
2218 DPLANE_CTX_VALID(ctx);
2219
2220 return ctx->u.br_port.backup_nhg_id;
2221 }
2222
2223 /* Accessors for PBR iptable information */
2224 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
2225 struct zebra_pbr_iptable *table)
2226 {
2227 DPLANE_CTX_VALID(ctx);
2228
2229 memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
2230 }
2231
2232 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
2233 struct zebra_pbr_ipset *ipset)
2234 {
2235 DPLANE_CTX_VALID(ctx);
2236
2237 assert(ipset);
2238
2239 if (ctx->zd_op == DPLANE_OP_IPSET_ENTRY_ADD ||
2240 ctx->zd_op == DPLANE_OP_IPSET_ENTRY_DELETE) {
2241 memset(ipset, 0, sizeof(struct zebra_pbr_ipset));
2242 ipset->type = ctx->u.ipset_entry.info.type;
2243 ipset->family = ctx->u.ipset_entry.info.family;
2244 memcpy(&ipset->ipset_name, &ctx->u.ipset_entry.info.ipset_name,
2245 ZEBRA_IPSET_NAME_SIZE);
2246 } else
2247 memcpy(ipset, &ctx->u.ipset, sizeof(struct zebra_pbr_ipset));
2248 }
2249
2250 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
2251 struct zebra_pbr_ipset_entry *entry)
2252 {
2253 DPLANE_CTX_VALID(ctx);
2254
2255 assert(entry);
2256
2257 memcpy(entry, &ctx->u.ipset_entry.entry, sizeof(struct zebra_pbr_ipset_entry));
2258 }
2259
2260 /*
2261 * End of dplane context accessors
2262 */
2263
2264 /* Optional extra info about interfaces in nexthops - a plugin must enable
2265 * this extra info.
2266 */
2267 const struct dplane_intf_extra *
2268 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
2269 {
2270 return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
2271 }
2272
2273 const struct dplane_intf_extra *
2274 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
2275 const struct dplane_intf_extra *ptr)
2276 {
2277 return TAILQ_NEXT(ptr, link);
2278 }
2279
2280 vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
2281 {
2282 return ptr->vrf_id;
2283 }
2284
2285 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr)
2286 {
2287 return ptr->ifindex;
2288 }
2289
2290 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr)
2291 {
2292 return ptr->flags;
2293 }
2294
2295 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr)
2296 {
2297 return ptr->status;
2298 }
2299
2300 /*
2301 * End of interface extra info accessors
2302 */
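/* Illustrative iteration over the optional per-nexthop interface info
 * (a sketch for plugin authors; handle_intf() is a hypothetical callback):
 *
 *   const struct dplane_intf_extra *ie;
 *
 *   for (ie = dplane_ctx_get_intf_extra(ctx); ie != NULL;
 *        ie = dplane_ctx_intf_extra_next(ctx, ie))
 *           handle_intf(dplane_intf_extra_get_ifindex(ie),
 *                       dplane_intf_extra_get_status(ie));
 */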
2303
2304 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx)
2305 {
2306 DPLANE_CTX_VALID(ctx);
2307
2308 return ctx->u.neightable.family;
2309 }
2310
2311 uint32_t
2312 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx)
2313 {
2314 DPLANE_CTX_VALID(ctx);
2315
2316 return ctx->u.neightable.app_probes;
2317 }
2318
2319 uint32_t
2320 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx)
2321 {
2322 DPLANE_CTX_VALID(ctx);
2323
2324 return ctx->u.neightable.ucast_probes;
2325 }
2326
2327 uint32_t
2328 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
2329 {
2330 DPLANE_CTX_VALID(ctx);
2331
2332 return ctx->u.neightable.mcast_probes;
2333 }
2334
2335 enum dplane_netconf_status_e
2336 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
2337 {
2338 DPLANE_CTX_VALID(ctx);
2339
2340 return ctx->u.netconf.mpls_val;
2341 }
2342
2343 enum dplane_netconf_status_e
2344 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
2345 {
2346 DPLANE_CTX_VALID(ctx);
2347
2348 return ctx->u.netconf.mcast_val;
2349 }
2350
2351 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
2352 enum dplane_netconf_status_e val)
2353 {
2354 DPLANE_CTX_VALID(ctx);
2355
2356 ctx->u.netconf.mpls_val = val;
2357 }
2358
2359 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
2360 enum dplane_netconf_status_e val)
2361 {
2362 DPLANE_CTX_VALID(ctx);
2363
2364 ctx->u.netconf.mcast_val = val;
2365 }
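/* Note: the netconf values above describe per-device settings (MPLS input,
 * multicast forwarding); the getters and setters simply expose the ctx
 * fields and do not themselves touch the OS.
 */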
2366
2367 /*
2368 * Retrieve the limit on the number of pending, unprocessed updates.
2369 */
2370 uint32_t dplane_get_in_queue_limit(void)
2371 {
2372 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2373 memory_order_relaxed);
2374 }
2375
2376 /*
2377 * Configure limit on the number of pending, queued updates.
2378 */
2379 void dplane_set_in_queue_limit(uint32_t limit, bool set)
2380 {
2381 /* Reset to default on 'unset' */
2382 if (!set)
2383 limit = DPLANE_DEFAULT_MAX_QUEUED;
2384
2385 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
2386 memory_order_relaxed);
2387 }
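/* Example (sketch): dplane_set_in_queue_limit(1000, true) raises the limit,
 * while dplane_set_in_queue_limit(0, false) restores the default; the value
 * passed along with 'set == false' is ignored.
 */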
2388
2389 /*
2390 * Retrieve the current queue depth of incoming, unprocessed updates
2391 */
2392 uint32_t dplane_get_in_queue_len(void)
2393 {
2394 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
2395 memory_order_seq_cst);
2396 }
2397
2398 /*
2399 * Internal helper that copies information from a zebra ns object; this is
2400 * called in the zebra main pthread context as part of dplane ctx init.
2401 */
2402 static void ctx_info_from_zns(struct zebra_dplane_info *ns_info,
2403 struct zebra_ns *zns)
2404 {
2405 ns_info->ns_id = zns->ns_id;
2406
2407 #if defined(HAVE_NETLINK)
2408 ns_info->is_cmd = true;
2409 ns_info->sock = zns->netlink_dplane_out.sock;
2410 ns_info->seq = zns->netlink_dplane_out.seq;
2411 #endif /* HAVE_NETLINK */
2412 }
2413
2414 /*
2415 * Common dataplane context init with zebra namespace info.
2416 */
2417 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
2418 struct zebra_ns *zns,
2419 bool is_update)
2420 {
2421 ctx_info_from_zns(&(ctx->zd_ns_info), zns);
2422
2423 ctx->zd_is_update = is_update;
2424
2425 #if defined(HAVE_NETLINK)
2426 /* Increment message counter after copying to context struct - may need
2427 * two messages in some 'update' cases.
2428 */
2429 if (is_update)
2430 zns->netlink_dplane_out.seq += 2;
2431 else
2432 zns->netlink_dplane_out.seq++;
2433 #endif /* HAVE_NETLINK */
2434
2435 return AOK;
2436 }
2437
2438 /*
2439 * Initialize a context block for a route update from zebra data structs.
2440 */
2441 int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2442 struct route_node *rn, struct route_entry *re)
2443 {
2444 int ret = EINVAL;
2445 const struct route_table *table = NULL;
2446 const struct rib_table_info *info;
2447 const struct prefix *p, *src_p;
2448 struct zebra_ns *zns;
2449 struct zebra_vrf *zvrf;
2450 struct nexthop *nexthop;
2451 struct zebra_l3vni *zl3vni;
2452 const struct interface *ifp;
2453 struct dplane_intf_extra *if_extra;
2454
2455 if (!ctx || !rn || !re)
2456 goto done;
2457
2458 TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
2459
2460 ctx->zd_op = op;
2461 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2462
2463 ctx->u.rinfo.zd_type = re->type;
2464 ctx->u.rinfo.zd_old_type = re->type;
2465
2466 /* Prefixes: dest, and optional source */
2467 srcdest_rnode_prefixes(rn, &p, &src_p);
2468
2469 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
2470
2471 if (src_p)
2472 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
2473 else
2474 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
2475
2476 ctx->zd_table_id = re->table;
2477
2478 ctx->u.rinfo.zd_metric = re->metric;
2479 ctx->u.rinfo.zd_old_metric = re->metric;
2480 ctx->zd_vrf_id = re->vrf_id;
2481 ctx->u.rinfo.zd_mtu = re->mtu;
2482 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
2483 ctx->u.rinfo.zd_instance = re->instance;
2484 ctx->u.rinfo.zd_tag = re->tag;
2485 ctx->u.rinfo.zd_old_tag = re->tag;
2486 ctx->u.rinfo.zd_distance = re->distance;
2487
2488 table = srcdest_rnode_table(rn);
2489 info = table->info;
2490
2491 ctx->u.rinfo.zd_afi = info->afi;
2492 ctx->u.rinfo.zd_safi = info->safi;
2493
2494 /* Copy nexthops; recursive info is included too */
2495 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
2496 re->nhe->nhg.nexthop, NULL);
2497 ctx->u.rinfo.zd_nhg_id = re->nhe->id;
2498
2499 /* Copy backup nexthop info, if present */
2500 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2501 copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
2502 re->nhe->backup_info->nhe->nhg.nexthop, NULL);
2503 }
2504
2505 /*
2506 * Ensure that the dplane nexthops' flags are clear and copy
2507 * encapsulation information.
2508 */
2509 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) {
2510 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2511
2512 /* Optionally capture extra interface info while we're in the
2513 * main zebra pthread - a plugin has to ask for this info.
2514 */
2515 if (dplane_collect_extra_intf_info) {
2516 ifp = if_lookup_by_index(nexthop->ifindex,
2517 nexthop->vrf_id);
2518
2519 if (ifp) {
2520 if_extra = XCALLOC(
2521 MTYPE_DP_INTF,
2522 sizeof(struct dplane_intf_extra));
2523 if_extra->vrf_id = nexthop->vrf_id;
2524 if_extra->ifindex = nexthop->ifindex;
2525 if_extra->flags = ifp->flags;
2526 if_extra->status = ifp->status;
2527
2528 TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
2529 if_extra, link);
2530 }
2531 }
2532
2533 /* Check for available evpn encapsulations. */
2534 if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN))
2535 continue;
2536
2537 zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
2538 if (zl3vni && is_l3vni_oper_up(zl3vni)) {
2539 nexthop->nh_encap_type = NET_VXLAN;
2540 nexthop->nh_encap.vni = zl3vni->vni;
2541 }
2542 }
2543
2544 /* Don't need some info when capturing a system notification */
2545 if (op == DPLANE_OP_SYS_ROUTE_ADD ||
2546 op == DPLANE_OP_SYS_ROUTE_DELETE) {
2547 ret = AOK;
2548 goto done;
2549 }
2550
2551 /* Extract ns info - can't use pointers to 'core' structs */
2552 zvrf = vrf_info_lookup(re->vrf_id);
2553 zns = zvrf->zns;
2554 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
2555
2556 #ifdef HAVE_NETLINK
2557 {
2558 struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
2559
2560 ctx->u.rinfo.nhe.id = nhe->id;
2561 ctx->u.rinfo.nhe.old_id = 0;
2562 /*
2563 * Check if the nhe is installed/queued before doing anything
2564 * with this route.
2565 *
2566 * If it's a delete, we only use the prefix anyway, so this only
2567 * matters for INSTALL/UPDATE.
2568 */
2569 if (zebra_nhg_kernel_nexthops_enabled()
2570 && (((op == DPLANE_OP_ROUTE_INSTALL)
2571 || (op == DPLANE_OP_ROUTE_UPDATE))
2572 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2573 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
2574 ret = ENOENT;
2575 goto done;
2576 }
2577
2578 re->nhe_installed_id = nhe->id;
2579 }
2580 #endif /* HAVE_NETLINK */
2581
2582 /* Trying out the sequence number idea, so we can try to detect
2583 * when a result is stale.
2584 */
2585 re->dplane_sequence = zebra_router_get_next_sequence();
2586 ctx->zd_seq = re->dplane_sequence;
2587
2588 ret = AOK;
2589
2590 done:
2591 return ret;
2592 }
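/* Typical usage of dplane_ctx_route_init() (sketch): allocate a ctx, run
 * this init, then hand the ctx to dplane_update_enqueue() - see
 * dplane_route_update_internal() below. On a non-AOK return the caller
 * still owns, and must free, the context.
 */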
2593
2594 /**
2595 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2596 *
2597 * @ctx: Dataplane context to init
2598 * @op: Operation being performed
2599 * @nhe: Nexthop group hash entry
2600 *
2601 * Return: Result status
2602 */
2603 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2604 struct nhg_hash_entry *nhe)
2605 {
2606 struct zebra_vrf *zvrf = NULL;
2607 struct zebra_ns *zns = NULL;
2608 int ret = EINVAL;
2609
2610 if (!ctx || !nhe)
2611 goto done;
2612
2613 ctx->zd_op = op;
2614 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2615
2616 /* Copy over nhe info */
2617 ctx->u.rinfo.nhe.id = nhe->id;
2618 ctx->u.rinfo.nhe.afi = nhe->afi;
2619 ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
2620 ctx->u.rinfo.nhe.type = nhe->type;
2621
2622 nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), &(nhe->nhg));
2623
2624 /* If this is a group, convert it to a grp array of ids */
2625 if (!zebra_nhg_depends_is_empty(nhe)
2626 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
2627 ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
2628 ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);
2629
2630 zvrf = vrf_info_lookup(nhe->vrf_id);
2631
2632 /*
2633 * Fallback to default namespace if the vrf got ripped out from under
2634 * us.
2635 */
2636 zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
2637
2638 /*
2639 * TODO: Might not need to mark this as an update, since
2640 * it probably won't require two messages
2641 */
2642 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));
2643
2644 ret = AOK;
2645
2646 done:
2647 return ret;
2648 }
2649
2650 /**
2651 * dplane_ctx_intf_init() - Initialize a context block for an interface update
2652 *
2653 * @ctx: Dataplane context to init
2654 * @op: Operation being performed
2655 * @ifp: Interface
2656 *
2657 * Return: Result status
2658 */
2659 int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2660 const struct interface *ifp)
2661 {
2662 struct zebra_ns *zns;
2663 struct zebra_if *zif;
2664 int ret = EINVAL;
2665 bool set_pdown, unset_pdown;
2666
2667 if (!ctx || !ifp)
2668 goto done;
2669
2670 ctx->zd_op = op;
2671 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2672 ctx->zd_vrf_id = ifp->vrf->vrf_id;
2673
2674 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2675 ctx->zd_ifindex = ifp->ifindex;
2676
2677 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
2678 dplane_ctx_ns_init(ctx, zns, false);
2679
2680
2681 /* Copy over ifp info */
2682 ctx->u.intf.metric = ifp->metric;
2683 ctx->u.intf.flags = ifp->flags;
2684
2685 /* Copy over extra zebra info, if available */
2686 zif = (struct zebra_if *)ifp->info;
2687
2688 if (zif) {
2689 set_pdown = !!(zif->flags & ZIF_FLAG_SET_PROTODOWN);
2690 unset_pdown = !!(zif->flags & ZIF_FLAG_UNSET_PROTODOWN);
2691
2692 if (zif->protodown_rc &&
2693 ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif) == false)
2694 ctx->u.intf.pd_reason_val = true;
2695
2696 /*
2697 * See if we have new protodown state to set, otherwise keep
2698 * current state
2699 */
2700 if (set_pdown)
2701 ctx->u.intf.protodown = true;
2702 else if (unset_pdown)
2703 ctx->u.intf.protodown = false;
2704 else
2705 ctx->u.intf.protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
2706 }
2707
2708 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_INTF_UPDATE));
2709 ctx->zd_is_update = (op == DPLANE_OP_INTF_UPDATE);
2710
2711 ret = AOK;
2712
2713 done:
2714 return ret;
2715 }
2716
2717 /*
2718 * Capture information for an LSP update in a dplane context.
2719 */
2720 int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2721 struct zebra_lsp *lsp)
2722 {
2723 int ret = AOK;
2724 struct zebra_nhlfe *nhlfe, *new_nhlfe;
2725
2726 ctx->zd_op = op;
2727 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2728
2729 /* Capture namespace info */
2730 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2731 (op == DPLANE_OP_LSP_UPDATE));
2732
2733 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
2734
2735 nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
2736 nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
2737
2738 /* This may be called to create/init a dplane context, not necessarily
2739 * to copy an lsp object.
2740 */
2741 if (lsp == NULL) {
2742 ret = AOK;
2743 goto done;
2744 }
2745
2746 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2747 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2748 dplane_op2str(op), lsp->ile.in_label,
2749 lsp->num_ecmp);
2750
2751 ctx->u.lsp.ile = lsp->ile;
2752 ctx->u.lsp.addr_family = lsp->addr_family;
2753 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
2754 ctx->u.lsp.flags = lsp->flags;
2755
2756 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2757 frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
2758 /* Not sure if this is meaningful... */
2759 if (nhlfe->nexthop == NULL)
2760 continue;
2761
2762 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
2763 nhlfe->nexthop);
2764 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2765 ret = ENOMEM;
2766 break;
2767 }
2768
2769 /* Need to copy flags and backup info too */
2770 new_nhlfe->flags = nhlfe->flags;
2771 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2772
2773 if (CHECK_FLAG(new_nhlfe->nexthop->flags,
2774 NEXTHOP_FLAG_HAS_BACKUP)) {
2775 new_nhlfe->nexthop->backup_num =
2776 nhlfe->nexthop->backup_num;
2777 memcpy(new_nhlfe->nexthop->backup_idx,
2778 nhlfe->nexthop->backup_idx,
2779 new_nhlfe->nexthop->backup_num);
2780 }
2781
2782 if (nhlfe == lsp->best_nhlfe)
2783 ctx->u.lsp.best_nhlfe = new_nhlfe;
2784 }
2785
2786 if (ret != AOK)
2787 goto done;
2788
2789 /* Capture backup nhlfes/nexthops */
2790 frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
2791 /* Not sure if this is meaningful... */
2792 if (nhlfe->nexthop == NULL)
2793 continue;
2794
2795 new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
2796 nhlfe->type,
2797 nhlfe->nexthop);
2798 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2799 ret = ENOMEM;
2800 break;
2801 }
2802
2803 /* Need to copy flags too */
2804 new_nhlfe->flags = nhlfe->flags;
2805 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2806 }
2807
2808 /* On error the ctx will be cleaned-up, so we don't need to
2809 * deal with any allocated nhlfe or nexthop structs here.
2810 */
2811 done:
2812
2813 return ret;
2814 }
2815
2816 /*
2817 * Capture information for a pseudowire update in a dplane context.
2818 */
2819 static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
2820 enum dplane_op_e op,
2821 struct zebra_pw *pw)
2822 {
2823 int ret = EINVAL;
2824 struct prefix p;
2825 afi_t afi;
2826 struct route_table *table;
2827 struct route_node *rn;
2828 struct route_entry *re;
2829 const struct nexthop_group *nhg;
2830 struct nexthop *nh, *newnh, *last_nh;
2831
2832 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2833 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2834 dplane_op2str(op), pw->ifname, pw->local_label,
2835 pw->remote_label);
2836
2837 ctx->zd_op = op;
2838 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2839
2840 /* Capture namespace info: no netlink support as of 12/18,
2841 * but just in case...
2842 */
2843 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2844
2845 memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
2846
2847 /* This name appears to be a C string, so we use string copy. */
2848 strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));
2849
2850 ctx->zd_vrf_id = pw->vrf_id;
2851 ctx->zd_ifindex = pw->ifindex;
2852 ctx->u.pw.type = pw->type;
2853 ctx->u.pw.af = pw->af;
2854 ctx->u.pw.local_label = pw->local_label;
2855 ctx->u.pw.remote_label = pw->remote_label;
2856 ctx->u.pw.flags = pw->flags;
2857
2858 ctx->u.pw.dest = pw->nexthop;
2859
2860 ctx->u.pw.fields = pw->data;
2861
2862 /* Capture nexthop info for the pw destination. We need to look
2863 * up and use zebra datastructs, but we're running in the zebra
2864 * pthread here so that should be ok.
2865 */
2866 memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
2867 p.family = pw->af;
2868 p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
2869
2870 afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
2871 table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
2872 if (table == NULL)
2873 goto done;
2874
2875 rn = route_node_match(table, &p);
2876 if (rn == NULL)
2877 goto done;
2878
2879 re = NULL;
2880 RNODE_FOREACH_RE(rn, re) {
2881 if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
2882 break;
2883 }
2884
2885 if (re) {
2886 /* We'll capture a 'fib' list of nexthops that meet our
2887 * criteria: installed, and labelled.
2888 */
2889 nhg = rib_get_fib_nhg(re);
2890 last_nh = NULL;
2891
2892 if (nhg && nhg->nexthop) {
2893 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2894 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2895 || CHECK_FLAG(nh->flags,
2896 NEXTHOP_FLAG_RECURSIVE)
2897 || nh->nh_label == NULL)
2898 continue;
2899
2900 newnh = nexthop_dup(nh, NULL);
2901
2902 if (last_nh)
2903 NEXTHOP_APPEND(last_nh, newnh);
2904 else
2905 ctx->u.pw.fib_nhg.nexthop = newnh;
2906 last_nh = newnh;
2907 }
2908 }
2909
2910 /* Include any installed backup nexthops also. */
2911 nhg = rib_get_fib_backup_nhg(re);
2912 if (nhg && nhg->nexthop) {
2913 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2914 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2915 || CHECK_FLAG(nh->flags,
2916 NEXTHOP_FLAG_RECURSIVE)
2917 || nh->nh_label == NULL)
2918 continue;
2919
2920 newnh = nexthop_dup(nh, NULL);
2921
2922 if (last_nh)
2923 NEXTHOP_APPEND(last_nh, newnh);
2924 else
2925 ctx->u.pw.fib_nhg.nexthop = newnh;
2926 last_nh = newnh;
2927 }
2928 }
2929
2930 /* Copy primary nexthops; recursive info is included too */
2931 assert(re->nhe != NULL); /* SA warning */
2932 copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
2933 re->nhe->nhg.nexthop, NULL);
2934 ctx->u.pw.nhg_id = re->nhe->id;
2935
2936 /* Copy backup nexthop info, if present */
2937 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2938 copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
2939 re->nhe->backup_info->nhe->nhg.nexthop,
2940 NULL);
2941 }
2942 }
2943 route_unlock_node(rn);
2944
2945 ret = AOK;
2946
2947 done:
2948 return ret;
2949 }
2950
2951 /**
2952 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2953 * PBR rule.
2954 *
2955 * @dplane_rule: Dataplane internal representation of a rule
2956 * @rule: PBR rule
2957 */
2958 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
2959 struct zebra_pbr_rule *rule)
2960 {
2961 dplane_rule->priority = rule->rule.priority;
2962 dplane_rule->table = rule->rule.action.table;
2963
2964 dplane_rule->filter_bm = rule->rule.filter.filter_bm;
2965 dplane_rule->fwmark = rule->rule.filter.fwmark;
2966 dplane_rule->dsfield = rule->rule.filter.dsfield;
2967 dplane_rule->ip_proto = rule->rule.filter.ip_proto;
2968 prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
2969 prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
2970
2971 dplane_rule->action_pcp = rule->rule.action.pcp;
2972 dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
2973 dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
2974 dplane_rule->action_queue_id = rule->rule.action.queue_id;
2975
2976 strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
2977 }
2978
2979 /**
2980 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
2981 *
2982 * @ctx: Dataplane context to init
2983 * @op: Operation being performed
2984 * @new_rule: PBR rule
2985 *
2986 * Return: Result status
2987 */
2988 static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
2989 enum dplane_op_e op,
2990 struct zebra_pbr_rule *new_rule,
2991 struct zebra_pbr_rule *old_rule)
2992 {
2993 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2994 zlog_debug(
2995 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
2996 dplane_op2str(op), new_rule->ifname,
2997 new_rule->rule.priority, new_rule->rule.filter.fwmark,
2998 &new_rule->rule.filter.src_ip,
2999 &new_rule->rule.filter.dst_ip,
3000 new_rule->rule.action.table);
3001
3002 ctx->zd_op = op;
3003 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3004
3005 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
3006 op == DPLANE_OP_RULE_UPDATE);
3007
3008 ctx->zd_vrf_id = new_rule->vrf_id;
3009 strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
3010
3011 ctx->u.rule.sock = new_rule->sock;
3012 ctx->u.rule.unique = new_rule->rule.unique;
3013 ctx->u.rule.seq = new_rule->rule.seq;
3014
3015 dplane_ctx_rule_init_single(&ctx->u.rule.new, new_rule);
3016 if (op == DPLANE_OP_RULE_UPDATE)
3017 dplane_ctx_rule_init_single(&ctx->u.rule.old, old_rule);
3018
3019 return AOK;
3020 }
3021
3022 /**
3023 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
3024 * update.
3025 *
3026 * @ctx: Dataplane context to init
3027 * @op: Operation being performed
3028 * @iptable: PBR iptable
3029 *
3030 * Return: Result status
3031 */
3032 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx *ctx,
3033 enum dplane_op_e op,
3034 struct zebra_pbr_iptable *iptable)
3035 {
3036 char *ifname;
3037 struct listnode *node;
3038
3039 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3040 zlog_debug(
3041 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
3042 dplane_op2str(op), iptable->unique, iptable->fwmark,
3043 family2str(iptable->family),
3044 iptable->action == ZEBRA_IPTABLES_DROP ? "Drop"
3045 : "Forward");
3046 }
3047
3048 ctx->zd_op = op;
3049 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3050
3051 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3052
3053 ctx->zd_vrf_id = iptable->vrf_id;
3054 memcpy(&ctx->u.iptable, iptable, sizeof(struct zebra_pbr_iptable));
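/* The struct copy above duplicates the iptable's list pointer, so build a
 * private deep copy of the interface names below; the ctx then owns its
 * own MTYPE_DP_NETFILTER strings.
 */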
3055 ctx->u.iptable.interface_name_list = NULL;
3056 if (iptable->nb_interface > 0) {
3057 ctx->u.iptable.interface_name_list = list_new();
3058 for (ALL_LIST_ELEMENTS_RO(iptable->interface_name_list, node,
3059 ifname)) {
3060 listnode_add(ctx->u.iptable.interface_name_list,
3061 XSTRDUP(MTYPE_DP_NETFILTER, ifname));
3062 }
3063 }
3064 return AOK;
3065 }
3066
3067 /**
3068 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
3069 *
3070 * @ctx: Dataplane context to init
3071 * @op: Operation being performed
3072 * @ipset: PBR ipset
3073 *
3074 * Return: Result status
3075 */
3076 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx *ctx,
3077 enum dplane_op_e op,
3078 struct zebra_pbr_ipset *ipset)
3079 {
3080 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3081 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
3082 dplane_op2str(op), ipset->ipset_name, ipset->unique,
3083 family2str(ipset->family),
3084 zebra_pbr_ipset_type2str(ipset->type));
3085 }
3086
3087 ctx->zd_op = op;
3088 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3089
3090 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3091
3092 ctx->zd_vrf_id = ipset->vrf_id;
3093
3094 memcpy(&ctx->u.ipset, ipset, sizeof(struct zebra_pbr_ipset));
3095 return AOK;
3096 }
3097
3098 /**
3099 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3100 * entry update.
3101 *
3102 * @ctx: Dataplane context to init
3103 * @op: Operation being performed
3104 * @ipset_entry: PBR ipset entry
3105 *
3106 * Return: Result status
3107 */
3108 static int
3109 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
3110 struct zebra_pbr_ipset_entry *ipset_entry)
3111 {
3112 struct zebra_pbr_ipset *ipset;
3113
3114 ipset = ipset_entry->backpointer;
3115 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3116 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3117 dplane_op2str(op), ipset->ipset_name,
3118 ipset_entry->unique, ipset_entry->filter_bm);
3119 }
3120
3121 ctx->zd_op = op;
3122 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3123
3124 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3125
3126 ctx->zd_vrf_id = ipset->vrf_id;
3127
3128 memcpy(&ctx->u.ipset_entry.entry, ipset_entry,
3129 sizeof(struct zebra_pbr_ipset_entry));
3130 ctx->u.ipset_entry.entry.backpointer = NULL;
3131 ctx->u.ipset_entry.info.type = ipset->type;
3132 ctx->u.ipset_entry.info.family = ipset->family;
3133 memcpy(&ctx->u.ipset_entry.info.ipset_name, &ipset->ipset_name,
3134 ZEBRA_IPSET_NAME_SIZE);
3135
3136 return AOK;
3137 }
3138
3139
3140 /*
3141 * Enqueue a new update,
3142 * and ensure an event is active for the dataplane pthread.
3143 */
3144 static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
3145 {
3146 int ret = EINVAL;
3147 uint32_t high, curr;
3148
3149 /* Enqueue for processing by the dataplane pthread */
3150 DPLANE_LOCK();
3151 {
3152 TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
3153 zd_q_entries);
3154 }
3155 DPLANE_UNLOCK();
3156
3157 curr = atomic_fetch_add_explicit(
3158 &(zdplane_info.dg_routes_queued),
3159 1, memory_order_seq_cst);
3160
3161 curr++; /* We got the pre-incremented value */
3162
3163 /* Maybe update high-water counter also */
3164 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
3165 memory_order_seq_cst);
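/* compare_exchange_weak reloads 'high' on failure (including spurious
 * failures), so we only retry while our new count is still above the
 * recorded maximum.
 */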
3166 while (high < curr) {
3167 if (atomic_compare_exchange_weak_explicit(
3168 &zdplane_info.dg_routes_queued_max,
3169 &high, curr,
3170 memory_order_seq_cst,
3171 memory_order_seq_cst))
3172 break;
3173 }
3174
3175 /* Ensure that an event for the dataplane thread is active */
3176 ret = dplane_provider_work_ready();
3177
3178 return ret;
3179 }
3180
3181 /*
3182 * Utility that prepares a route update and enqueues it for processing
3183 */
3184 static enum zebra_dplane_result
3185 dplane_route_update_internal(struct route_node *rn,
3186 struct route_entry *re,
3187 struct route_entry *old_re,
3188 enum dplane_op_e op)
3189 {
3190 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3191 int ret = EINVAL;
3192 struct zebra_dplane_ctx *ctx = NULL;
3193
3194 /* Obtain context block */
3195 ctx = dplane_ctx_alloc();
3196
3197 /* Init context with info from zebra data structs */
3198 ret = dplane_ctx_route_init(ctx, op, rn, re);
3199 if (ret == AOK) {
3200 /* Capture some extra info for the update case
3201 * where there's a different 'old' route.
3202 */
3203 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
3204 old_re && (old_re != re)) {
3205
3206 old_re->dplane_sequence =
3207 zebra_router_get_next_sequence();
3208 ctx->zd_old_seq = old_re->dplane_sequence;
3209
3210 ctx->u.rinfo.zd_old_tag = old_re->tag;
3211 ctx->u.rinfo.zd_old_type = old_re->type;
3212 ctx->u.rinfo.zd_old_instance = old_re->instance;
3213 ctx->u.rinfo.zd_old_distance = old_re->distance;
3214 ctx->u.rinfo.zd_old_metric = old_re->metric;
3215 ctx->u.rinfo.nhe.old_id = old_re->nhe->id;
3216
3217 #ifndef HAVE_NETLINK
3218 /* For bsd, capture previous re's nexthops too, sigh.
3219 * We'll need these to do per-nexthop deletes.
3220 */
3221 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
3222 old_re->nhe->nhg.nexthop, NULL);
3223
3224 if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
3225 struct nexthop_group *nhg;
3226 struct nexthop **nh;
3227
3228 nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
3229 nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
3230
3231 if (nhg->nexthop)
3232 copy_nexthops(nh, nhg->nexthop, NULL);
3233 }
3234 #endif /* !HAVE_NETLINK */
3235 }
3236
3237 /*
3238 * If the old and new context type and nexthop group id
3239 * are the same, there is no need to send down a route
3240 * replace: we know we have already sent a nexthop group
3241 * replace, or an upper-level protocol has sent us the
3242 * exact same route again.
3243 */
3244 if ((dplane_ctx_get_type(ctx) == dplane_ctx_get_old_type(ctx))
3245 && (dplane_ctx_get_nhe_id(ctx)
3246 == dplane_ctx_get_old_nhe_id(ctx))
3247 && (dplane_ctx_get_nhe_id(ctx) >= ZEBRA_NHG_PROTO_LOWER)) {
3248 struct nexthop *nexthop;
3249
3250 if (IS_ZEBRA_DEBUG_DPLANE)
3251 zlog_debug(
3252 "%s: Ignoring Route exactly the same",
3253 __func__);
3254
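/* No update is enqueued in this case; the active, non-recursive nexthops
 * are simply marked as installed (NEXTHOP_FLAG_FIB) before returning
 * success to the caller.
 */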
3255 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
3256 nexthop)) {
3257 if (CHECK_FLAG(nexthop->flags,
3258 NEXTHOP_FLAG_RECURSIVE))
3259 continue;
3260
3261 if (CHECK_FLAG(nexthop->flags,
3262 NEXTHOP_FLAG_ACTIVE))
3263 SET_FLAG(nexthop->flags,
3264 NEXTHOP_FLAG_FIB);
3265 }
3266
3267 dplane_ctx_free(&ctx);
3268 return ZEBRA_DPLANE_REQUEST_SUCCESS;
3269 }
3270
3271 /* Enqueue context for processing */
3272 ret = dplane_update_enqueue(ctx);
3273 }
3274
3275 /* Update counter */
3276 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
3277 memory_order_relaxed);
3278
3279 if (ret == AOK)
3280 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3281 else {
3282 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
3283 memory_order_relaxed);
3284 if (ctx)
3285 dplane_ctx_free(&ctx);
3286 }
3287
3288 return result;
3289 }
3290
3291 /**
3292 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3293 *
3294 * @nhe: Nexthop group hash entry where the change occurred
3295 * @op: The operation to be enqueued
3296 *
3297 * Return: Result of the change
3298 */
3299 static enum zebra_dplane_result
3300 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
3301 {
3302 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3303 int ret = EINVAL;
3304 struct zebra_dplane_ctx *ctx = NULL;
3305
3306 /* Obtain context block */
3307 ctx = dplane_ctx_alloc();
3308 if (!ctx) {
3309 ret = ENOMEM;
3310 goto done;
3311 }
3312
3313 ret = dplane_ctx_nexthop_init(ctx, op, nhe);
3314 if (ret == AOK)
3315 ret = dplane_update_enqueue(ctx);
3316
3317 done:
3318 /* Update counter */
3319 atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
3320 memory_order_relaxed);
3321
3322 if (ret == AOK)
3323 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3324 else {
3325 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3326 memory_order_relaxed);
3327 if (ctx)
3328 dplane_ctx_free(&ctx);
3329 }
3330
3331 return result;
3332 }
3333
3334 /*
3335 * Enqueue a route 'add' for the dataplane.
3336 */
3337 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
3338 struct route_entry *re)
3339 {
3340 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3341
3342 if (rn == NULL || re == NULL)
3343 goto done;
3344
3345 ret = dplane_route_update_internal(rn, re, NULL,
3346 DPLANE_OP_ROUTE_INSTALL);
3347
3348 done:
3349 return ret;
3350 }
3351
3352 /*
3353 * Enqueue a route update for the dataplane.
3354 */
3355 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
3356 struct route_entry *re,
3357 struct route_entry *old_re)
3358 {
3359 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3360
3361 if (rn == NULL || re == NULL)
3362 goto done;
3363
3364 ret = dplane_route_update_internal(rn, re, old_re,
3365 DPLANE_OP_ROUTE_UPDATE);
3366 done:
3367 return ret;
3368 }
3369
3370 /*
3371 * Enqueue a route removal for the dataplane.
3372 */
3373 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
3374 struct route_entry *re)
3375 {
3376 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3377
3378 if (rn == NULL || re == NULL)
3379 goto done;
3380
3381 ret = dplane_route_update_internal(rn, re, NULL,
3382 DPLANE_OP_ROUTE_DELETE);
3383
3384 done:
3385 return ret;
3386 }
3387
3388 /*
3389 * Notify the dplane when system/connected routes change.
3390 */
3391 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
3392 struct route_entry *re)
3393 {
3394 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3395
3396 /* Ignore this event unless a provider plugin has requested it. */
3397 if (!zdplane_info.dg_sys_route_notifs) {
3398 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3399 goto done;
3400 }
3401
3402 if (rn == NULL || re == NULL)
3403 goto done;
3404
3405 ret = dplane_route_update_internal(rn, re, NULL,
3406 DPLANE_OP_SYS_ROUTE_ADD);
3407
3408 done:
3409 return ret;
3410 }
3411
3412 /*
3413 * Notify the dplane when system/connected routes are deleted.
3414 */
3415 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
3416 struct route_entry *re)
3417 {
3418 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3419
3420 /* Ignore this event unless a provider plugin has requested it. */
3421 if (!zdplane_info.dg_sys_route_notifs) {
3422 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3423 goto done;
3424 }
3425
3426 if (rn == NULL || re == NULL)
3427 goto done;
3428
3429 ret = dplane_route_update_internal(rn, re, NULL,
3430 DPLANE_OP_SYS_ROUTE_DELETE);
3431
3432 done:
3433 return ret;
3434 }
3435
3436 /*
3437 * Update from an async notification to bring other FIBs up to date.
3438 */
3439 enum zebra_dplane_result
3440 dplane_route_notif_update(struct route_node *rn,
3441 struct route_entry *re,
3442 enum dplane_op_e op,
3443 struct zebra_dplane_ctx *ctx)
3444 {
3445 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3446 int ret = EINVAL;
3447 struct zebra_dplane_ctx *new_ctx = NULL;
3448 struct nexthop *nexthop;
3449 struct nexthop_group *nhg;
3450
3451 if (rn == NULL || re == NULL)
3452 goto done;
3453
3454 new_ctx = dplane_ctx_alloc();
3455 if (new_ctx == NULL)
3456 goto done;
3457
3458 /* Init context with info from zebra data structs */
3459 dplane_ctx_route_init(new_ctx, op, rn, re);
3460
3461 /* For add/update, need to adjust the nexthops so that we match
3462 * the notification state, which may not be the route-entry/RIB
3463 * state.
3464 */
3465 if (op == DPLANE_OP_ROUTE_UPDATE ||
3466 op == DPLANE_OP_ROUTE_INSTALL) {
3467
3468 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
3469 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
3470
3471 nhg = rib_get_fib_nhg(re);
3472 if (nhg && nhg->nexthop)
3473 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3474 nhg->nexthop, NULL);
3475
3476 /* Check for installed backup nexthops also */
3477 nhg = rib_get_fib_backup_nhg(re);
3478 if (nhg && nhg->nexthop) {
3479 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3480 nhg->nexthop, NULL);
3481 }
3482
3483 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
3484 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
3485
3486 }
3487
3488 /* Capture info about the source of the notification, in 'ctx' */
3489 dplane_ctx_set_notif_provider(new_ctx,
3490 dplane_ctx_get_notif_provider(ctx));
3491
3492 ret = dplane_update_enqueue(new_ctx);
3493
3494 done:
3495 if (ret == AOK)
3496 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3497 else if (new_ctx)
3498 dplane_ctx_free(&new_ctx);
3499
3500 return result;
3501 }
3502
3503 /*
3504 * Enqueue a nexthop add for the dataplane.
3505 */
3506 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
3507 {
3508 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3509
3510 if (nhe)
3511 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
3512 return ret;
3513 }
3514
3515 /*
3516 * Enqueue a nexthop update for the dataplane.
3517 *
3518 * Might not need this func since zebra's nexthop objects should be immutable?
3519 */
3520 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
3521 {
3522 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3523
3524 if (nhe)
3525 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
3526 return ret;
3527 }
3528
3529 /*
3530 * Enqueue a nexthop removal for the dataplane.
3531 */
3532 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
3533 {
3534 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3535
3536 if (nhe)
3537 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
3538
3539 return ret;
3540 }
3541
3542 /*
3543 * Enqueue LSP add for the dataplane.
3544 */
3545 enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
3546 {
3547 enum zebra_dplane_result ret =
3548 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
3549
3550 return ret;
3551 }
3552
3553 /*
3554 * Enqueue LSP update for the dataplane.
3555 */
3556 enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
3557 {
3558 enum zebra_dplane_result ret =
3559 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
3560
3561 return ret;
3562 }
3563
3564 /*
3565 * Enqueue LSP delete for the dataplane.
3566 */
3567 enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
3568 {
3569 enum zebra_dplane_result ret =
3570 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
3571
3572 return ret;
3573 }
3574
3575 /* Update or un-install resulting from an async notification */
3576 enum zebra_dplane_result
3577 dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
3578 struct zebra_dplane_ctx *notif_ctx)
3579 {
3580 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3581 int ret = EINVAL;
3582 struct zebra_dplane_ctx *ctx = NULL;
3583 struct nhlfe_list_head *head;
3584 struct zebra_nhlfe *nhlfe, *new_nhlfe;
3585
3586 /* Obtain context block */
3587 ctx = dplane_ctx_alloc();
3588 if (ctx == NULL) {
3589 ret = ENOMEM;
3590 goto done;
3591 }
3592
3593 /* Copy info from zebra LSP */
3594 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3595 if (ret != AOK)
3596 goto done;
3597
3598 /* Add any installed backup nhlfes */
3599 head = &(ctx->u.lsp.backup_nhlfe_list);
3600 frr_each(nhlfe_list, head, nhlfe) {
3601
3602 if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) &&
3603 CHECK_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_FIB)) {
3604 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp),
3605 nhlfe->type,
3606 nhlfe->nexthop);
3607
3608 /* Need to copy flags too */
3609 new_nhlfe->flags = nhlfe->flags;
3610 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
3611 }
3612 }
3613
3614 /* Capture info about the source of the notification */
3615 dplane_ctx_set_notif_provider(
3616 ctx,
3617 dplane_ctx_get_notif_provider(notif_ctx));
3618
3619 ret = dplane_update_enqueue(ctx);
3620
3621 done:
3622 /* Update counter */
3623 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3624 memory_order_relaxed);
3625
3626 if (ret == AOK)
3627 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3628 else {
3629 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3630 memory_order_relaxed);
3631 if (ctx)
3632 dplane_ctx_free(&ctx);
3633 }
3634 return result;
3635 }
3636
3637 /*
3638 * Enqueue pseudowire install for the dataplane.
3639 */
3640 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
3641 {
3642 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
3643 }
3644
3645 /*
3646 * Enqueue pseudowire un-install for the dataplane.
3647 */
3648 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
3649 {
3650 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
3651 }
3652
3653 /*
3654 * Common internal LSP update utility
3655 */
3656 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
3657 enum dplane_op_e op)
3658 {
3659 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3660 int ret = EINVAL;
3661 struct zebra_dplane_ctx *ctx = NULL;
3662
3663 /* Obtain context block */
3664 ctx = dplane_ctx_alloc();
3665
3666 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3667 if (ret != AOK)
3668 goto done;
3669
3670 ret = dplane_update_enqueue(ctx);
3671
3672 done:
3673 /* Update counter */
3674 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3675 memory_order_relaxed);
3676
3677 if (ret == AOK)
3678 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3679 else {
3680 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3681 memory_order_relaxed);
3682 dplane_ctx_free(&ctx);
3683 }
3684
3685 return result;
3686 }
3687
3688 /*
3689 * Internal, common handler for pseudowire updates.
3690 */
3691 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
3692 enum dplane_op_e op)
3693 {
3694 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3695 int ret;
3696 struct zebra_dplane_ctx *ctx = NULL;
3697
3698 ctx = dplane_ctx_alloc();
3699
3700 ret = dplane_ctx_pw_init(ctx, op, pw);
3701 if (ret != AOK)
3702 goto done;
3703
3704 ret = dplane_update_enqueue(ctx);
3705
3706 done:
3707 /* Update counter */
3708 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
3709 memory_order_relaxed);
3710
3711 if (ret == AOK)
3712 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3713 else {
3714 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
3715 memory_order_relaxed);
3716 dplane_ctx_free(&ctx);
3717 }
3718
3719 return result;
3720 }
3721
3722 /*
3723 * Enqueue access br_port update.
3724 */
3725 enum zebra_dplane_result
3726 dplane_br_port_update(const struct interface *ifp, bool non_df,
3727 uint32_t sph_filter_cnt,
3728 const struct in_addr *sph_filters, uint32_t backup_nhg_id)
3729 {
3730 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3731 uint32_t flags = 0;
3732 int ret;
3733 struct zebra_dplane_ctx *ctx = NULL;
3734 struct zebra_ns *zns;
3735 enum dplane_op_e op = DPLANE_OP_BR_PORT_UPDATE;
3736
3737 if (non_df)
3738 flags |= DPLANE_BR_PORT_NON_DF;
3739
3740 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_EVPN_MH_ES) {
3741 uint32_t i;
3742 char vtep_str[ES_VTEP_LIST_STR_SZ];
3743
3744 vtep_str[0] = '\0';
3745 for (i = 0; i < sph_filter_cnt; ++i) {
3746 snprintfrr(vtep_str + strlen(vtep_str),
3747 sizeof(vtep_str) - strlen(vtep_str), "%pI4 ",
3748 &sph_filters[i]);
3749 }
3750 zlog_debug(
3751 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3752 dplane_op2str(op), ifp->name, flags, backup_nhg_id,
3753 vtep_str);
3754 }
3755
3756 ctx = dplane_ctx_alloc();
3757
3758 ctx->zd_op = op;
3759 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3760 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3761
3762 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3763 dplane_ctx_ns_init(ctx, zns, false);
3764
3765 ctx->zd_ifindex = ifp->ifindex;
3766 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3767
3768 /* Init the br-port-specific data area */
3769 memset(&ctx->u.br_port, 0, sizeof(ctx->u.br_port));
3770
3771 ctx->u.br_port.flags = flags;
3772 ctx->u.br_port.backup_nhg_id = backup_nhg_id;
3773 ctx->u.br_port.sph_filter_cnt = sph_filter_cnt;
3774 memcpy(ctx->u.br_port.sph_filters, sph_filters,
3775 sizeof(ctx->u.br_port.sph_filters[0]) * sph_filter_cnt);
3776
3777 /* Enqueue for processing on the dplane pthread */
3778 ret = dplane_update_enqueue(ctx);
3779
3780 /* Increment counter */
3781 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_in, 1,
3782 memory_order_relaxed);
3783
3784 if (ret == AOK) {
3785 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3786 } else {
3787 /* Error counter */
3788 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_errors, 1,
3789 memory_order_relaxed);
3790 dplane_ctx_free(&ctx);
3791 }
3792
3793 return result;
3794 }
3795
3796 /*
3797 * Enqueue interface address add for the dataplane.
3798 */
3799 enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
3800 const struct connected *ifc)
3801 {
3802 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3803 /* Extra checks for this OS path. */
3804
3805 /* Don't configure PtP addresses on broadcast ifs or reverse */
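/* (The '!'s normalize both sides to booleans, so the test below is an
 * exclusive-or: fail when exactly one of 'point-to-point interface' and
 * 'peer address configured' is true.)
 */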
3806 if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
3807 if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
3808 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3809
3810 return ZEBRA_DPLANE_REQUEST_FAILURE;
3811 }
3812 #endif
3813
3814 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
3815 }
3816
3817 /*
3818 * Enqueue interface address remove/uninstall for the dataplane.
3819 */
3820 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
3821 const struct connected *ifc)
3822 {
3823 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
3824 }
3825
3826 static enum zebra_dplane_result intf_addr_update_internal(
3827 const struct interface *ifp, const struct connected *ifc,
3828 enum dplane_op_e op)
3829 {
3830 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3831 int ret = EINVAL;
3832 struct zebra_dplane_ctx *ctx = NULL;
3833 struct zebra_ns *zns;
3834
3835 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3836 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3837 dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
3838 ifc->address);
3839
3840 ctx = dplane_ctx_alloc();
3841
3842 ctx->zd_op = op;
3843 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3844 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3845
3846 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3847 dplane_ctx_ns_init(ctx, zns, false);
3848
3849 /* Init the interface-addr-specific area */
3850 memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
3851
3852 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3853 ctx->zd_ifindex = ifp->ifindex;
3854 ctx->u.intf.prefix = *(ifc->address);
3855
3856 if (if_is_broadcast(ifp))
3857 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
3858
3859 if (CONNECTED_PEER(ifc)) {
3860 ctx->u.intf.dest_prefix = *(ifc->destination);
3861 ctx->u.intf.flags |=
3862 (DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
3863 }
3864
3865 if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
3866 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
3867
3868 if (ifc->label) {
3869 size_t len;
3870
3871 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
3872
3873 /* Use embedded buffer if it's adequate; else allocate. */
3874 len = strlen(ifc->label);
3875
3876 if (len < sizeof(ctx->u.intf.label_buf)) {
3877 strlcpy(ctx->u.intf.label_buf, ifc->label,
3878 sizeof(ctx->u.intf.label_buf));
3879 ctx->u.intf.label = ctx->u.intf.label_buf;
3880 } else {
3881 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, ifc->label);
3882 }
3883 }
3884
3885 ret = dplane_update_enqueue(ctx);
3886
3887 /* Increment counter */
3888 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
3889 memory_order_relaxed);
3890
3891 if (ret == AOK)
3892 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3893 else {
3894 /* Error counter */
3895 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3896 1, memory_order_relaxed);
3897 dplane_ctx_free(&ctx);
3898 }
3899
3900 return result;
3901 }
3902
3903 /**
3904 * dplane_intf_update_internal() - Helper for enqueuing interface changes
3905 *
3906 * @ifp: Interface where the change occurred
3907 * @op: The operation to be enqueued
3908 *
3909 * Return: Result of the change
3910 */
3911 static enum zebra_dplane_result
3912 dplane_intf_update_internal(const struct interface *ifp, enum dplane_op_e op)
3913 {
3914 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3915 int ret = EINVAL;
3916 struct zebra_dplane_ctx *ctx = NULL;
3917
3918 /* Obtain context block */
3919 ctx = dplane_ctx_alloc();
3920 if (!ctx) {
3921 ret = ENOMEM;
3922 goto done;
3923 }
3924
3925 ret = dplane_ctx_intf_init(ctx, op, ifp);
3926 if (ret == AOK)
3927 ret = dplane_update_enqueue(ctx);
3928
3929 done:
3930 /* Update counter */
3931 atomic_fetch_add_explicit(&zdplane_info.dg_intfs_in, 1,
3932 memory_order_relaxed);
3933
3934 if (ret == AOK)
3935 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3936 else {
3937 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors, 1,
3938 memory_order_relaxed);
3939 if (ctx)
3940 dplane_ctx_free(&ctx);
3941 }
3942
3943 return result;
3944 }
3945
3946 /*
3947 * Enqueue an interface add for the dataplane.
3948 */
3949 enum zebra_dplane_result dplane_intf_add(const struct interface *ifp)
3950 {
3951 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3952
3953 if (ifp)
3954 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_INSTALL);
3955 return ret;
3956 }
3957
3958 /*
3959 * Enqueue an interface update for the dataplane.
3960 */
3961 enum zebra_dplane_result dplane_intf_update(const struct interface *ifp)
3962 {
3963 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3964
3965 if (ifp)
3966 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_UPDATE);
3967 return ret;
3968 }
3969
3970 /*
3971 * Enqueue an interface delete for the dataplane.
3972 */
3973 enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp)
3974 {
3975 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3976
3977 if (ifp)
3978 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_DELETE);
3979 return ret;
3980 }
3981
3982 /*
3983 * Enqueue vxlan/evpn mac add (or update).
3984 */
3985 enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
3986 const struct interface *bridge_ifp,
3987 vlanid_t vid,
3988 const struct ethaddr *mac,
3989 struct in_addr vtep_ip,
3990 bool sticky,
3991 uint32_t nhg_id,
3992 bool was_static)
3993 {
3994 enum zebra_dplane_result result;
3995 uint32_t update_flags = 0;
3996
3997 update_flags |= DPLANE_MAC_REMOTE;
3998 if (was_static)
3999 update_flags |= DPLANE_MAC_WAS_STATIC;
4000
4001 /* Use common helper api */
4002 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4003 vid, mac, vtep_ip, sticky, nhg_id, update_flags);
4004 return result;
4005 }
4006
4007 /*
4008 * Enqueue vxlan/evpn mac delete.
4009 */
4010 enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
4011 const struct interface *bridge_ifp,
4012 vlanid_t vid,
4013 const struct ethaddr *mac,
4014 struct in_addr vtep_ip)
4015 {
4016 enum zebra_dplane_result result;
4017 uint32_t update_flags = 0;
4018
4019 update_flags |= DPLANE_MAC_REMOTE;
4020
4021 /* Use common helper api */
4022 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
4023 vid, mac, vtep_ip, false, 0, update_flags);
4024 return result;
4025 }
4026
4027 /*
4028 * API to configure a link-local neighbor entry with either MAC address or IP information
4029 */
4030 enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
4031 const struct interface *ifp,
4032 struct ipaddr *link_ip,
4033 struct ipaddr *ip,
4034 uint32_t ndm_state, int protocol)
4035 {
4036 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4037 uint16_t state = 0;
4038 uint32_t update_flags;
4039
4040 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4041 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
4042 __func__, dplane_op2str(op), ifp->name, link_ip, ip);
4043
4044 if (ndm_state == ZEBRA_NEIGH_STATE_REACHABLE)
4045 state = DPLANE_NUD_REACHABLE;
4046 else if (ndm_state == ZEBRA_NEIGH_STATE_FAILED)
4047 state = DPLANE_NUD_FAILED;
4048
4049 update_flags = DPLANE_NEIGH_NO_EXTENSION;
4050
4051 result = neigh_update_internal(op, ifp, (const void *)link_ip,
4052 ipaddr_family(link_ip), ip, 0, state,
4053 update_flags, protocol);
4054
4055 return result;
4056 }
4057
4058 /*
4059 * Enqueue local mac add (or update).
4060 */
4061 enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
4062 const struct interface *bridge_ifp,
4063 vlanid_t vid,
4064 const struct ethaddr *mac,
4065 bool sticky,
4066 uint32_t set_static,
4067 uint32_t set_inactive)
4068 {
4069 enum zebra_dplane_result result;
4070 uint32_t update_flags = 0;
4071 struct in_addr vtep_ip;
4072
4073 if (set_static)
4074 update_flags |= DPLANE_MAC_SET_STATIC;
4075
4076 if (set_inactive)
4077 update_flags |= DPLANE_MAC_SET_INACTIVE;
4078
4079 vtep_ip.s_addr = 0;
4080
4081 /* Use common helper api */
4082 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4083 vid, mac, vtep_ip, sticky, 0,
4084 update_flags);
4085 return result;
4086 }
4087
4088 /*
4089 * Enqueue local mac del
4090 */
4091 enum zebra_dplane_result
4092 dplane_local_mac_del(const struct interface *ifp,
4093 const struct interface *bridge_ifp, vlanid_t vid,
4094 const struct ethaddr *mac)
4095 {
4096 enum zebra_dplane_result result;
4097 struct in_addr vtep_ip;
4098
4099 vtep_ip.s_addr = 0;
4100
4101 /* Use common helper api */
4102 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, vid,
4103 mac, vtep_ip, false, 0, 0);
4104 return result;
4105 }
4106 /*
4107 * Public api to init an empty context - either newly-allocated or
4108 * reset/cleared - for a MAC update.
4109 */
4110 void dplane_mac_init(struct zebra_dplane_ctx *ctx,
4111 const struct interface *ifp,
4112 const struct interface *br_ifp,
4113 vlanid_t vid,
4114 const struct ethaddr *mac,
4115 struct in_addr vtep_ip,
4116 bool sticky,
4117 uint32_t nhg_id,
4118 uint32_t update_flags)
4119 {
4120 struct zebra_ns *zns;
4121
4122 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4123 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4124
4125 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4126 dplane_ctx_ns_init(ctx, zns, false);
4127
4128 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4129 ctx->zd_ifindex = ifp->ifindex;
4130
4131 /* Init the mac-specific data area */
4132 memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
4133
4134 ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
4135 ctx->u.macinfo.vtep_ip = vtep_ip;
4136 ctx->u.macinfo.mac = *mac;
4137 ctx->u.macinfo.vid = vid;
4138 ctx->u.macinfo.is_sticky = sticky;
4139 ctx->u.macinfo.nhg_id = nhg_id;
4140 ctx->u.macinfo.update_flags = update_flags;
4141 }
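/* Illustrative sequence (sketch): mac_update_common() below sets ctx->zd_op,
 * runs this init, and then enqueues the ctx - e.g. for a remote MAC:
 *
 *   ctx->zd_op = DPLANE_OP_MAC_INSTALL;
 *   dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip,
 *                   sticky, nhg_id, DPLANE_MAC_REMOTE);
 *
 * Callers that reuse an existing context can follow the same pattern.
 */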
4142
4143 /*
4144 * Common helper api for MAC address/vxlan updates
4145 */
4146 static enum zebra_dplane_result
4147 mac_update_common(enum dplane_op_e op,
4148 const struct interface *ifp,
4149 const struct interface *br_ifp,
4150 vlanid_t vid,
4151 const struct ethaddr *mac,
4152 struct in_addr vtep_ip,
4153 bool sticky,
4154 uint32_t nhg_id,
4155 uint32_t update_flags)
4156 {
4157 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4158 int ret;
4159 struct zebra_dplane_ctx *ctx = NULL;
4160
4161 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4162 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
4163 dplane_op2str(op), mac, ifp->name, &vtep_ip);
4164
4165 ctx = dplane_ctx_alloc();
4166 ctx->zd_op = op;
4167
4168 /* Common init for the ctx */
4169 dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky,
4170 nhg_id, update_flags);
4171
4172 /* Enqueue for processing on the dplane pthread */
4173 ret = dplane_update_enqueue(ctx);
4174
4175 /* Increment counter */
4176 atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
4177 memory_order_relaxed);
4178
4179 if (ret == AOK)
4180 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4181 else {
4182 /* Error counter */
4183 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
4184 memory_order_relaxed);
4185 dplane_ctx_free(&ctx);
4186 }
4187
4188 return result;
4189 }
4190
4191 /*
4192 * Enqueue evpn neighbor add for the dataplane.
4193 */
4194 enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
4195 const struct ipaddr *ip,
4196 const struct ethaddr *mac,
4197 uint32_t flags, bool was_static)
4198 {
4199 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4200 uint32_t update_flags = 0;
4201
4202 update_flags |= DPLANE_NEIGH_REMOTE;
4203
4204 if (was_static)
4205 update_flags |= DPLANE_NEIGH_WAS_STATIC;
4206
4207 result = neigh_update_internal(
4208 DPLANE_OP_NEIGH_INSTALL, ifp, (const void *)mac, AF_ETHERNET,
4209 ip, flags, DPLANE_NUD_NOARP, update_flags, 0);
4210
4211 return result;
4212 }
4213
4214 /*
4215 * Enqueue local neighbor add for the dataplane.
4216 */
4217 enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
4218 const struct ipaddr *ip,
4219 const struct ethaddr *mac,
4220 bool set_router, bool set_static,
4221 bool set_inactive)
4222 {
4223 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4224 uint32_t update_flags = 0;
4225 uint32_t ntf = 0;
4226 uint16_t state;
4227
4228 if (set_static)
4229 update_flags |= DPLANE_NEIGH_SET_STATIC;
4230
4231 if (set_inactive) {
4232 update_flags |= DPLANE_NEIGH_SET_INACTIVE;
4233 state = DPLANE_NUD_STALE;
4234 } else {
4235 state = DPLANE_NUD_REACHABLE;
4236 }
4237
4238 if (set_router)
4239 ntf |= DPLANE_NTF_ROUTER;
4240
4241 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL, ifp,
4242 (const void *)mac, AF_ETHERNET, ip, ntf,
4243 state, update_flags, 0);
4244
4245 return result;
4246 }
4247
4248 /*
4249 * Enqueue evpn neighbor delete for the dataplane.
4250 */
4251 enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
4252 const struct ipaddr *ip)
4253 {
4254 enum zebra_dplane_result result;
4255 uint32_t update_flags = 0;
4256
4257 update_flags |= DPLANE_NEIGH_REMOTE;
4258
4259 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE, ifp, NULL,
4260 AF_ETHERNET, ip, 0, 0, update_flags, 0);
4261
4262 return result;
4263 }
4264
4265 /*
4266 * Enqueue evpn VTEP add for the dataplane.
4267 */
4268 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
4269 const struct in_addr *ip,
4270 vni_t vni)
4271 {
4272 enum zebra_dplane_result result;
4273 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4274 struct ipaddr addr;
4275
4276 if (IS_ZEBRA_DEBUG_VXLAN)
4277 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4278 ip, vni, ifp->name, ifp->ifindex);
4279
4280 SET_IPADDR_V4(&addr);
4281 addr.ipaddr_v4 = *ip;
4282
4283 result = neigh_update_internal(DPLANE_OP_VTEP_ADD, ifp, &mac,
4284 AF_ETHERNET, &addr, 0, 0, 0, 0);
4285
4286 return result;
4287 }
4288
4289 /*
4290 * Enqueue evpn VTEP delete for the dataplane.
4291 */
4292 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
4293 const struct in_addr *ip,
4294 vni_t vni)
4295 {
4296 enum zebra_dplane_result result;
4297 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4298 struct ipaddr addr;
4299
4300 if (IS_ZEBRA_DEBUG_VXLAN)
4301 zlog_debug(
4302 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4303 ip, vni, ifp->name, ifp->ifindex);
4304
4305 SET_IPADDR_V4(&addr);
4306 addr.ipaddr_v4 = *ip;
4307
4308 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE, ifp,
4309 (const void *)&mac, AF_ETHERNET, &addr,
4310 0, 0, 0, 0);
4311
4312 return result;
4313 }
4314
4315 enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
4316 const struct ipaddr *ip)
4317 {
4318 enum zebra_dplane_result result;
4319
4320 result = neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER, ifp, NULL,
4321 AF_ETHERNET, ip, DPLANE_NTF_USE,
4322 DPLANE_NUD_INCOMPLETE, 0, 0);
4323
4324 return result;
4325 }
4326
4327 enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
4328 const uint8_t family,
4329 const uint32_t app_probes,
4330 const uint32_t ucast_probes,
4331 const uint32_t mcast_probes)
4332 {
4333 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4334 int ret;
4335 struct zebra_dplane_ctx *ctx = NULL;
4336 struct zebra_ns *zns;
4337 enum dplane_op_e op = DPLANE_OP_NEIGH_TABLE_UPDATE;
4338
4339 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4340 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4341 dplane_op2str(op), ifp->name, family2str(family));
4342 }
4343
4344 ctx = dplane_ctx_alloc();
4345
4346 ctx->zd_op = op;
4347 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4348 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4349
4350 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4351 dplane_ctx_ns_init(ctx, zns, false);
4352
4353 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4354 ctx->zd_ifindex = ifp->ifindex;
4355
4356 /* Init the neighbor-specific data area */
4357 memset(&ctx->u.neightable, 0, sizeof(ctx->u.neightable));
4358
4359 ctx->u.neightable.family = family;
4360 ctx->u.neightable.app_probes = app_probes;
4361 ctx->u.neightable.ucast_probes = ucast_probes;
4362 ctx->u.neightable.mcast_probes = mcast_probes;
4363
4364 /* Enqueue for processing on the dplane pthread */
4365 ret = dplane_update_enqueue(ctx);
4366
4367 /* Increment counter */
4368 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_in, 1,
4369 memory_order_relaxed);
4370
4371 if (ret == AOK)
4372 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4373 else {
4374 /* Error counter */
4375 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_errors, 1,
4376 memory_order_relaxed);
4377 dplane_ctx_free(&ctx);
4378 }
4379
4380 return result;
4381 }
4382
4383 /*
4384 * Common helper api for neighbor updates
4385 */
4386 static enum zebra_dplane_result
4387 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
4388 const void *link, const int link_family,
4389 const struct ipaddr *ip, uint32_t flags, uint16_t state,
4390 uint32_t update_flags, int protocol)
4391 {
4392 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4393 int ret;
4394 struct zebra_dplane_ctx *ctx = NULL;
4395 struct zebra_ns *zns;
4396 const struct ethaddr *mac = NULL;
4397 const struct ipaddr *link_ip = NULL;
4398
4399 if (link_family == AF_ETHERNET)
4400 mac = (const struct ethaddr *)link;
4401 else
4402 link_ip = (const struct ipaddr *)link;
4403
4404 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4405 char buf1[PREFIX_STRLEN];
4406
4407 buf1[0] = '\0';
4408 if (link_family == AF_ETHERNET)
4409 prefix_mac2str(mac, buf1, sizeof(buf1));
4410 else
4411 ipaddr2str(link_ip, buf1, sizeof(buf1));
4412 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4413 dplane_op2str(op), ifp->name,
4414 link_family == AF_ETHERNET ? "mac " : "link ",
4415 buf1, ip);
4416 }
4417
4418 ctx = dplane_ctx_alloc();
4419
4420 ctx->zd_op = op;
4421 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4422 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4423 dplane_ctx_set_type(ctx, protocol);
4424
4425 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4426 dplane_ctx_ns_init(ctx, zns, false);
4427
4428 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4429 ctx->zd_ifindex = ifp->ifindex;
4430
4431 /* Init the neighbor-specific data area */
4432 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
4433
4434 ctx->u.neigh.ip_addr = *ip;
4435 if (mac)
4436 ctx->u.neigh.link.mac = *mac;
4437 else if (link_ip)
4438 ctx->u.neigh.link.ip_addr = *link_ip;
4439
4440 ctx->u.neigh.flags = flags;
4441 ctx->u.neigh.state = state;
4442 ctx->u.neigh.update_flags = update_flags;
4443
4444 /* Enqueue for processing on the dplane pthread */
4445 ret = dplane_update_enqueue(ctx);
4446
4447 /* Increment counter */
4448 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
4449 memory_order_relaxed);
4450
4451 if (ret == AOK)
4452 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4453 else {
4454 /* Error counter */
4455 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
4456 memory_order_relaxed);
4457 dplane_ctx_free(&ctx);
4458 }
4459
4460 return result;
4461 }
4462
4463 /*
4464 * Common helper api for PBR rule updates
4465 */
4466 static enum zebra_dplane_result
4467 rule_update_internal(enum dplane_op_e op, struct zebra_pbr_rule *new_rule,
4468 struct zebra_pbr_rule *old_rule)
4469 {
4470 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4471 struct zebra_dplane_ctx *ctx;
4472 int ret;
4473
4474 ctx = dplane_ctx_alloc();
4475
4476 ret = dplane_ctx_rule_init(ctx, op, new_rule, old_rule);
4477 if (ret != AOK)
4478 goto done;
4479
4480 ret = dplane_update_enqueue(ctx);
4481
4482 done:
4483 atomic_fetch_add_explicit(&zdplane_info.dg_rules_in, 1,
4484 memory_order_relaxed);
4485
4486 if (ret == AOK)
4487 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4488 else {
4489 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors, 1,
4490 memory_order_relaxed);
4491 dplane_ctx_free(&ctx);
4492 }
4493
4494 return result;
4495 }
4496
4497 enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule)
4498 {
4499 return rule_update_internal(DPLANE_OP_RULE_ADD, rule, NULL);
4500 }
4501
4502 enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule)
4503 {
4504 return rule_update_internal(DPLANE_OP_RULE_DELETE, rule, NULL);
4505 }
4506
4507 enum zebra_dplane_result dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
4508 struct zebra_pbr_rule *new_rule)
4509 {
4510 return rule_update_internal(DPLANE_OP_RULE_UPDATE, new_rule, old_rule);
4511 }
4512 /*
4513 * Common helper api for iptable updates
4514 */
4515 static enum zebra_dplane_result
4516 iptable_update_internal(enum dplane_op_e op, struct zebra_pbr_iptable *iptable)
4517 {
4518 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4519 struct zebra_dplane_ctx *ctx;
4520 int ret;
4521
4522 if ((op == DPLANE_OP_IPTABLE_ADD &&
4523 CHECK_FLAG(iptable->internal_flags, IPTABLE_INSTALL_QUEUED)) ||
4524 (op == DPLANE_OP_IPTABLE_DELETE &&
4525 CHECK_FLAG(iptable->internal_flags, IPTABLE_UNINSTALL_QUEUED))) {
4526 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4527 zlog_debug(
4528 "update dplane ctx %s: iptable %s already in progress",
4529 dplane_op2str(op), iptable->ipset_name);
4530 return result;
4531 }
4532
4533 ctx = dplane_ctx_alloc();
4534
4535 ret = dplane_ctx_iptable_init(ctx, op, iptable);
4536 if (ret != AOK)
4537 goto done;
4538
4539 ret = dplane_update_enqueue(ctx);
4540
4541 done:
4542 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_in, 1,
4543 memory_order_relaxed);
4544
4545 if (ret == AOK) {
4546 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4547 if (op == DPLANE_OP_IPTABLE_ADD)
4548 SET_FLAG(iptable->internal_flags,
4549 IPTABLE_INSTALL_QUEUED);
4550 else
4551 SET_FLAG(iptable->internal_flags,
4552 IPTABLE_UNINSTALL_QUEUED);
4553 } else {
4554 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_errors, 1,
4555 memory_order_relaxed);
4556 dplane_ctx_free(&ctx);
4557 }
4558 return result;
4559 }
4560
4561 enum zebra_dplane_result
4562 dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable)
4563 {
4564 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD, iptable);
4565 }
4566
4567 enum zebra_dplane_result
4568 dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable)
4569 {
4570 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE, iptable);
4571 }
4572
4573 /*
4574 * Common helper api for ipset updates
4575 */
4576 static enum zebra_dplane_result
4577 ipset_update_internal(enum dplane_op_e op, struct zebra_pbr_ipset *ipset)
4578 {
4579 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4580 struct zebra_dplane_ctx *ctx;
4581 int ret;
4582
4583 ctx = dplane_ctx_alloc();
4584
4585 ret = dplane_ctx_ipset_init(ctx, op, ipset);
4586 if (ret != AOK)
4587 goto done;
4588
4589 ret = dplane_update_enqueue(ctx);
4590
4591 done:
4592 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_in, 1,
4593 memory_order_relaxed);
4594
4595 if (ret == AOK)
4596 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4597 else {
4598 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors, 1,
4599 memory_order_relaxed);
4600 dplane_ctx_free(&ctx);
4601 }
4602
4603 return result;
4604 }
4605
4606 enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset)
4607 {
4608 return ipset_update_internal(DPLANE_OP_IPSET_ADD, ipset);
4609 }
4610
4611 enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset)
4612 {
4613 return ipset_update_internal(DPLANE_OP_IPSET_DELETE, ipset);
4614 }
4615
4616 /*
4617 * Common helper api for ipset entry updates
4618 */
4619 static enum zebra_dplane_result
4620 ipset_entry_update_internal(enum dplane_op_e op,
4621 struct zebra_pbr_ipset_entry *ipset_entry)
4622 {
4623 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4624 struct zebra_dplane_ctx *ctx;
4625 int ret;
4626
4627 ctx = dplane_ctx_alloc();
4628
4629 ret = dplane_ctx_ipset_entry_init(ctx, op, ipset_entry);
4630 if (ret != AOK)
4631 goto done;
4632
4633 ret = dplane_update_enqueue(ctx);
4634
4635 done:
4636 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_in, 1,
4637 memory_order_relaxed);
4638
4639 if (ret == AOK)
4640 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4641 else {
4642 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_errors,
4643 1, memory_order_relaxed);
4644 dplane_ctx_free(&ctx);
4645 }
4646
4647 return result;
4648 }
4649
4650 enum zebra_dplane_result
4651 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset)
4652 {
4653 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD, ipset);
4654 }
4655
4656 enum zebra_dplane_result
4657 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset)
4658 {
4659 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE, ipset);
4660 }
4661
4662 /*
4663 * Common helper api for GRE set
4664 */
4665 enum zebra_dplane_result
4666 dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
4667 unsigned int mtu, const struct zebra_l2info_gre *gre_info)
4668 {
4669 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4670 struct zebra_dplane_ctx *ctx;
4671 enum dplane_op_e op = DPLANE_OP_GRE_SET;
4672 int ret;
4673 struct zebra_ns *zns;
4674
4675 if (!ifp)
4676 return result;
4677
4678 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4679 if (!zns)
4680 return result;
4681
4682 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4683 zlog_debug("init dplane ctx %s: if %s link %s%s",
4684 dplane_op2str(op), ifp->name,
4685 ifp_link ? "set" : "unset", ifp_link ?
4686 ifp_link->name : "");
4687 }
4688
4689 ctx = dplane_ctx_alloc();
4690 ctx->zd_op = op;
4691 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4692 dplane_ctx_ns_init(ctx, zns, false);
4693
4694 dplane_ctx_set_ifname(ctx, ifp->name);
4695 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4696 ctx->zd_ifindex = ifp->ifindex;
4697 if (ifp_link)
4698 ctx->u.gre.link_ifindex = ifp_link->ifindex;
4699 else
4700 ctx->u.gre.link_ifindex = 0;
4701 if (gre_info)
4702 memcpy(&ctx->u.gre.info, gre_info, sizeof(ctx->u.gre.info));
4703 ctx->u.gre.mtu = mtu;
4704
4705 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4706
4707 /* Enqueue context for processing */
4708 ret = dplane_update_enqueue(ctx);
4709
4710 /* Update counter */
4711 atomic_fetch_add_explicit(&zdplane_info.dg_gre_set_in, 1,
4712 memory_order_relaxed);
4713
4714 if (ret == AOK)
4715 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4716 else {
4717 atomic_fetch_add_explicit(
4718 &zdplane_info.dg_gre_set_errors, 1,
4719 memory_order_relaxed);
4720 if (ctx)
4721 dplane_ctx_free(&ctx);
4722 result = ZEBRA_DPLANE_REQUEST_FAILURE;
4723 }
4724 return result;
4725 }
4726
4727 /*
4728 * Handler for 'show dplane'
4729 */
4730 int dplane_show_helper(struct vty *vty, bool detailed)
4731 {
4732 uint64_t queued, queue_max, limit, errs, incoming, yields,
4733 other_errs;
4734
4735 /* Using atomics because counters are being changed in different
4736 * pthread contexts.
4737 */
4738 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
4739 memory_order_relaxed);
4740 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
4741 memory_order_relaxed);
4742 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
4743 memory_order_relaxed);
4744 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4745 memory_order_relaxed);
4746 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
4747 memory_order_relaxed);
4748 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
4749 memory_order_relaxed);
4750 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
4751 memory_order_relaxed);
4752
4753 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
4754 incoming);
4755 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
4756 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
4757 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
4758 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4759 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
4760 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
4761
4762 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
4763 memory_order_relaxed);
4764 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
4765 memory_order_relaxed);
4766 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
4767 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
4768
4769 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
4770 memory_order_relaxed);
4771 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
4772 memory_order_relaxed);
4773 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
4774 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
4775
4776 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
4777 memory_order_relaxed);
4778 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
4779 memory_order_relaxed);
4780 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
4781 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
4782
4783 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
4784 memory_order_relaxed);
4785 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
4786 memory_order_relaxed);
4787 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
4788 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
4789
4790 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
4791 memory_order_relaxed);
4792 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
4793 memory_order_relaxed);
4794 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
4795 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
4796
4797 incoming = atomic_load_explicit(&zdplane_info.dg_rules_in,
4798 memory_order_relaxed);
4799 errs = atomic_load_explicit(&zdplane_info.dg_rule_errors,
4800 memory_order_relaxed);
4801 vty_out(vty, "Rule updates: %" PRIu64 "\n", incoming);
4802 vty_out(vty, "Rule errors: %" PRIu64 "\n", errs);
4803
4804 incoming = atomic_load_explicit(&zdplane_info.dg_br_port_in,
4805 memory_order_relaxed);
4806 errs = atomic_load_explicit(&zdplane_info.dg_br_port_errors,
4807 memory_order_relaxed);
4808 vty_out(vty, "Bridge port updates: %" PRIu64 "\n", incoming);
4809 vty_out(vty, "Bridge port errors: %" PRIu64 "\n", errs);
4810
4811 incoming = atomic_load_explicit(&zdplane_info.dg_iptable_in,
4812 memory_order_relaxed);
4813 errs = atomic_load_explicit(&zdplane_info.dg_iptable_errors,
4814 memory_order_relaxed);
4815 vty_out(vty, "IPtable updates: %" PRIu64 "\n", incoming);
4816 vty_out(vty, "IPtable errors: %" PRIu64 "\n", errs);
4817 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_in,
4818 memory_order_relaxed);
4819 errs = atomic_load_explicit(&zdplane_info.dg_ipset_errors,
4820 memory_order_relaxed);
4821 vty_out(vty, "IPset updates: %" PRIu64 "\n", incoming);
4822 vty_out(vty, "IPset errors: %" PRIu64 "\n", errs);
4823 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_entry_in,
4824 memory_order_relaxed);
4825 errs = atomic_load_explicit(&zdplane_info.dg_ipset_entry_errors,
4826 memory_order_relaxed);
4827 vty_out(vty, "IPset entry updates: %" PRIu64 "\n", incoming);
4828 vty_out(vty, "IPset entry errors: %" PRIu64 "\n", errs);
4829
4830 incoming = atomic_load_explicit(&zdplane_info.dg_neightable_in,
4831 memory_order_relaxed);
4832 errs = atomic_load_explicit(&zdplane_info.dg_neightable_errors,
4833 memory_order_relaxed);
4834 vty_out(vty, "Neighbor Table updates: %"PRIu64"\n", incoming);
4835 vty_out(vty, "Neighbor Table errors: %"PRIu64"\n", errs);
4836
4837 incoming = atomic_load_explicit(&zdplane_info.dg_gre_set_in,
4838 memory_order_relaxed);
4839 errs = atomic_load_explicit(&zdplane_info.dg_gre_set_errors,
4840 memory_order_relaxed);
4841 vty_out(vty, "GRE set updates: %"PRIu64"\n", incoming);
4842 vty_out(vty, "GRE set errors: %"PRIu64"\n", errs);
4843 return CMD_SUCCESS;
4844 }
4845
4846 /*
4847 * Handler for 'show dplane providers'
4848 */
4849 int dplane_show_provs_helper(struct vty *vty, bool detailed)
4850 {
4851 struct zebra_dplane_provider *prov;
4852 uint64_t in, in_q, in_max, out, out_q, out_max;
4853
4854 vty_out(vty, "Zebra dataplane providers:\n");
4855
4856 DPLANE_LOCK();
4857 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4858 DPLANE_UNLOCK();
4859
4860 /* Show counters, useful info from each registered provider */
4861 while (prov) {
4862
4863 in = atomic_load_explicit(&prov->dp_in_counter,
4864 memory_order_relaxed);
4865 in_q = atomic_load_explicit(&prov->dp_in_queued,
4866 memory_order_relaxed);
4867 in_max = atomic_load_explicit(&prov->dp_in_max,
4868 memory_order_relaxed);
4869 out = atomic_load_explicit(&prov->dp_out_counter,
4870 memory_order_relaxed);
4871 out_q = atomic_load_explicit(&prov->dp_out_queued,
4872 memory_order_relaxed);
4873 out_max = atomic_load_explicit(&prov->dp_out_max,
4874 memory_order_relaxed);
4875
4876 vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n",
4877 prov->dp_name, prov->dp_id, in, in_q, in_max,
4878 out, out_q, out_max);
4879
4880 DPLANE_LOCK();
4881 prov = TAILQ_NEXT(prov, dp_prov_link);
4882 DPLANE_UNLOCK();
4883 }
4884
4885 return CMD_SUCCESS;
4886 }
4887
4888 /*
4889 * Helper for 'show run' etc.
4890 */
4891 int dplane_config_write_helper(struct vty *vty)
4892 {
4893 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
4894 vty_out(vty, "zebra dplane limit %u\n",
4895 zdplane_info.dg_max_queued_updates);
4896
4897 return 0;
4898 }
4899
4900 /*
4901 * Provider registration
4902 */
4903 int dplane_provider_register(const char *name,
4904 enum dplane_provider_prio prio,
4905 int flags,
4906 int (*start_fp)(struct zebra_dplane_provider *),
4907 int (*fp)(struct zebra_dplane_provider *),
4908 int (*fini_fp)(struct zebra_dplane_provider *,
4909 bool early),
4910 void *data,
4911 struct zebra_dplane_provider **prov_p)
4912 {
4913 int ret = 0;
4914 struct zebra_dplane_provider *p = NULL, *last;
4915
4916 /* Validate */
4917 if (fp == NULL) {
4918 ret = EINVAL;
4919 goto done;
4920 }
4921
4922 if (prio <= DPLANE_PRIO_NONE ||
4923 prio > DPLANE_PRIO_LAST) {
4924 ret = EINVAL;
4925 goto done;
4926 }
4927
4928 /* Allocate and init new provider struct */
4929 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
4930
4931 pthread_mutex_init(&(p->dp_mutex), NULL);
4932 TAILQ_INIT(&(p->dp_ctx_in_q));
4933 TAILQ_INIT(&(p->dp_ctx_out_q));
4934
4935 p->dp_flags = flags;
4936 p->dp_priority = prio;
4937 p->dp_fp = fp;
4938 p->dp_start = start_fp;
4939 p->dp_fini = fini_fp;
4940 p->dp_data = data;
4941
4942 /* Lock - the dplane pthread may be running */
4943 DPLANE_LOCK();
4944
4945 p->dp_id = ++zdplane_info.dg_provider_id;
4946
4947 if (name)
4948 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
4949 else
4950 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
4951 "provider-%u", p->dp_id);
4952
4953 /* Insert into list ordered by priority */
4954 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
4955 if (last->dp_priority > p->dp_priority)
4956 break;
4957 }
4958
4959 if (last)
4960 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
4961 else
4962 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
4963 dp_prov_link);
4964
4965 /* And unlock */
4966 DPLANE_UNLOCK();
4967
4968 if (IS_ZEBRA_DEBUG_DPLANE)
4969 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
4970 p->dp_name, p->dp_id, p->dp_priority);
4971
4972 done:
4973 if (prov_p)
4974 *prov_p = p;
4975
4976 return ret;
4977 }
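
/*
 * Example (illustrative sketch only, not compiled in): an external plugin
 * could register a provider from its init hook using the api above. The
 * names "my_plugin", "my_plugin_process" and "my_prov" are hypothetical.
 *
 *	static int my_plugin_process(struct zebra_dplane_provider *prov)
 *	{
 *		struct zebra_dplane_ctx *ctx;
 *		int counter, limit = dplane_provider_get_work_limit(prov);
 *
 *		for (counter = 0; counter < limit; counter++) {
 *			ctx = dplane_provider_dequeue_in_ctx(prov);
 *			if (ctx == NULL)
 *				break;
 *
 *			dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *			dplane_provider_enqueue_out_ctx(prov, ctx);
 *		}
 *
 *		if (counter >= limit)
 *			dplane_provider_work_ready();
 *
 *		return 0;
 *	}
 *
 *	struct zebra_dplane_provider *my_prov = NULL;
 *
 *	dplane_provider_register("my_plugin", DPLANE_PRIO_PRE_KERNEL,
 *				 DPLANE_PROV_FLAGS_DEFAULT, NULL,
 *				 my_plugin_process, NULL, NULL, &my_prov);
 */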
4978
4979 /* Accessors for provider attributes */
4980 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
4981 {
4982 return prov->dp_name;
4983 }
4984
4985 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
4986 {
4987 return prov->dp_id;
4988 }
4989
4990 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
4991 {
4992 return prov->dp_data;
4993 }
4994
4995 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
4996 {
4997 return zdplane_info.dg_updates_per_cycle;
4998 }
4999
5000 /* Lock/unlock a provider's mutex - iff the provider was registered with
5001 * the THREADED flag.
5002 */
5003 void dplane_provider_lock(struct zebra_dplane_provider *prov)
5004 {
5005 if (dplane_provider_is_threaded(prov))
5006 DPLANE_PROV_LOCK(prov);
5007 }
5008
5009 void dplane_provider_unlock(struct zebra_dplane_provider *prov)
5010 {
5011 if (dplane_provider_is_threaded(prov))
5012 DPLANE_PROV_UNLOCK(prov);
5013 }
5014
5015 /*
5016 * Dequeue and maintain associated counter
5017 */
5018 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
5019 struct zebra_dplane_provider *prov)
5020 {
5021 struct zebra_dplane_ctx *ctx = NULL;
5022
5023 dplane_provider_lock(prov);
5024
5025 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5026 if (ctx) {
5027 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5028
5029 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
5030 memory_order_relaxed);
5031 }
5032
5033 dplane_provider_unlock(prov);
5034
5035 return ctx;
5036 }
5037
5038 /*
5039 * Dequeue work to a list, return count
5040 */
5041 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
5042 struct dplane_ctx_q *listp)
5043 {
5044 int limit, ret;
5045 struct zebra_dplane_ctx *ctx;
5046
5047 limit = zdplane_info.dg_updates_per_cycle;
5048
5049 dplane_provider_lock(prov);
5050
5051 for (ret = 0; ret < limit; ret++) {
5052 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5053 if (ctx) {
5054 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5055
5056 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
5057 } else {
5058 break;
5059 }
5060 }
5061
5062 if (ret > 0)
5063 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
5064 memory_order_relaxed);
5065
5066 dplane_provider_unlock(prov);
5067
5068 return ret;
5069 }
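
/*
 * Illustrative usage sketch (assumption: running inside a provider's
 * process callback with 'prov' in scope): batch-dequeue pending contexts,
 * handle them, and hand them on via the out queue. This mirrors the
 * kernel provider's callback further below.
 *
 *	struct dplane_ctx_q work_list;
 *	struct zebra_dplane_ctx *ctx, *tctx;
 *
 *	TAILQ_INIT(&work_list);
 *	dplane_provider_dequeue_in_list(prov, &work_list);
 *
 *	TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
 *		TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
 *		dplane_provider_enqueue_out_ctx(prov, ctx);
 *	}
 */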
5070
5071 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov)
5072 {
5073 return atomic_load_explicit(&(prov->dp_out_counter),
5074 memory_order_relaxed);
5075 }
5076
5077 /*
5078 * Enqueue and maintain associated counter
5079 */
5080 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
5081 struct zebra_dplane_ctx *ctx)
5082 {
5083 uint64_t curr, high;
5084
5085 dplane_provider_lock(prov);
5086
5087 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
5088 zd_q_entries);
5089
5090 /* Maintain out-queue counters */
5091 atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
5092 memory_order_relaxed);
5093 curr = atomic_load_explicit(&prov->dp_out_queued,
5094 memory_order_relaxed);
5095 high = atomic_load_explicit(&prov->dp_out_max,
5096 memory_order_relaxed);
5097 if (curr > high)
5098 atomic_store_explicit(&prov->dp_out_max, curr,
5099 memory_order_relaxed);
5100
5101 dplane_provider_unlock(prov);
5102
5103 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
5104 memory_order_relaxed);
5105 }
5106
5107 /*
5108 * Accessor for provider object
5109 */
5110 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
5111 {
5112 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
5113 }
5114
5115 #ifdef HAVE_NETLINK
5116 /*
5117 * Callback when an OS (netlink) incoming event read is ready. This runs
5118 * in the dplane pthread.
5119 */
5120 static void dplane_incoming_read(struct thread *event)
5121 {
5122 struct dplane_zns_info *zi = THREAD_ARG(event);
5123
5124 kernel_dplane_read(&zi->info);
5125
5126 /* Re-start read task */
5127 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5128 zi->info.sock, &zi->t_read);
5129 }
5130
5131 /*
5132 * Callback in the dataplane pthread that requests info from the OS and
5133 * initiates netlink reads.
5134 */
5135 static void dplane_incoming_request(struct thread *event)
5136 {
5137 struct dplane_zns_info *zi = THREAD_ARG(event);
5138
5139 /* Start read task */
5140 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5141 zi->info.sock, &zi->t_read);
5142
5143 /* Send requests */
5144 netlink_request_netconf(zi->info.sock);
5145 }
5146
5147 /*
5148 * Initiate requests for existing info from the OS. This is called by the
5149 * main pthread, but we want all activity on the dplane netlink socket to
5150 * take place on the dplane pthread, so we schedule an event to accomplish
5151 * that.
5152 */
5153 static void dplane_kernel_info_request(struct dplane_zns_info *zi)
5154 {
5155 /* If we happen to encounter an enabled zns before the dplane
5156 * pthread is running, we'll initiate this later on.
5157 */
5158 if (zdplane_info.dg_master)
5159 thread_add_event(zdplane_info.dg_master,
5160 dplane_incoming_request, zi, 0,
5161 &zi->t_request);
5162 }
5163
5164 #endif /* HAVE_NETLINK */
5165
5166 /*
5167 * Notify dplane when namespaces are enabled and disabled. The dplane
5168 * needs to start and stop reading incoming events from the zns. In the
5169 * common case where vrfs are _not_ namespaces, there will only be one
5170 * of these.
5171 *
5172 * This is called in the main pthread.
5173 */
5174 void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
5175 {
5176 struct dplane_zns_info *zi;
5177
5178 if (IS_ZEBRA_DEBUG_DPLANE)
5179 zlog_debug("%s: %s for nsid %u", __func__,
5180 (enabled ? "ENABLED" : "DISABLED"), zns->ns_id);
5181
5182 /* Search for an existing zns info entry */
5183 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5184 if (zi->info.ns_id == zns->ns_id)
5185 break;
5186 }
5187
5188 if (enabled) {
5189 /* Create a new entry if necessary; start reading. */
5190 if (zi == NULL) {
5191 zi = XCALLOC(MTYPE_DP_NS, sizeof(*zi));
5192
5193 zi->info.ns_id = zns->ns_id;
5194
5195 zns_info_list_add_tail(&zdplane_info.dg_zns_list, zi);
5196
5197 if (IS_ZEBRA_DEBUG_DPLANE)
5198 zlog_debug("%s: nsid %u, new zi %p", __func__,
5199 zns->ns_id, zi);
5200 }
5201
5202 /* Make sure we're up-to-date with the zns object */
5203 #if defined(HAVE_NETLINK)
5204 zi->info.is_cmd = false;
5205 zi->info.sock = zns->netlink_dplane_in.sock;
5206
5207 /* Initiate requests for existing info from the OS, and
5208 * begin reading from the netlink socket.
5209 */
5210 dplane_kernel_info_request(zi);
5211 #endif
5212 } else if (zi) {
5213 if (IS_ZEBRA_DEBUG_DPLANE)
5214 zlog_debug("%s: nsid %u, deleting zi %p", __func__,
5215 zns->ns_id, zi);
5216
5217 /* Stop reading, free memory */
5218 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5219
5220 /* Stop any outstanding tasks */
5221 if (zdplane_info.dg_master) {
5222 thread_cancel_async(zdplane_info.dg_master,
5223 &zi->t_request, NULL);
5224
5225 thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
5226 NULL);
5227 }
5228
5229 XFREE(MTYPE_DP_NS, zi);
5230 }
5231 }
5232
5233 /*
5234 * Provider api to signal that work/events are available
5235 * for the dataplane pthread.
5236 */
5237 int dplane_provider_work_ready(void)
5238 {
5239 /* Note that during zebra startup, we may be offered work before
5240 * the dataplane pthread (and thread-master) are ready. We want to
5241 * enqueue the work, but the event-scheduling machinery may not be
5242 * available.
5243 */
5244 if (zdplane_info.dg_run) {
5245 thread_add_event(zdplane_info.dg_master,
5246 dplane_thread_loop, NULL, 0,
5247 &zdplane_info.dg_t_update);
5248 }
5249
5250 return AOK;
5251 }
5252
5253 /*
5254 * Enqueue a context directly to zebra main.
5255 */
5256 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
5257 {
5258 struct dplane_ctx_q temp_list;
5259
5260 /* Zebra's api takes a list, so we need to use a temporary list */
5261 TAILQ_INIT(&temp_list);
5262
5263 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
5264 (zdplane_info.dg_results_cb)(&temp_list);
5265 }
5266
5267 /*
5268 * Kernel dataplane provider
5269 */
5270
5271 static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
5272 {
5273 char buf[PREFIX_STRLEN];
5274
5275 switch (dplane_ctx_get_op(ctx)) {
5276
5277 case DPLANE_OP_ROUTE_INSTALL:
5278 case DPLANE_OP_ROUTE_UPDATE:
5279 case DPLANE_OP_ROUTE_DELETE:
5280 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5281 dplane_ctx_get_vrf(ctx), dplane_ctx_get_dest(ctx),
5282 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
5283 break;
5284
5285 case DPLANE_OP_NH_INSTALL:
5286 case DPLANE_OP_NH_UPDATE:
5287 case DPLANE_OP_NH_DELETE:
5288 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5289 dplane_ctx_get_nhe_id(ctx), ctx,
5290 dplane_op2str(dplane_ctx_get_op(ctx)));
5291 break;
5292
5293 case DPLANE_OP_LSP_INSTALL:
5294 case DPLANE_OP_LSP_UPDATE:
5295 case DPLANE_OP_LSP_DELETE:
5296 break;
5297
5298 case DPLANE_OP_PW_INSTALL:
5299 case DPLANE_OP_PW_UNINSTALL:
5300 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5301 dplane_ctx_get_ifname(ctx),
5302 dplane_op2str(ctx->zd_op), dplane_ctx_get_pw_af(ctx),
5303 dplane_ctx_get_pw_local_label(ctx),
5304 dplane_ctx_get_pw_remote_label(ctx));
5305 break;
5306
5307 case DPLANE_OP_ADDR_INSTALL:
5308 case DPLANE_OP_ADDR_UNINSTALL:
5309 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5310 dplane_op2str(dplane_ctx_get_op(ctx)),
5311 dplane_ctx_get_ifindex(ctx),
5312 dplane_ctx_get_intf_addr(ctx));
5313 break;
5314
5315 case DPLANE_OP_MAC_INSTALL:
5316 case DPLANE_OP_MAC_DELETE:
5317 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
5318 sizeof(buf));
5319
5320 zlog_debug("Dplane %s, mac %s, ifindex %u",
5321 dplane_op2str(dplane_ctx_get_op(ctx)),
5322 buf, dplane_ctx_get_ifindex(ctx));
5323 break;
5324
5325 case DPLANE_OP_NEIGH_INSTALL:
5326 case DPLANE_OP_NEIGH_UPDATE:
5327 case DPLANE_OP_NEIGH_DELETE:
5328 case DPLANE_OP_VTEP_ADD:
5329 case DPLANE_OP_VTEP_DELETE:
5330 case DPLANE_OP_NEIGH_DISCOVER:
5331 case DPLANE_OP_NEIGH_IP_INSTALL:
5332 case DPLANE_OP_NEIGH_IP_DELETE:
5333 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
5334 sizeof(buf));
5335
5336 zlog_debug("Dplane %s, ip %s, ifindex %u",
5337 dplane_op2str(dplane_ctx_get_op(ctx)),
5338 buf, dplane_ctx_get_ifindex(ctx));
5339 break;
5340
5341 case DPLANE_OP_RULE_ADD:
5342 case DPLANE_OP_RULE_DELETE:
5343 case DPLANE_OP_RULE_UPDATE:
5344 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5345 dplane_op2str(dplane_ctx_get_op(ctx)),
5346 dplane_ctx_get_ifname(ctx),
5347 dplane_ctx_get_ifindex(ctx), ctx);
5348 break;
5349
5350 case DPLANE_OP_SYS_ROUTE_ADD:
5351 case DPLANE_OP_SYS_ROUTE_DELETE:
5352 case DPLANE_OP_ROUTE_NOTIFY:
5353 case DPLANE_OP_LSP_NOTIFY:
5354 case DPLANE_OP_BR_PORT_UPDATE:
5355
5356 case DPLANE_OP_NONE:
5357 break;
5358
5359 case DPLANE_OP_IPTABLE_ADD:
5360 case DPLANE_OP_IPTABLE_DELETE: {
5361 struct zebra_pbr_iptable ipt;
5362
5363 dplane_ctx_get_pbr_iptable(ctx, &ipt);
5364 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5365 dplane_op2str(dplane_ctx_get_op(ctx)), ipt.unique,
5366 ctx);
5367 } break;
5368 case DPLANE_OP_IPSET_ADD:
5369 case DPLANE_OP_IPSET_DELETE: {
5370 struct zebra_pbr_ipset ipset;
5371
5372 dplane_ctx_get_pbr_ipset(ctx, &ipset);
5373 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5374 dplane_op2str(dplane_ctx_get_op(ctx)), ipset.unique,
5375 ctx);
5376 } break;
5377 case DPLANE_OP_IPSET_ENTRY_ADD:
5378 case DPLANE_OP_IPSET_ENTRY_DELETE: {
5379 struct zebra_pbr_ipset_entry ipent;
5380
5381 dplane_ctx_get_pbr_ipset_entry(ctx, &ipent);
5382 zlog_debug(
5383 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5384 dplane_op2str(dplane_ctx_get_op(ctx)), ipent.unique,
5385 ctx);
5386 } break;
5387 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5388 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5389 dplane_op2str(dplane_ctx_get_op(ctx)),
5390 dplane_ctx_get_ifname(ctx),
5391 family2str(dplane_ctx_neightable_get_family(ctx)));
5392 break;
5393 case DPLANE_OP_GRE_SET:
5394 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5395 dplane_op2str(dplane_ctx_get_op(ctx)),
5396 dplane_ctx_get_ifname(ctx),
5397 ctx->u.gre.link_ifindex);
5398 break;
5399
5400 case DPLANE_OP_INTF_ADDR_ADD:
5401 case DPLANE_OP_INTF_ADDR_DEL:
5402 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5403 dplane_op2str(dplane_ctx_get_op(ctx)),
5404 dplane_ctx_get_ifname(ctx),
5405 dplane_ctx_get_intf_addr(ctx));
5406 break;
5407
5408 case DPLANE_OP_INTF_NETCONFIG:
5409 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5410 dplane_op2str(dplane_ctx_get_op(ctx)),
5411 dplane_ctx_get_ifindex(ctx),
5412 dplane_ctx_get_netconf_mpls(ctx),
5413 dplane_ctx_get_netconf_mcast(ctx));
5414 break;
5415
5416 case DPLANE_OP_INTF_INSTALL:
5417 case DPLANE_OP_INTF_UPDATE:
5418 case DPLANE_OP_INTF_DELETE:
5419 zlog_debug("Dplane intf %s, idx %u, protodown %d",
5420 dplane_op2str(dplane_ctx_get_op(ctx)),
5421 dplane_ctx_get_ifindex(ctx),
5422 dplane_ctx_intf_is_protodown(ctx));
5423 break;
5424 }
5425 }
5426
5427 static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
5428 {
5429 enum zebra_dplane_result res = dplane_ctx_get_status(ctx);
5430
5431 switch (dplane_ctx_get_op(ctx)) {
5432
5433 case DPLANE_OP_ROUTE_INSTALL:
5434 case DPLANE_OP_ROUTE_UPDATE:
5435 case DPLANE_OP_ROUTE_DELETE:
5436 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5437 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
5438 1, memory_order_relaxed);
5439
5440 if ((dplane_ctx_get_op(ctx) != DPLANE_OP_ROUTE_DELETE)
5441 && (res == ZEBRA_DPLANE_REQUEST_SUCCESS)) {
5442 struct nexthop *nexthop;
5443
5444 /* Update installed nexthops to signal which have been
5445 * installed.
5446 */
5447 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
5448 nexthop)) {
5449 if (CHECK_FLAG(nexthop->flags,
5450 NEXTHOP_FLAG_RECURSIVE))
5451 continue;
5452
5453 if (CHECK_FLAG(nexthop->flags,
5454 NEXTHOP_FLAG_ACTIVE)) {
5455 SET_FLAG(nexthop->flags,
5456 NEXTHOP_FLAG_FIB);
5457 }
5458 }
5459 }
5460 break;
5461
5462 case DPLANE_OP_NH_INSTALL:
5463 case DPLANE_OP_NH_UPDATE:
5464 case DPLANE_OP_NH_DELETE:
5465 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5466 atomic_fetch_add_explicit(
5467 &zdplane_info.dg_nexthop_errors, 1,
5468 memory_order_relaxed);
5469 break;
5470
5471 case DPLANE_OP_LSP_INSTALL:
5472 case DPLANE_OP_LSP_UPDATE:
5473 case DPLANE_OP_LSP_DELETE:
5474 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5475 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors,
5476 1, memory_order_relaxed);
5477 break;
5478
5479 case DPLANE_OP_PW_INSTALL:
5480 case DPLANE_OP_PW_UNINSTALL:
5481 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5482 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
5483 memory_order_relaxed);
5484 break;
5485
5486 case DPLANE_OP_ADDR_INSTALL:
5487 case DPLANE_OP_ADDR_UNINSTALL:
5488 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5489 atomic_fetch_add_explicit(
5490 &zdplane_info.dg_intf_addr_errors, 1,
5491 memory_order_relaxed);
5492 break;
5493
5494 case DPLANE_OP_MAC_INSTALL:
5495 case DPLANE_OP_MAC_DELETE:
5496 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5497 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
5498 1, memory_order_relaxed);
5499 break;
5500
5501 case DPLANE_OP_NEIGH_INSTALL:
5502 case DPLANE_OP_NEIGH_UPDATE:
5503 case DPLANE_OP_NEIGH_DELETE:
5504 case DPLANE_OP_VTEP_ADD:
5505 case DPLANE_OP_VTEP_DELETE:
5506 case DPLANE_OP_NEIGH_DISCOVER:
5507 case DPLANE_OP_NEIGH_IP_INSTALL:
5508 case DPLANE_OP_NEIGH_IP_DELETE:
5509 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5510 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
5511 1, memory_order_relaxed);
5512 break;
5513
5514 case DPLANE_OP_RULE_ADD:
5515 case DPLANE_OP_RULE_DELETE:
5516 case DPLANE_OP_RULE_UPDATE:
5517 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5518 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors,
5519 1, memory_order_relaxed);
5520 break;
5521
5522 case DPLANE_OP_IPTABLE_ADD:
5523 case DPLANE_OP_IPTABLE_DELETE:
5524 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5525 atomic_fetch_add_explicit(
5526 &zdplane_info.dg_iptable_errors, 1,
5527 memory_order_relaxed);
5528 break;
5529
5530 case DPLANE_OP_IPSET_ADD:
5531 case DPLANE_OP_IPSET_DELETE:
5532 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5533 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors,
5534 1, memory_order_relaxed);
5535 break;
5536
5537 case DPLANE_OP_IPSET_ENTRY_ADD:
5538 case DPLANE_OP_IPSET_ENTRY_DELETE:
5539 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5540 atomic_fetch_add_explicit(
5541 &zdplane_info.dg_ipset_entry_errors, 1,
5542 memory_order_relaxed);
5543 break;
5544
5545 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5546 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5547 atomic_fetch_add_explicit(
5548 &zdplane_info.dg_neightable_errors, 1,
5549 memory_order_relaxed);
5550 break;
5551
5552 case DPLANE_OP_GRE_SET:
5553 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5554 atomic_fetch_add_explicit(
5555 &zdplane_info.dg_gre_set_errors, 1,
5556 memory_order_relaxed);
5557 break;
5558
5559 case DPLANE_OP_INTF_INSTALL:
5560 case DPLANE_OP_INTF_UPDATE:
5561 case DPLANE_OP_INTF_DELETE:
5562 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5563 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors,
5564 1, memory_order_relaxed);
5565 break;
5566
5567 /* Ignore 'notifications' - no-op */
5568 case DPLANE_OP_SYS_ROUTE_ADD:
5569 case DPLANE_OP_SYS_ROUTE_DELETE:
5570 case DPLANE_OP_ROUTE_NOTIFY:
5571 case DPLANE_OP_LSP_NOTIFY:
5572 case DPLANE_OP_BR_PORT_UPDATE:
5573 break;
5574
5575 /* TODO -- error counters for incoming events? */
5576 case DPLANE_OP_INTF_ADDR_ADD:
5577 case DPLANE_OP_INTF_ADDR_DEL:
5578 case DPLANE_OP_INTF_NETCONFIG:
5579 break;
5580
5581 case DPLANE_OP_NONE:
5582 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5583 atomic_fetch_add_explicit(&zdplane_info.dg_other_errors,
5584 1, memory_order_relaxed);
5585 break;
5586 }
5587 }
5588
5589 static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
5590 struct zebra_dplane_ctx *ctx)
5591 {
5592 zebra_pbr_process_iptable(ctx);
5593 dplane_provider_enqueue_out_ctx(prov, ctx);
5594 }
5595
5596 static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
5597 struct zebra_dplane_ctx *ctx)
5598 {
5599 zebra_pbr_process_ipset(ctx);
5600 dplane_provider_enqueue_out_ctx(prov, ctx);
5601 }
5602
5603 static void
5604 kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
5605 struct zebra_dplane_ctx *ctx)
5606 {
5607 zebra_pbr_process_ipset_entry(ctx);
5608 dplane_provider_enqueue_out_ctx(prov, ctx);
5609 }
5610
5611 /*
5612 * Kernel provider callback
5613 */
5614 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
5615 {
5616 struct zebra_dplane_ctx *ctx, *tctx;
5617 struct dplane_ctx_q work_list;
5618 int counter, limit;
5619
5620 TAILQ_INIT(&work_list);
5621
5622 limit = dplane_provider_get_work_limit(prov);
5623
5624 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5625 zlog_debug("dplane provider '%s': processing",
5626 dplane_provider_get_name(prov));
5627
5628 for (counter = 0; counter < limit; counter++) {
5629 ctx = dplane_provider_dequeue_in_ctx(prov);
5630 if (ctx == NULL)
5631 break;
5632 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5633 kernel_dplane_log_detail(ctx);
5634
5635 if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
5636 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
5637 kernel_dplane_process_iptable(prov, ctx);
5638 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
5639 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
5640 kernel_dplane_process_ipset(prov, ctx);
5641 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
5642 || dplane_ctx_get_op(ctx)
5643 == DPLANE_OP_IPSET_ENTRY_DELETE))
5644 kernel_dplane_process_ipset_entry(prov, ctx);
5645 else
5646 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5647 }
5648
5649 kernel_update_multi(&work_list);
5650
5651 TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
5652 kernel_dplane_handle_result(ctx);
5653
5654 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5655 dplane_provider_enqueue_out_ctx(prov, ctx);
5656 }
5657
5658 /* Ensure that we'll run the work loop again if there's still
5659 * more work to do.
5660 */
5661 if (counter >= limit) {
5662 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5663 zlog_debug("dplane provider '%s' reached max updates %d",
5664 dplane_provider_get_name(prov), counter);
5665
5666 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
5667 1, memory_order_relaxed);
5668
5669 dplane_provider_work_ready();
5670 }
5671
5672 return 0;
5673 }
5674
5675 #ifdef DPLANE_TEST_PROVIDER
5676
5677 /*
5678 * Test dataplane provider plugin
5679 */
5680
5681 /*
5682 * Test provider process callback
5683 */
5684 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
5685 {
5686 struct zebra_dplane_ctx *ctx;
5687 int counter, limit;
5688
5689 /* Just moving from 'in' queue to 'out' queue */
5690
5691 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5692 zlog_debug("dplane provider '%s': processing",
5693 dplane_provider_get_name(prov));
5694
5695 limit = dplane_provider_get_work_limit(prov);
5696
5697 for (counter = 0; counter < limit; counter++) {
5698 ctx = dplane_provider_dequeue_in_ctx(prov);
5699 if (ctx == NULL)
5700 break;
5701
5702 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5703 zlog_debug("dplane provider '%s': op %s",
5704 dplane_provider_get_name(prov),
5705 dplane_op2str(dplane_ctx_get_op(ctx)));
5706
5707 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
5708
5709 dplane_provider_enqueue_out_ctx(prov, ctx);
5710 }
5711
5712 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5713 zlog_debug("dplane provider '%s': processed %d",
5714 dplane_provider_get_name(prov), counter);
5715
5716 /* Ensure that we'll run the work loop again if there's still
5717 * more work to do.
5718 */
5719 if (counter >= limit)
5720 dplane_provider_work_ready();
5721
5722 return 0;
5723 }
5724
5725 /*
5726 * Test provider shutdown/fini callback
5727 */
5728 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
5729 bool early)
5730 {
5731 if (IS_ZEBRA_DEBUG_DPLANE)
5732 zlog_debug("dplane provider '%s': %sshutdown",
5733 dplane_provider_get_name(prov),
5734 early ? "early " : "");
5735
5736 return 0;
5737 }
5738 #endif /* DPLANE_TEST_PROVIDER */
5739
5740 /*
5741 * Register default kernel provider
5742 */
5743 static void dplane_provider_init(void)
5744 {
5745 int ret;
5746
5747 ret = dplane_provider_register("Kernel",
5748 DPLANE_PRIO_KERNEL,
5749 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5750 kernel_dplane_process_func,
5751 NULL,
5752 NULL, NULL);
5753
5754 if (ret != AOK)
5755 zlog_err("Unable to register kernel dplane provider: %d",
5756 ret);
5757
5758 #ifdef DPLANE_TEST_PROVIDER
5759 /* Optional test provider ... */
5760 ret = dplane_provider_register("Test",
5761 DPLANE_PRIO_PRE_KERNEL,
5762 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5763 test_dplane_process_func,
5764 test_dplane_shutdown_func,
5765 NULL /* data */, NULL);
5766
5767 if (ret != AOK)
5768 zlog_err("Unable to register test dplane provider: %d",
5769 ret);
5770 #endif /* DPLANE_TEST_PROVIDER */
5771 }
5772
5773 /*
5774 * Allow zebra code to walk the queue of pending contexts, evaluate each one
5775 * using a callback function. If the function returns 'true', the context
5776 * will be dequeued and freed without being processed.
5777 */
5778 int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
5779 void *arg), void *val)
5780 {
5781 struct zebra_dplane_ctx *ctx, *temp;
5782 struct dplane_ctx_q work_list;
5783
5784 TAILQ_INIT(&work_list);
5785
5786 if (context_cb == NULL)
5787 goto done;
5788
5789 /* Walk the pending context queue under the dplane lock. */
5790 DPLANE_LOCK();
5791
5792 TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
5793 temp) {
5794 if (context_cb(ctx, val)) {
5795 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5796 zd_q_entries);
5797 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5798 }
5799 }
5800
5801 DPLANE_UNLOCK();
5802
5803 /* Now free any contexts selected by the caller, without holding
5804 * the lock.
5805 */
5806 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
5807 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5808 dplane_ctx_fini(&ctx);
5809 }
5810
5811 done:
5812
5813 return 0;
5814 }
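
/*
 * Example (illustrative only): a caller could use the walk api above to
 * drop queued contexts matching some predicate; the callback and the
 * ifindex argument here are hypothetical.
 *
 *	static bool ctx_matches_ifindex(struct zebra_dplane_ctx *ctx, void *arg)
 *	{
 *		ifindex_t *ifindex = arg;
 *
 *		return dplane_ctx_get_ifindex(ctx) == *ifindex;
 *	}
 *
 *	dplane_clean_ctx_queue(ctx_matches_ifindex, &ifindex);
 */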
5815
5816 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5817 * simplified or skipped during shutdown processing.
5818 */
5819 bool dplane_is_in_shutdown(void)
5820 {
5821 return zdplane_info.dg_is_shutdown;
5822 }
5823
5824 /*
5825 * Enable collection of extra info about interfaces in route updates.
5826 */
5827 void dplane_enable_intf_extra_info(void)
5828 {
5829 dplane_collect_extra_intf_info = true;
5830 }
5831
5832 /*
5833 * Early or pre-shutdown, de-init notification api. This runs pretty
5834 * early during zebra shutdown, as a signal to stop new work and prepare
5835 * for updates generated by shutdown/cleanup activity, as zebra tries to
5836 * remove everything it's responsible for.
5837 * NB: This runs in the main zebra pthread context.
5838 */
5839 void zebra_dplane_pre_finish(void)
5840 {
5841 struct zebra_dplane_provider *prov;
5842
5843 if (IS_ZEBRA_DEBUG_DPLANE)
5844 zlog_debug("Zebra dataplane pre-finish called");
5845
5846 zdplane_info.dg_is_shutdown = true;
5847
5848 /* Notify provider(s) of pending shutdown. */
5849 TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
5850 if (prov->dp_fini == NULL)
5851 continue;
5852
5853 prov->dp_fini(prov, true /* early */);
5854 }
5855 }
5856
5857 /*
5858 * Utility to determine whether work remains enqueued within the dplane;
5859 * used during system shutdown processing.
5860 */
5861 static bool dplane_work_pending(void)
5862 {
5863 bool ret = false;
5864 struct zebra_dplane_ctx *ctx;
5865 struct zebra_dplane_provider *prov;
5866
5867 /* TODO -- just checking incoming/pending work for now, must check
5868 * providers
5869 */
5870 DPLANE_LOCK();
5871 {
5872 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5873 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5874 }
5875 DPLANE_UNLOCK();
5876
5877 if (ctx != NULL) {
5878 ret = true;
5879 goto done;
5880 }
5881
5882 while (prov) {
5883
5884 dplane_provider_lock(prov);
5885
5886 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5887 if (ctx == NULL)
5888 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5889
5890 dplane_provider_unlock(prov);
5891
5892 if (ctx != NULL)
5893 break;
5894
5895 DPLANE_LOCK();
5896 prov = TAILQ_NEXT(prov, dp_prov_link);
5897 DPLANE_UNLOCK();
5898 }
5899
5900 if (ctx != NULL)
5901 ret = true;
5902
5903 done:
5904 return ret;
5905 }
5906
5907 /*
5908 * Shutdown-time intermediate callback, used to determine when all pending
5909 * in-flight updates are done. If there's still work to do, reschedules itself.
5910 * If all work is done, schedules an event to the main zebra thread for
5911 * final zebra shutdown.
5912 * This runs in the dplane pthread context.
5913 */
5914 static void dplane_check_shutdown_status(struct thread *event)
5915 {
5916 struct dplane_zns_info *zi;
5917
5918 if (IS_ZEBRA_DEBUG_DPLANE)
5919 zlog_debug("Zebra dataplane shutdown status check called");
5920
5921 /* Remove any zns info entries as we stop the dplane pthread. */
5922 frr_each_safe (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5923 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5924
5925 if (zdplane_info.dg_master) {
5926 thread_cancel(&zi->t_read);
5927 thread_cancel(&zi->t_request);
5928 }
5929
5930 XFREE(MTYPE_DP_NS, zi);
5931 }
5932
5933 if (dplane_work_pending()) {
5934 /* Reschedule dplane check on a short timer */
5935 thread_add_timer_msec(zdplane_info.dg_master,
5936 dplane_check_shutdown_status,
5937 NULL, 100,
5938 &zdplane_info.dg_t_shutdown_check);
5939
5940 /* TODO - give up and stop waiting after a short time? */
5941
5942 } else {
5943 /* We appear to be done - schedule a final callback event
5944 * for the zebra main pthread.
5945 */
5946 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
5947 }
5948 }
5949
5950 /*
5951 * Shutdown, de-init api. This runs pretty late during shutdown,
5952 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5953 * At this point, dplane work may still remain to be done, so we can't just
5954 * blindly terminate. If there's still work to do, we'll periodically check
5955 * and when done, we'll enqueue a task to the zebra main thread for final
5956 * termination processing.
5957 *
5958 * NB: This runs in the main zebra thread context.
5959 */
5960 void zebra_dplane_finish(void)
5961 {
5962 if (IS_ZEBRA_DEBUG_DPLANE)
5963 zlog_debug("Zebra dataplane fini called");
5964
5965 thread_add_event(zdplane_info.dg_master,
5966 dplane_check_shutdown_status, NULL, 0,
5967 &zdplane_info.dg_t_shutdown_check);
5968 }
5969
5970 /*
5971 * Main dataplane pthread event loop. The thread takes new incoming work
5972 * and offers it to the first provider. It then iterates through the
5973 * providers, taking completed work from each one and offering it
5974 * to the next in order. At each step, a limited number of updates are
5975 * processed during a cycle in order to provide some fairness.
5976 *
5977 * This loop through the providers is only run once, so that the dataplane
5978 * pthread can look for other pending work - such as i/o work on behalf of
5979 * providers.
5980 */
5981 static void dplane_thread_loop(struct thread *event)
5982 {
5983 struct dplane_ctx_q work_list;
5984 struct dplane_ctx_q error_list;
5985 struct zebra_dplane_provider *prov;
5986 struct zebra_dplane_ctx *ctx, *tctx;
5987 int limit, counter, error_counter;
5988 uint64_t curr, high;
5989 bool reschedule = false;
5990
5991 /* Capture work limit per cycle */
5992 limit = zdplane_info.dg_updates_per_cycle;
5993
5994 /* Init temporary lists used to move contexts among providers */
5995 TAILQ_INIT(&work_list);
5996 TAILQ_INIT(&error_list);
5997 error_counter = 0;
5998
5999 /* Check for zebra shutdown */
6000 if (!zdplane_info.dg_run)
6001 return;
6002
6003 /* Dequeue some incoming work from zebra (if any) onto the temporary
6004 * working list.
6005 */
6006 DPLANE_LOCK();
6007
6008 /* Locate initial registered provider */
6009 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6010
6011 /* Move new work from incoming list to temp list */
6012 for (counter = 0; counter < limit; counter++) {
6013 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
6014 if (ctx) {
6015 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
6016 zd_q_entries);
6017
6018 ctx->zd_provider = prov->dp_id;
6019
6020 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
6021 } else {
6022 break;
6023 }
6024 }
6025
6026 DPLANE_UNLOCK();
6027
6028 atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
6029 memory_order_relaxed);
6030
6031 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6032 zlog_debug("dplane: incoming new work counter: %d", counter);
6033
6034 /* Iterate through the registered providers, offering new incoming
6035 * work. If the provider has outgoing work in its queue, take that
6036 * work for the next provider.
6037 */
6038 while (prov) {
6039
6040 /* At each iteration, the temporary work list has 'counter'
6041 * items.
6042 */
6043 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6044 zlog_debug("dplane enqueues %d new work to provider '%s'",
6045 counter, dplane_provider_get_name(prov));
6046
6047 /* Capture current provider id in each context; check for
6048 * error status.
6049 */
6050 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
6051 if (dplane_ctx_get_status(ctx) ==
6052 ZEBRA_DPLANE_REQUEST_SUCCESS) {
6053 ctx->zd_provider = prov->dp_id;
6054 } else {
6055 /*
6056 * TODO -- improve error-handling: recirc
6057 * errors backwards so that providers can
6058 * 'undo' their work (if they want to)
6059 */
6060
6061 /* Move to error list; will be returned
6062 * to zebra main.
6063 */
6064 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
6065 TAILQ_INSERT_TAIL(&error_list,
6066 ctx, zd_q_entries);
6067 error_counter++;
6068 }
6069 }
6070
6071 /* Enqueue new work to the provider */
6072 dplane_provider_lock(prov);
6073
6074 if (TAILQ_FIRST(&work_list))
6075 TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
6076 zd_q_entries);
6077
6078 atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
6079 memory_order_relaxed);
6080 atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
6081 memory_order_relaxed);
6082 curr = atomic_load_explicit(&prov->dp_in_queued,
6083 memory_order_relaxed);
6084 high = atomic_load_explicit(&prov->dp_in_max,
6085 memory_order_relaxed);
6086 if (curr > high)
6087 atomic_store_explicit(&prov->dp_in_max, curr,
6088 memory_order_relaxed);
6089
6090 dplane_provider_unlock(prov);
6091
6092 /* Reset the temp list (though the 'concat' may have done this
6093 * already), and the counter.
6094 */
6095 TAILQ_INIT(&work_list);
6096 counter = 0;
6097
6098 /* Call into the provider code. Note that this is
6099 * unconditional: we offer to do work even if we don't enqueue
6100 * any _new_ work.
6101 */
6102 (*prov->dp_fp)(prov);
6103
6104 /* Check for zebra shutdown */
6105 if (!zdplane_info.dg_run)
6106 break;
6107
6108 /* Dequeue completed work from the provider */
6109 dplane_provider_lock(prov);
6110
6111 while (counter < limit) {
6112 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
6113 if (ctx) {
6114 TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
6115 zd_q_entries);
6116
6117 TAILQ_INSERT_TAIL(&work_list,
6118 ctx, zd_q_entries);
6119 counter++;
6120 } else
6121 break;
6122 }
6123
6124 dplane_provider_unlock(prov);
6125
6126 if (counter >= limit)
6127 reschedule = true;
6128
6129 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6130 zlog_debug("dplane dequeues %d completed work from provider %s",
6131 counter, dplane_provider_get_name(prov));
6132
6133 /* Locate next provider */
6134 DPLANE_LOCK();
6135 prov = TAILQ_NEXT(prov, dp_prov_link);
6136 DPLANE_UNLOCK();
6137 }
6138
6139 /*
6140 * We hit the work limit while processing at least one provider's
6141 * output queue - ensure we come back and finish it.
6142 */
6143 if (reschedule)
6144 dplane_provider_work_ready();
6145
6146 /* After all providers have been serviced, enqueue any completed
6147 * work and any errors back to zebra so it can process the results.
6148 */
6149 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6150 zlog_debug("dplane has %d completed, %d errors, for zebra main",
6151 counter, error_counter);
6152
6153 /*
6154 * Hand the lists through the API to zebra main,
6155 * to reduce the number of lock/unlock cycles.
6156 */
6157
6158 /* Call through to zebra main */
6159 (zdplane_info.dg_results_cb)(&error_list);
6160
6161 TAILQ_INIT(&error_list);
6162
6163 /* Call through to zebra main */
6164 (zdplane_info.dg_results_cb)(&work_list);
6165
6166 TAILQ_INIT(&work_list);
6167 }
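
/* The loop above drives each provider by calling its dp_fp callback and
 * then harvesting whatever the provider has placed on its output queue.
 * Below is a minimal, compiled-out sketch of such a callback: a
 * pass-through provider that marks every context successful.  It is
 * guarded by the hypothetical DPLANE_PROVIDER_SKETCH macro (never
 * defined) and assumes dplane_provider_get_work_limit(),
 * dplane_provider_dequeue_in_ctx() and dplane_provider_enqueue_out_ctx()
 * behave as declared in zebra_dplane.h.
 */
#ifdef DPLANE_PROVIDER_SKETCH
static int sketch_provider_process(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {
		/* Pull one context from the provider's input queue */
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* A real provider would program the update here */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		/* Hand the finished context back for the next provider */
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Ask to be scheduled again if we stopped at the work limit */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}
#endif /* DPLANE_PROVIDER_SKETCH */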
6168
6169 /*
6170 * Final phase of shutdown, after all work enqueued to dplane has been
6171 * processed. This is called from the zebra main pthread context.
6172 */
6173 void zebra_dplane_shutdown(void)
6174 {
6175 struct zebra_dplane_provider *dp;
6176
6177 if (IS_ZEBRA_DEBUG_DPLANE)
6178 zlog_debug("Zebra dataplane shutdown called");
6179
6180 /* Stop dplane thread, if it's running */
6181
6182 zdplane_info.dg_run = false;
6183
6184 if (zdplane_info.dg_t_update)
6185 thread_cancel_async(zdplane_info.dg_t_update->master,
6186 &zdplane_info.dg_t_update, NULL);
6187
6188 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
6189
6190 /* Destroy pthread */
6191 frr_pthread_destroy(zdplane_info.dg_pthread);
6192 zdplane_info.dg_pthread = NULL;
6193 zdplane_info.dg_master = NULL;
6194
6195 /* Notify provider(s) of final shutdown.
6196 * Note that this call is in the main pthread, so providers must
6197 * be prepared for that.
6198 */
6199 TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
6200 if (dp->dp_fini == NULL)
6201 continue;
6202
6203 dp->dp_fini(dp, false);
6204 }
6205
6206 /* TODO -- Clean-up provider objects */
6207
6208 /* TODO -- Clean queue(s), free memory */
6209 }
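
/* zebra_dplane_shutdown() above invokes each provider's dp_fini callback
 * with 'early' == false.  A minimal, compiled-out sketch of that callback
 * follows; it is guarded by the hypothetical DPLANE_PROVIDER_SKETCH macro
 * (never defined) and only assumes the callback signature implied by the
 * dp_fini call above.
 */
#ifdef DPLANE_PROVIDER_SKETCH
static int sketch_provider_fini(struct zebra_dplane_provider *prov,
				bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("sketch provider '%s' fini, early %d",
			   dplane_provider_get_name(prov), early);

	/* The final (non-early) pass runs in the main pthread; release
	 * any provider-private state here.
	 */
	return 0;
}
#endif /* DPLANE_PROVIDER_SKETCH */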
6210
6211 /*
6212 * Initialize the dataplane module during startup, internal/private version
6213 */
6214 static void zebra_dplane_init_internal(void)
6215 {
6216 memset(&zdplane_info, 0, sizeof(zdplane_info));
6217
6218 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
6219
6220 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
6221 TAILQ_INIT(&zdplane_info.dg_providers_q);
6222 zns_info_list_init(&zdplane_info.dg_zns_list);
6223
6224 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
6225
6226 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
6227
6228 /* Register default kernel 'provider' during init */
6229 dplane_provider_init();
6230 }
6231
6232 /*
6233 * Start the dataplane pthread. This step needs to be run later than the
6234 * 'init' step, in case zebra has forked.
6235 */
6236 void zebra_dplane_start(void)
6237 {
6238 struct dplane_zns_info *zi;
6239 struct zebra_dplane_provider *prov;
6240 struct frr_pthread_attr pattr = {
6241 .start = frr_pthread_attr_default.start,
6242 .stop = frr_pthread_attr_default.stop
6243 };
6244
6245 /* Start dataplane pthread */
6246
6247 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
6248 "zebra_dplane");
6249
6250 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
6251
6252 zdplane_info.dg_run = true;
6253
6254 /* Enqueue an initial event for the dataplane pthread */
6255 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
6256 &zdplane_info.dg_t_update);
6257
6258 /* Enqueue requests and reads if necessary */
6259 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
6260 #if defined(HAVE_NETLINK)
6261 thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
6262 zi, zi->info.sock, &zi->t_read);
6263 dplane_kernel_info_request(zi);
6264 #endif
6265 }
6266
6267 /* Call start callbacks for registered providers */
6268
6269 DPLANE_LOCK();
6270 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6271 DPLANE_UNLOCK();
6272
6273 while (prov) {
6274
6275 if (prov->dp_start)
6276 (prov->dp_start)(prov);
6277
6278 /* Locate next provider */
6279 DPLANE_LOCK();
6280 prov = TAILQ_NEXT(prov, dp_prov_link);
6281 DPLANE_UNLOCK();
6282 }
6283
6284 frr_pthread_run(zdplane_info.dg_pthread, NULL);
6285 }
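
/* The start loop above only sees providers that have already registered.
 * The compiled-out sketch below shows how a provider could supply a
 * dp_start callback alongside the process/fini sketches earlier in this
 * section.  It is guarded by the hypothetical DPLANE_PROVIDER_SKETCH
 * macro (never defined) and assumes the dplane_provider_register()
 * prototype and the DPLANE_PRIO_xxx and DPLANE_PROV_FLAGS_DEFAULT values
 * from zebra_dplane.h.
 */
#ifdef DPLANE_PROVIDER_SKETCH
static int sketch_provider_start(struct zebra_dplane_provider *prov)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("sketch provider '%s' starting",
			   dplane_provider_get_name(prov));
	return 0;
}

static void sketch_provider_register(void)
{
	int ret;

	ret = dplane_provider_register("Sketch", DPLANE_PRIO_POSTPROCESS,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       sketch_provider_start,
				       sketch_provider_process,
				       sketch_provider_fini,
				       NULL, NULL);
	if (ret != AOK)
		zlog_err("Unable to register sketch dplane provider: %d",
			 ret);
}
#endif /* DPLANE_PROVIDER_SKETCH */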
6286
6287 /*
6288 * Initialize the dataplane module at startup; called by zebra rib_init()
6289 */
6290 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
6291 {
6292 zebra_dplane_init_internal();
6293 zdplane_info.dg_results_cb = results_fp;
6294 }
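
/* zebra's real results callback (rib_dplane_results() in zebra_rib.c)
 * re-queues the contexts to the main pthread, since dg_results_cb is
 * invoked from the dplane pthread.  The compiled-out sketch below shows
 * the minimal shape of such a callback: it simply drains the list and
 * frees each context.  It is guarded by the hypothetical
 * DPLANE_PROVIDER_SKETCH macro (never defined) and assumes
 * dplane_ctx_dequeue() and dplane_ctx_fini() behave as declared in
 * zebra_dplane.h.
 */
#ifdef DPLANE_PROVIDER_SKETCH
static int sketch_results_cb(struct dplane_ctx_q *ctxlist)
{
	struct zebra_dplane_ctx *ctx;

	while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL)
		dplane_ctx_fini(&ctx);

	return 0;
}
#endif /* DPLANE_PROVIDER_SKETCH */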