/*
 * Zebra dataplane layer.
 * Copyright (c) 2018 Volta Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/zebra_router.h"
32 #include "zebra/zebra_dplane.h"
33 #include "zebra/zebra_vxlan_private.h"
34 #include "zebra/zebra_mpls.h"
36 #include "zebra/debug.h"
37 #include "zebra/zebra_pbr.h"
41 DEFINE_MTYPE_STATIC(ZEBRA
, DP_CTX
, "Zebra DPlane Ctx");
42 DEFINE_MTYPE_STATIC(ZEBRA
, DP_INTF
, "Zebra DPlane Intf");
43 DEFINE_MTYPE_STATIC(ZEBRA
, DP_PROV
, "Zebra DPlane Provider");
44 DEFINE_MTYPE_STATIC(ZEBRA
, DP_NETFILTER
, "Zebra Netfilter Internal Object");
45 DEFINE_MTYPE_STATIC(ZEBRA
, DP_NS
, "DPlane NSes");
/* Control for collection of extra interface info with route updates; a plugin
 * can enable the extra info via a dplane api.
 */
static bool dplane_collect_extra_intf_info;

/* Enable test dataplane provider */
/*#define DPLANE_TEST_PROVIDER 1 */

/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */

#ifdef DPLANE_DEBUG

#  define DPLANE_CTX_VALID(p)	\
		assert((p) != NULL)

#else

#  define DPLANE_CTX_VALID(p)

#endif	/* DPLANE_DEBUG */
80 * Nexthop information captured for nexthop/nexthop group updates
82 struct dplane_nexthop_info
{
89 struct nexthop_group ng
;
90 struct nh_grp nh_grp
[MULTIPATH_NUM
];
95 * Optional extra info about interfaces used in route updates' nexthops.
97 struct dplane_intf_extra
{
103 TAILQ_ENTRY(dplane_intf_extra
) link
;
107 * Route information captured for route updates.
109 struct dplane_route_info
{
111 /* Dest and (optional) source prefixes */
112 struct prefix zd_dest
;
113 struct prefix zd_src
;
122 route_tag_t zd_old_tag
;
124 uint32_t zd_old_metric
;
126 uint16_t zd_instance
;
127 uint16_t zd_old_instance
;
130 uint8_t zd_old_distance
;
133 uint32_t zd_nexthop_mtu
;
135 /* Nexthop hash entry info */
136 struct dplane_nexthop_info nhe
;
140 struct nexthop_group zd_ng
;
142 /* Backup nexthops (if present) */
143 struct nexthop_group backup_ng
;
145 /* "Previous" nexthops, used only in route updates without netlink */
146 struct nexthop_group zd_old_ng
;
147 struct nexthop_group old_backup_ng
;
149 /* Optional list of extra interface info */
150 TAILQ_HEAD(dp_intf_extra_q
, dplane_intf_extra
) intf_extra_q
;
154 * Pseudowire info for the dataplane
156 struct dplane_pw_info
{
163 mpls_label_t local_label
;
164 mpls_label_t remote_label
;
166 /* Nexthops that are valid and installed */
167 struct nexthop_group fib_nhg
;
169 /* Primary and backup nexthop sets, copied from the resolving route. */
170 struct nexthop_group primary_nhg
;
171 struct nexthop_group backup_nhg
;
173 union pw_protocol_fields fields
;
177 * Bridge port info for the dataplane
179 struct dplane_br_port_info
{
180 uint32_t sph_filter_cnt
;
181 struct in_addr sph_filters
[ES_VTEP_MAX_CNT
];
182 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h*/
184 uint32_t backup_nhg_id
;
188 * Interface/prefix info for the dataplane
190 struct dplane_intf_info
{
195 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
196 #define DPLANE_INTF_SECONDARY (1 << 1)
197 #define DPLANE_INTF_BROADCAST (1 << 2)
198 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
199 #define DPLANE_INTF_HAS_LABEL (1 << 4)
201 /* Interface address/prefix */
202 struct prefix prefix
;
204 /* Dest address, for p2p, or broadcast prefix */
205 struct prefix dest_prefix
;
212 * EVPN MAC address info for the dataplane.
214 struct dplane_mac_info
{
216 ifindex_t br_ifindex
;
218 struct in_addr vtep_ip
;
221 uint32_t update_flags
;
225 * Neighbor info for the dataplane
227 struct dplane_neigh_info
{
228 struct ipaddr ip_addr
;
231 struct ipaddr ip_addr
;
235 uint32_t update_flags
;
241 struct dplane_neigh_table
{
244 uint32_t ucast_probes
;
245 uint32_t mcast_probes
;
249 * Policy based routing rule info for the dataplane
251 struct dplane_ctx_rule
{
254 /* The route table pointed by this rule */
257 /* Filter criteria */
261 struct prefix src_ip
;
262 struct prefix dst_ip
;
266 uint16_t action_vlan_id
;
267 uint16_t action_vlan_flags
;
269 uint32_t action_queue_id
;
271 char ifname
[INTERFACE_NAMSIZ
+ 1];
274 struct dplane_rule_info
{
276 * Originating zclient sock fd, so we can know who to send
284 struct dplane_ctx_rule
new;
285 struct dplane_ctx_rule old
;
288 struct dplane_gre_ctx
{
289 uint32_t link_ifindex
;
291 struct zebra_l2info_gre info
;
296 * Network interface configuration info - aligned with netlink's NETCONF
297 * info. The flags values are public, in the dplane.h file...
299 struct dplane_netconf_info
{
302 enum dplane_netconf_status_e mpls_val
;
303 enum dplane_netconf_status_e mcast_val
;
307 * The context block used to exchange info about route updates across
308 * the boundary between the zebra main context (and pthread) and the
309 * dataplane layer (and pthread).
311 struct zebra_dplane_ctx
{
314 enum dplane_op_e zd_op
;
316 /* Status on return */
317 enum zebra_dplane_result zd_status
;
319 /* Dplane provider id */
320 uint32_t zd_provider
;
322 /* Flags - used by providers, e.g. */
330 /* Some updates may be generated by notifications: allow the
331 * plugin to notice and ignore results from its own notifications.
333 uint32_t zd_notif_provider
;
335 /* TODO -- internal/sub-operation status? */
336 enum zebra_dplane_result zd_remote_status
;
337 enum zebra_dplane_result zd_kernel_status
;
340 uint32_t zd_table_id
;
342 char zd_ifname
[INTERFACE_NAMSIZ
];
343 ifindex_t zd_ifindex
;
345 /* Support info for different kinds of updates */
347 struct dplane_route_info rinfo
;
348 struct zebra_lsp lsp
;
349 struct dplane_pw_info pw
;
350 struct dplane_br_port_info br_port
;
351 struct dplane_intf_info intf
;
352 struct dplane_mac_info macinfo
;
353 struct dplane_neigh_info neigh
;
354 struct dplane_rule_info rule
;
355 struct zebra_pbr_iptable iptable
;
356 struct zebra_pbr_ipset ipset
;
358 struct zebra_pbr_ipset_entry entry
;
359 struct zebra_pbr_ipset_info info
;
361 struct dplane_neigh_table neightable
;
362 struct dplane_gre_ctx gre
;
363 struct dplane_netconf_info netconf
;
366 /* Namespace info, used especially for netlink kernel communication */
367 struct zebra_dplane_info zd_ns_info
;
369 /* Embedded list linkage */
370 TAILQ_ENTRY(zebra_dplane_ctx
) zd_q_entries
;
373 /* Flag that can be set by a pre-kernel provider as a signal that an update
374 * should bypass the kernel.
376 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
380 * Registration block for one dataplane provider.
382 struct zebra_dplane_provider
{
384 char dp_name
[DPLANE_PROVIDER_NAMELEN
+ 1];
386 /* Priority, for ordering among providers */
393 pthread_mutex_t dp_mutex
;
395 /* Plugin-provided extra data */
401 int (*dp_start
)(struct zebra_dplane_provider
*prov
);
403 int (*dp_fp
)(struct zebra_dplane_provider
*prov
);
405 int (*dp_fini
)(struct zebra_dplane_provider
*prov
, bool early_p
);
407 _Atomic
uint32_t dp_in_counter
;
408 _Atomic
uint32_t dp_in_queued
;
409 _Atomic
uint32_t dp_in_max
;
410 _Atomic
uint32_t dp_out_counter
;
411 _Atomic
uint32_t dp_out_queued
;
412 _Atomic
uint32_t dp_out_max
;
413 _Atomic
uint32_t dp_error_counter
;
415 /* Queue of contexts inbound to the provider */
416 struct dplane_ctx_q dp_ctx_in_q
;
418 /* Queue of completed contexts outbound from the provider back
419 * towards the dataplane module.
421 struct dplane_ctx_q dp_ctx_out_q
;
423 /* Embedded list linkage for provider objects */
424 TAILQ_ENTRY(zebra_dplane_provider
) dp_prov_link
;
427 /* Declare types for list of zns info objects */
428 PREDECL_DLIST(zns_info_list
);
430 struct dplane_zns_info
{
431 struct zebra_dplane_info info
;
434 struct thread
*t_read
;
437 struct zns_info_list_item link
;
443 static struct zebra_dplane_globals
{
444 /* Mutex to control access to dataplane components */
445 pthread_mutex_t dg_mutex
;
447 /* Results callback registered by zebra 'core' */
448 int (*dg_results_cb
)(struct dplane_ctx_q
*ctxlist
);
450 /* Sentinel for beginning of shutdown */
451 volatile bool dg_is_shutdown
;
453 /* Sentinel for end of shutdown */
454 volatile bool dg_run
;
456 /* Update context queue inbound to the dataplane */
457 TAILQ_HEAD(zdg_ctx_q
, zebra_dplane_ctx
) dg_update_ctx_q
;
459 /* Ordered list of providers */
460 TAILQ_HEAD(zdg_prov_q
, zebra_dplane_provider
) dg_providers_q
;
462 /* List of info about each zns */
463 struct zns_info_list_head dg_zns_list
;
465 /* Counter used to assign internal ids to providers */
466 uint32_t dg_provider_id
;
468 /* Limit number of pending, unprocessed updates */
469 _Atomic
uint32_t dg_max_queued_updates
;
471 /* Control whether system route notifications should be produced. */
472 bool dg_sys_route_notifs
;
474 /* Limit number of new updates dequeued at once, to pace an
477 uint32_t dg_updates_per_cycle
;
479 _Atomic
uint32_t dg_routes_in
;
480 _Atomic
uint32_t dg_routes_queued
;
481 _Atomic
uint32_t dg_routes_queued_max
;
482 _Atomic
uint32_t dg_route_errors
;
483 _Atomic
uint32_t dg_other_errors
;
485 _Atomic
uint32_t dg_nexthops_in
;
486 _Atomic
uint32_t dg_nexthop_errors
;
488 _Atomic
uint32_t dg_lsps_in
;
489 _Atomic
uint32_t dg_lsp_errors
;
491 _Atomic
uint32_t dg_pws_in
;
492 _Atomic
uint32_t dg_pw_errors
;
494 _Atomic
uint32_t dg_br_port_in
;
495 _Atomic
uint32_t dg_br_port_errors
;
497 _Atomic
uint32_t dg_intf_addrs_in
;
498 _Atomic
uint32_t dg_intf_addr_errors
;
500 _Atomic
uint32_t dg_macs_in
;
501 _Atomic
uint32_t dg_mac_errors
;
503 _Atomic
uint32_t dg_neighs_in
;
504 _Atomic
uint32_t dg_neigh_errors
;
506 _Atomic
uint32_t dg_rules_in
;
507 _Atomic
uint32_t dg_rule_errors
;
509 _Atomic
uint32_t dg_update_yields
;
511 _Atomic
uint32_t dg_iptable_in
;
512 _Atomic
uint32_t dg_iptable_errors
;
514 _Atomic
uint32_t dg_ipset_in
;
515 _Atomic
uint32_t dg_ipset_errors
;
516 _Atomic
uint32_t dg_ipset_entry_in
;
517 _Atomic
uint32_t dg_ipset_entry_errors
;
519 _Atomic
uint32_t dg_neightable_in
;
520 _Atomic
uint32_t dg_neightable_errors
;
522 _Atomic
uint32_t dg_gre_set_in
;
523 _Atomic
uint32_t dg_gre_set_errors
;
525 /* Dataplane pthread */
526 struct frr_pthread
*dg_pthread
;
528 /* Event-delivery context 'master' for the dplane */
529 struct thread_master
*dg_master
;
531 /* Event/'thread' pointer for queued updates */
532 struct thread
*dg_t_update
;
534 /* Event pointer for pending shutdown check loop */
535 struct thread
*dg_t_shutdown_check
;
539 /* Instantiate zns list type */
540 DECLARE_DLIST(zns_info_list
, struct dplane_zns_info
, link
);
/*
 * Lock and unlock for interactions with the zebra 'core' pthread
 */
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)

/*
 * Lock and unlock for individual providers
 */
#define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
556 static void dplane_thread_loop(struct thread
*event
);
557 static void dplane_info_from_zns(struct zebra_dplane_info
*ns_info
,
558 struct zebra_ns
*zns
);
559 static enum zebra_dplane_result
lsp_update_internal(struct zebra_lsp
*lsp
,
560 enum dplane_op_e op
);
561 static enum zebra_dplane_result
pw_update_internal(struct zebra_pw
*pw
,
562 enum dplane_op_e op
);
563 static enum zebra_dplane_result
intf_addr_update_internal(
564 const struct interface
*ifp
, const struct connected
*ifc
,
565 enum dplane_op_e op
);
566 static enum zebra_dplane_result
mac_update_common(
567 enum dplane_op_e op
, const struct interface
*ifp
,
568 const struct interface
*br_ifp
,
569 vlanid_t vid
, const struct ethaddr
*mac
,
570 struct in_addr vtep_ip
, bool sticky
, uint32_t nhg_id
,
571 uint32_t update_flags
);
572 static enum zebra_dplane_result
573 neigh_update_internal(enum dplane_op_e op
, const struct interface
*ifp
,
574 const void *link
, int link_family
,
575 const struct ipaddr
*ip
, uint32_t flags
, uint16_t state
,
576 uint32_t update_flags
, int protocol
);
582 /* Obtain thread_master for dataplane thread */
583 struct thread_master
*dplane_get_thread_master(void)
585 return zdplane_info
.dg_master
;
589 * Allocate a dataplane update context
591 struct zebra_dplane_ctx
*dplane_ctx_alloc(void)
593 struct zebra_dplane_ctx
*p
;
595 /* TODO -- just alloc'ing memory, but would like to maintain
598 p
= XCALLOC(MTYPE_DP_CTX
, sizeof(struct zebra_dplane_ctx
));
603 /* Enable system route notifications */
604 void dplane_enable_sys_route_notifs(void)
606 zdplane_info
.dg_sys_route_notifs
= true;
610 * Clean up dependent/internal allocations inside a context object
612 static void dplane_ctx_free_internal(struct zebra_dplane_ctx
*ctx
)
614 struct dplane_intf_extra
*if_extra
, *if_tmp
;
617 * Some internal allocations may need to be freed, depending on
618 * the type of info captured in the ctx.
620 switch (ctx
->zd_op
) {
621 case DPLANE_OP_ROUTE_INSTALL
:
622 case DPLANE_OP_ROUTE_UPDATE
:
623 case DPLANE_OP_ROUTE_DELETE
:
624 case DPLANE_OP_SYS_ROUTE_ADD
:
625 case DPLANE_OP_SYS_ROUTE_DELETE
:
626 case DPLANE_OP_ROUTE_NOTIFY
:
628 /* Free allocated nexthops */
629 if (ctx
->u
.rinfo
.zd_ng
.nexthop
) {
630 /* This deals with recursive nexthops too */
631 nexthops_free(ctx
->u
.rinfo
.zd_ng
.nexthop
);
633 ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
636 /* Free backup info also (if present) */
637 if (ctx
->u
.rinfo
.backup_ng
.nexthop
) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx
->u
.rinfo
.backup_ng
.nexthop
);
641 ctx
->u
.rinfo
.backup_ng
.nexthop
= NULL
;
644 if (ctx
->u
.rinfo
.zd_old_ng
.nexthop
) {
645 /* This deals with recursive nexthops too */
646 nexthops_free(ctx
->u
.rinfo
.zd_old_ng
.nexthop
);
648 ctx
->u
.rinfo
.zd_old_ng
.nexthop
= NULL
;
651 if (ctx
->u
.rinfo
.old_backup_ng
.nexthop
) {
652 /* This deals with recursive nexthops too */
653 nexthops_free(ctx
->u
.rinfo
.old_backup_ng
.nexthop
);
655 ctx
->u
.rinfo
.old_backup_ng
.nexthop
= NULL
;
658 /* Optional extra interface info */
659 TAILQ_FOREACH_SAFE(if_extra
, &ctx
->u
.rinfo
.intf_extra_q
,
661 TAILQ_REMOVE(&ctx
->u
.rinfo
.intf_extra_q
, if_extra
,
663 XFREE(MTYPE_DP_INTF
, if_extra
);
668 case DPLANE_OP_NH_INSTALL
:
669 case DPLANE_OP_NH_UPDATE
:
670 case DPLANE_OP_NH_DELETE
: {
671 if (ctx
->u
.rinfo
.nhe
.ng
.nexthop
) {
672 /* This deals with recursive nexthops too */
673 nexthops_free(ctx
->u
.rinfo
.nhe
.ng
.nexthop
);
675 ctx
->u
.rinfo
.nhe
.ng
.nexthop
= NULL
;
680 case DPLANE_OP_LSP_INSTALL
:
681 case DPLANE_OP_LSP_UPDATE
:
682 case DPLANE_OP_LSP_DELETE
:
683 case DPLANE_OP_LSP_NOTIFY
:
685 struct zebra_nhlfe
*nhlfe
;
687 /* Unlink and free allocated NHLFEs */
688 frr_each_safe(nhlfe_list
, &ctx
->u
.lsp
.nhlfe_list
, nhlfe
) {
689 nhlfe_list_del(&ctx
->u
.lsp
.nhlfe_list
, nhlfe
);
690 zebra_mpls_nhlfe_free(nhlfe
);
693 /* Unlink and free allocated backup NHLFEs, if present */
694 frr_each_safe(nhlfe_list
,
695 &(ctx
->u
.lsp
.backup_nhlfe_list
), nhlfe
) {
696 nhlfe_list_del(&ctx
->u
.lsp
.backup_nhlfe_list
,
698 zebra_mpls_nhlfe_free(nhlfe
);
701 /* Clear pointers in lsp struct, in case we're caching
702 * free context structs.
704 nhlfe_list_init(&ctx
->u
.lsp
.nhlfe_list
);
705 ctx
->u
.lsp
.best_nhlfe
= NULL
;
706 nhlfe_list_init(&ctx
->u
.lsp
.backup_nhlfe_list
);
711 case DPLANE_OP_PW_INSTALL
:
712 case DPLANE_OP_PW_UNINSTALL
:
713 /* Free allocated nexthops */
714 if (ctx
->u
.pw
.fib_nhg
.nexthop
) {
715 /* This deals with recursive nexthops too */
716 nexthops_free(ctx
->u
.pw
.fib_nhg
.nexthop
);
718 ctx
->u
.pw
.fib_nhg
.nexthop
= NULL
;
720 if (ctx
->u
.pw
.primary_nhg
.nexthop
) {
721 nexthops_free(ctx
->u
.pw
.primary_nhg
.nexthop
);
723 ctx
->u
.pw
.primary_nhg
.nexthop
= NULL
;
725 if (ctx
->u
.pw
.backup_nhg
.nexthop
) {
726 nexthops_free(ctx
->u
.pw
.backup_nhg
.nexthop
);
728 ctx
->u
.pw
.backup_nhg
.nexthop
= NULL
;
732 case DPLANE_OP_ADDR_INSTALL
:
733 case DPLANE_OP_ADDR_UNINSTALL
:
734 case DPLANE_OP_INTF_ADDR_ADD
:
735 case DPLANE_OP_INTF_ADDR_DEL
:
736 /* Maybe free label string, if allocated */
737 if (ctx
->u
.intf
.label
!= NULL
&&
738 ctx
->u
.intf
.label
!= ctx
->u
.intf
.label_buf
) {
739 XFREE(MTYPE_DP_CTX
, ctx
->u
.intf
.label
);
740 ctx
->u
.intf
.label
= NULL
;
744 case DPLANE_OP_MAC_INSTALL
:
745 case DPLANE_OP_MAC_DELETE
:
746 case DPLANE_OP_NEIGH_INSTALL
:
747 case DPLANE_OP_NEIGH_UPDATE
:
748 case DPLANE_OP_NEIGH_DELETE
:
749 case DPLANE_OP_VTEP_ADD
:
750 case DPLANE_OP_VTEP_DELETE
:
751 case DPLANE_OP_RULE_ADD
:
752 case DPLANE_OP_RULE_DELETE
:
753 case DPLANE_OP_RULE_UPDATE
:
754 case DPLANE_OP_NEIGH_DISCOVER
:
755 case DPLANE_OP_BR_PORT_UPDATE
:
756 case DPLANE_OP_NEIGH_IP_INSTALL
:
757 case DPLANE_OP_NEIGH_IP_DELETE
:
759 case DPLANE_OP_IPSET_ADD
:
760 case DPLANE_OP_IPSET_DELETE
:
763 case DPLANE_OP_IPSET_ENTRY_ADD
:
764 case DPLANE_OP_IPSET_ENTRY_DELETE
:
766 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
768 case DPLANE_OP_IPTABLE_ADD
:
769 case DPLANE_OP_IPTABLE_DELETE
:
770 if (ctx
->u
.iptable
.interface_name_list
) {
771 struct listnode
*node
, *nnode
;
774 for (ALL_LIST_ELEMENTS(
775 ctx
->u
.iptable
.interface_name_list
, node
,
778 ctx
->u
.iptable
.interface_name_list
,
780 XFREE(MTYPE_DP_NETFILTER
, ifname
);
782 list_delete(&ctx
->u
.iptable
.interface_name_list
);
785 case DPLANE_OP_GRE_SET
:
786 case DPLANE_OP_INTF_NETCONFIG
:
792 * Free a dataplane results context.
794 static void dplane_ctx_free(struct zebra_dplane_ctx
**pctx
)
799 DPLANE_CTX_VALID(*pctx
);
801 /* TODO -- just freeing memory, but would like to maintain
805 /* Some internal allocations may need to be freed, depending on
806 * the type of info captured in the ctx.
808 dplane_ctx_free_internal(*pctx
);
810 XFREE(MTYPE_DP_CTX
, *pctx
);
814 * Reset an allocated context object for re-use. All internal allocations are
815 * freed and the context is memset.
817 void dplane_ctx_reset(struct zebra_dplane_ctx
*ctx
)
819 dplane_ctx_free_internal(ctx
);
820 memset(ctx
, 0, sizeof(*ctx
));
/*
 * Return a context block to the dplane module after processing
 */
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
	/* TODO -- maintain pool; for now, just free */
	dplane_ctx_free(pctx);
}
832 /* Enqueue a context block */
833 void dplane_ctx_enqueue_tail(struct dplane_ctx_q
*q
,
834 const struct zebra_dplane_ctx
*ctx
)
836 TAILQ_INSERT_TAIL(q
, (struct zebra_dplane_ctx
*)ctx
, zd_q_entries
);
839 /* Append a list of context blocks to another list */
840 void dplane_ctx_list_append(struct dplane_ctx_q
*to_list
,
841 struct dplane_ctx_q
*from_list
)
843 if (TAILQ_FIRST(from_list
)) {
844 TAILQ_CONCAT(to_list
, from_list
, zd_q_entries
);
846 /* And clear 'from' list */
847 TAILQ_INIT(from_list
);
/* Peek at the head of a context list without dequeuing */
struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
{
	struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);

	return ctx;
}
858 /* Dequeue a context block from the head of a list */
859 struct zebra_dplane_ctx
*dplane_ctx_dequeue(struct dplane_ctx_q
*q
)
861 struct zebra_dplane_ctx
*ctx
= TAILQ_FIRST(q
);
864 TAILQ_REMOVE(q
, ctx
, zd_q_entries
);
870 * Accessors for information from the context object
872 enum zebra_dplane_result
dplane_ctx_get_status(
873 const struct zebra_dplane_ctx
*ctx
)
875 DPLANE_CTX_VALID(ctx
);
877 return ctx
->zd_status
;
880 void dplane_ctx_set_status(struct zebra_dplane_ctx
*ctx
,
881 enum zebra_dplane_result status
)
883 DPLANE_CTX_VALID(ctx
);
885 ctx
->zd_status
= status
;
888 /* Retrieve last/current provider id */
889 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx
*ctx
)
891 DPLANE_CTX_VALID(ctx
);
892 return ctx
->zd_provider
;
895 /* Providers run before the kernel can control whether a kernel
896 * update should be done.
898 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx
*ctx
)
900 DPLANE_CTX_VALID(ctx
);
902 SET_FLAG(ctx
->zd_flags
, DPLANE_CTX_FLAG_NO_KERNEL
);
905 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx
*ctx
)
907 DPLANE_CTX_VALID(ctx
);
909 return CHECK_FLAG(ctx
->zd_flags
, DPLANE_CTX_FLAG_NO_KERNEL
);
912 void dplane_ctx_set_op(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
)
914 DPLANE_CTX_VALID(ctx
);
918 enum dplane_op_e
dplane_ctx_get_op(const struct zebra_dplane_ctx
*ctx
)
920 DPLANE_CTX_VALID(ctx
);
925 const char *dplane_op2str(enum dplane_op_e op
)
927 const char *ret
= "UNKNOWN";
935 case DPLANE_OP_ROUTE_INSTALL
:
936 ret
= "ROUTE_INSTALL";
938 case DPLANE_OP_ROUTE_UPDATE
:
939 ret
= "ROUTE_UPDATE";
941 case DPLANE_OP_ROUTE_DELETE
:
942 ret
= "ROUTE_DELETE";
944 case DPLANE_OP_ROUTE_NOTIFY
:
945 ret
= "ROUTE_NOTIFY";
949 case DPLANE_OP_NH_INSTALL
:
952 case DPLANE_OP_NH_UPDATE
:
955 case DPLANE_OP_NH_DELETE
:
959 case DPLANE_OP_LSP_INSTALL
:
962 case DPLANE_OP_LSP_UPDATE
:
965 case DPLANE_OP_LSP_DELETE
:
968 case DPLANE_OP_LSP_NOTIFY
:
972 case DPLANE_OP_PW_INSTALL
:
975 case DPLANE_OP_PW_UNINSTALL
:
976 ret
= "PW_UNINSTALL";
979 case DPLANE_OP_SYS_ROUTE_ADD
:
980 ret
= "SYS_ROUTE_ADD";
982 case DPLANE_OP_SYS_ROUTE_DELETE
:
983 ret
= "SYS_ROUTE_DEL";
986 case DPLANE_OP_BR_PORT_UPDATE
:
987 ret
= "BR_PORT_UPDATE";
990 case DPLANE_OP_ADDR_INSTALL
:
991 ret
= "ADDR_INSTALL";
993 case DPLANE_OP_ADDR_UNINSTALL
:
994 ret
= "ADDR_UNINSTALL";
997 case DPLANE_OP_MAC_INSTALL
:
1000 case DPLANE_OP_MAC_DELETE
:
1004 case DPLANE_OP_NEIGH_INSTALL
:
1005 ret
= "NEIGH_INSTALL";
1007 case DPLANE_OP_NEIGH_UPDATE
:
1008 ret
= "NEIGH_UPDATE";
1010 case DPLANE_OP_NEIGH_DELETE
:
1011 ret
= "NEIGH_DELETE";
1013 case DPLANE_OP_VTEP_ADD
:
1016 case DPLANE_OP_VTEP_DELETE
:
1017 ret
= "VTEP_DELETE";
1020 case DPLANE_OP_RULE_ADD
:
1023 case DPLANE_OP_RULE_DELETE
:
1024 ret
= "RULE_DELETE";
1026 case DPLANE_OP_RULE_UPDATE
:
1027 ret
= "RULE_UPDATE";
1030 case DPLANE_OP_NEIGH_DISCOVER
:
1031 ret
= "NEIGH_DISCOVER";
1034 case DPLANE_OP_IPTABLE_ADD
:
1035 ret
= "IPTABLE_ADD";
1037 case DPLANE_OP_IPTABLE_DELETE
:
1038 ret
= "IPTABLE_DELETE";
1040 case DPLANE_OP_IPSET_ADD
:
1043 case DPLANE_OP_IPSET_DELETE
:
1044 ret
= "IPSET_DELETE";
1046 case DPLANE_OP_IPSET_ENTRY_ADD
:
1047 ret
= "IPSET_ENTRY_ADD";
1049 case DPLANE_OP_IPSET_ENTRY_DELETE
:
1050 ret
= "IPSET_ENTRY_DELETE";
1052 case DPLANE_OP_NEIGH_IP_INSTALL
:
1053 ret
= "NEIGH_IP_INSTALL";
1055 case DPLANE_OP_NEIGH_IP_DELETE
:
1056 ret
= "NEIGH_IP_DELETE";
1058 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
1059 ret
= "NEIGH_TABLE_UPDATE";
1062 case DPLANE_OP_GRE_SET
:
1066 case DPLANE_OP_INTF_ADDR_ADD
:
1067 return "INTF_ADDR_ADD";
1069 case DPLANE_OP_INTF_ADDR_DEL
:
1070 return "INTF_ADDR_DEL";
1072 case DPLANE_OP_INTF_NETCONFIG
:
1073 return "INTF_NETCONFIG";
1079 const char *dplane_res2str(enum zebra_dplane_result res
)
1081 const char *ret
= "<Unknown>";
1084 case ZEBRA_DPLANE_REQUEST_FAILURE
:
1087 case ZEBRA_DPLANE_REQUEST_QUEUED
:
1090 case ZEBRA_DPLANE_REQUEST_SUCCESS
:
1098 void dplane_ctx_set_dest(struct zebra_dplane_ctx
*ctx
,
1099 const struct prefix
*dest
)
1101 DPLANE_CTX_VALID(ctx
);
1103 prefix_copy(&(ctx
->u
.rinfo
.zd_dest
), dest
);
1106 const struct prefix
*dplane_ctx_get_dest(const struct zebra_dplane_ctx
*ctx
)
1108 DPLANE_CTX_VALID(ctx
);
1110 return &(ctx
->u
.rinfo
.zd_dest
);
1113 void dplane_ctx_set_src(struct zebra_dplane_ctx
*ctx
, const struct prefix
*src
)
1115 DPLANE_CTX_VALID(ctx
);
1118 prefix_copy(&(ctx
->u
.rinfo
.zd_src
), src
);
1120 memset(&(ctx
->u
.rinfo
.zd_src
), 0, sizeof(struct prefix
));
1123 /* Source prefix is a little special - return NULL for "no src prefix" */
1124 const struct prefix
*dplane_ctx_get_src(const struct zebra_dplane_ctx
*ctx
)
1126 DPLANE_CTX_VALID(ctx
);
1128 if (ctx
->u
.rinfo
.zd_src
.prefixlen
== 0 &&
1129 IN6_IS_ADDR_UNSPECIFIED(&(ctx
->u
.rinfo
.zd_src
.u
.prefix6
))) {
1132 return &(ctx
->u
.rinfo
.zd_src
);
1136 bool dplane_ctx_is_update(const struct zebra_dplane_ctx
*ctx
)
1138 DPLANE_CTX_VALID(ctx
);
1140 return ctx
->zd_is_update
;
1143 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx
*ctx
)
1145 DPLANE_CTX_VALID(ctx
);
1150 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx
*ctx
)
1152 DPLANE_CTX_VALID(ctx
);
1154 return ctx
->zd_old_seq
;
1157 void dplane_ctx_set_vrf(struct zebra_dplane_ctx
*ctx
, vrf_id_t vrf
)
1159 DPLANE_CTX_VALID(ctx
);
1161 ctx
->zd_vrf_id
= vrf
;
1164 vrf_id_t
dplane_ctx_get_vrf(const struct zebra_dplane_ctx
*ctx
)
1166 DPLANE_CTX_VALID(ctx
);
1168 return ctx
->zd_vrf_id
;
1171 /* In some paths we have only a namespace id */
1172 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx
*ctx
, ns_id_t nsid
)
1174 DPLANE_CTX_VALID(ctx
);
1176 ctx
->zd_ns_info
.ns_id
= nsid
;
1179 ns_id_t
dplane_ctx_get_ns_id(const struct zebra_dplane_ctx
*ctx
)
1181 DPLANE_CTX_VALID(ctx
);
1183 return ctx
->zd_ns_info
.ns_id
;
1186 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx
*ctx
)
1188 DPLANE_CTX_VALID(ctx
);
1190 return (ctx
->zd_notif_provider
!= 0);
1193 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx
*ctx
)
1195 DPLANE_CTX_VALID(ctx
);
1197 return ctx
->zd_notif_provider
;
1200 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx
*ctx
,
1203 DPLANE_CTX_VALID(ctx
);
1205 ctx
->zd_notif_provider
= id
;
1208 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx
*ctx
)
1210 DPLANE_CTX_VALID(ctx
);
1212 return ctx
->zd_ifname
;
1215 void dplane_ctx_set_ifname(struct zebra_dplane_ctx
*ctx
, const char *ifname
)
1217 DPLANE_CTX_VALID(ctx
);
1222 strlcpy(ctx
->zd_ifname
, ifname
, sizeof(ctx
->zd_ifname
));
1225 ifindex_t
dplane_ctx_get_ifindex(const struct zebra_dplane_ctx
*ctx
)
1227 DPLANE_CTX_VALID(ctx
);
1229 return ctx
->zd_ifindex
;
1232 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx
*ctx
, ifindex_t ifindex
)
1234 DPLANE_CTX_VALID(ctx
);
1236 ctx
->zd_ifindex
= ifindex
;
1239 void dplane_ctx_set_type(struct zebra_dplane_ctx
*ctx
, int type
)
1241 DPLANE_CTX_VALID(ctx
);
1243 ctx
->u
.rinfo
.zd_type
= type
;
1246 int dplane_ctx_get_type(const struct zebra_dplane_ctx
*ctx
)
1248 DPLANE_CTX_VALID(ctx
);
1250 return ctx
->u
.rinfo
.zd_type
;
1253 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx
*ctx
)
1255 DPLANE_CTX_VALID(ctx
);
1257 return ctx
->u
.rinfo
.zd_old_type
;
1260 void dplane_ctx_set_afi(struct zebra_dplane_ctx
*ctx
, afi_t afi
)
1262 DPLANE_CTX_VALID(ctx
);
1264 ctx
->u
.rinfo
.zd_afi
= afi
;
1267 afi_t
dplane_ctx_get_afi(const struct zebra_dplane_ctx
*ctx
)
1269 DPLANE_CTX_VALID(ctx
);
1271 return ctx
->u
.rinfo
.zd_afi
;
1274 void dplane_ctx_set_safi(struct zebra_dplane_ctx
*ctx
, safi_t safi
)
1276 DPLANE_CTX_VALID(ctx
);
1278 ctx
->u
.rinfo
.zd_safi
= safi
;
1281 safi_t
dplane_ctx_get_safi(const struct zebra_dplane_ctx
*ctx
)
1283 DPLANE_CTX_VALID(ctx
);
1285 return ctx
->u
.rinfo
.zd_safi
;
1288 void dplane_ctx_set_table(struct zebra_dplane_ctx
*ctx
, uint32_t table
)
1290 DPLANE_CTX_VALID(ctx
);
1292 ctx
->zd_table_id
= table
;
1295 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx
*ctx
)
1297 DPLANE_CTX_VALID(ctx
);
1299 return ctx
->zd_table_id
;
1302 route_tag_t
dplane_ctx_get_tag(const struct zebra_dplane_ctx
*ctx
)
1304 DPLANE_CTX_VALID(ctx
);
1306 return ctx
->u
.rinfo
.zd_tag
;
1309 void dplane_ctx_set_tag(struct zebra_dplane_ctx
*ctx
, route_tag_t tag
)
1311 DPLANE_CTX_VALID(ctx
);
1313 ctx
->u
.rinfo
.zd_tag
= tag
;
1316 route_tag_t
dplane_ctx_get_old_tag(const struct zebra_dplane_ctx
*ctx
)
1318 DPLANE_CTX_VALID(ctx
);
1320 return ctx
->u
.rinfo
.zd_old_tag
;
1323 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx
*ctx
)
1325 DPLANE_CTX_VALID(ctx
);
1327 return ctx
->u
.rinfo
.zd_instance
;
1330 void dplane_ctx_set_instance(struct zebra_dplane_ctx
*ctx
, uint16_t instance
)
1332 DPLANE_CTX_VALID(ctx
);
1334 ctx
->u
.rinfo
.zd_instance
= instance
;
1337 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx
*ctx
)
1339 DPLANE_CTX_VALID(ctx
);
1341 return ctx
->u
.rinfo
.zd_old_instance
;
1344 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx
*ctx
)
1346 DPLANE_CTX_VALID(ctx
);
1348 return ctx
->u
.rinfo
.zd_metric
;
1351 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx
*ctx
)
1353 DPLANE_CTX_VALID(ctx
);
1355 return ctx
->u
.rinfo
.zd_old_metric
;
1358 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx
*ctx
)
1360 DPLANE_CTX_VALID(ctx
);
1362 return ctx
->u
.rinfo
.zd_mtu
;
1365 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx
*ctx
)
1367 DPLANE_CTX_VALID(ctx
);
1369 return ctx
->u
.rinfo
.zd_nexthop_mtu
;
1372 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx
*ctx
)
1374 DPLANE_CTX_VALID(ctx
);
1376 return ctx
->u
.rinfo
.zd_distance
;
1379 void dplane_ctx_set_distance(struct zebra_dplane_ctx
*ctx
, uint8_t distance
)
1381 DPLANE_CTX_VALID(ctx
);
1383 ctx
->u
.rinfo
.zd_distance
= distance
;
1386 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx
*ctx
)
1388 DPLANE_CTX_VALID(ctx
);
1390 return ctx
->u
.rinfo
.zd_old_distance
;
1394 * Set the nexthops associated with a context: note that processing code
1395 * may well expect that nexthops are in canonical (sorted) order, so we
1396 * will enforce that here.
1398 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx
*ctx
, struct nexthop
*nh
)
1400 DPLANE_CTX_VALID(ctx
);
1402 if (ctx
->u
.rinfo
.zd_ng
.nexthop
) {
1403 nexthops_free(ctx
->u
.rinfo
.zd_ng
.nexthop
);
1404 ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
1406 nexthop_group_copy_nh_sorted(&(ctx
->u
.rinfo
.zd_ng
), nh
);
1410 * Set the list of backup nexthops; their ordering is preserved (they're not
1413 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx
*ctx
,
1414 const struct nexthop_group
*nhg
)
1416 struct nexthop
*nh
, *last_nh
, *nexthop
;
1418 DPLANE_CTX_VALID(ctx
);
1420 if (ctx
->u
.rinfo
.backup_ng
.nexthop
) {
1421 nexthops_free(ctx
->u
.rinfo
.backup_ng
.nexthop
);
1422 ctx
->u
.rinfo
.backup_ng
.nexthop
= NULL
;
1427 /* Be careful to preserve the order of the backup list */
1428 for (nh
= nhg
->nexthop
; nh
; nh
= nh
->next
) {
1429 nexthop
= nexthop_dup(nh
, NULL
);
1432 NEXTHOP_APPEND(last_nh
, nexthop
);
1434 ctx
->u
.rinfo
.backup_ng
.nexthop
= nexthop
;
1440 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx
*ctx
)
1442 DPLANE_CTX_VALID(ctx
);
1443 return ctx
->u
.rinfo
.zd_nhg_id
;
1446 const struct nexthop_group
*dplane_ctx_get_ng(
1447 const struct zebra_dplane_ctx
*ctx
)
1449 DPLANE_CTX_VALID(ctx
);
1451 return &(ctx
->u
.rinfo
.zd_ng
);
1454 const struct nexthop_group
*
1455 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx
*ctx
)
1457 DPLANE_CTX_VALID(ctx
);
1459 return &(ctx
->u
.rinfo
.backup_ng
);
1462 const struct nexthop_group
*
1463 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx
*ctx
)
1465 DPLANE_CTX_VALID(ctx
);
1467 return &(ctx
->u
.rinfo
.zd_old_ng
);
1470 const struct nexthop_group
*
1471 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx
*ctx
)
1473 DPLANE_CTX_VALID(ctx
);
1475 return &(ctx
->u
.rinfo
.old_backup_ng
);
/* Namespace info captured in the context. */
const struct zebra_dplane_info *dplane_ctx_get_ns(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->zd_ns_info);
}

/* Socket fd recorded in the context's namespace info. */
int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->zd_ns_info.sock;
}
/* Accessors for nexthop information */

/* Id of the nexthop hash entry (nhe) carried in the context. */
uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.id;
}

/* Previous nhe id recorded in the context. */
uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.old_id;
}

afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.afi;
}

vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.vrf_id;
}

int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.type;
}

/* Nexthop group embedded in the nhe info. */
const struct nexthop_group *
dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rinfo.nhe.ng);
}

/* Array of group members, when the nhe was converted to an id group. */
const struct nh_grp *
dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.nh_grp;
}

uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rinfo.nhe.nh_grp_count;
}
/* Accessors for LSP information */

/* Incoming label of the LSP carried in the context. */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.ile.in_label;
}

void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.ile.in_label = label;
}

/* Address family stored with the LSP info. */
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.addr_family;
}
1571 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx
*ctx
,
1574 DPLANE_CTX_VALID(ctx
);
1576 ctx
->u
.lsp
.addr_family
= family
;
/* Flags word stored with the LSP info. */
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.flags;
}
1586 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx
*ctx
,
1589 DPLANE_CTX_VALID(ctx
);
1591 ctx
->u
.lsp
.flags
= flags
;
/* Primary NHLFE list of the LSP in the context. */
const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.lsp.nhlfe_list);
}

/* Backup NHLFE list of the LSP in the context. */
const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.lsp.backup_nhlfe_list);
}
1608 struct zebra_nhlfe
*dplane_ctx_add_nhlfe(struct zebra_dplane_ctx
*ctx
,
1609 enum lsp_types_t lsp_type
,
1610 enum nexthop_types_t nh_type
,
1611 const union g_addr
*gate
,
1612 ifindex_t ifindex
, uint8_t num_labels
,
1613 mpls_label_t
*out_labels
)
1615 struct zebra_nhlfe
*nhlfe
;
1617 DPLANE_CTX_VALID(ctx
);
1619 nhlfe
= zebra_mpls_lsp_add_nhlfe(&(ctx
->u
.lsp
),
1620 lsp_type
, nh_type
, gate
,
1621 ifindex
, num_labels
, out_labels
);
1626 struct zebra_nhlfe
*dplane_ctx_add_backup_nhlfe(
1627 struct zebra_dplane_ctx
*ctx
, enum lsp_types_t lsp_type
,
1628 enum nexthop_types_t nh_type
, const union g_addr
*gate
,
1629 ifindex_t ifindex
, uint8_t num_labels
, mpls_label_t
*out_labels
)
1631 struct zebra_nhlfe
*nhlfe
;
1633 DPLANE_CTX_VALID(ctx
);
1635 nhlfe
= zebra_mpls_lsp_add_backup_nhlfe(&(ctx
->u
.lsp
),
1636 lsp_type
, nh_type
, gate
,
1637 ifindex
, num_labels
,
/* 'Best' NHLFE recorded for the context's LSP. */
const struct zebra_nhlfe *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.best_nhlfe;
}

/* Record the 'best' NHLFE; returns the stored pointer for convenience. */
const struct zebra_nhlfe *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
			  struct zebra_nhlfe *nhlfe)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.lsp.best_nhlfe = nhlfe;
	return ctx->u.lsp.best_nhlfe;
}

/* Number of ECMP paths recorded for the LSP. */
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.lsp.num_ecmp;
}
/* Accessors for pseudowire (pw) information in the context. */

mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.local_label;
}

mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.remote_label;
}

int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.type;
}

/* Address family of the pseudowire. */
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.af;
}

uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.flags;
}

int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.pw.status;
}

void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.pw.status = status;
}

/* Destination address of the pseudowire. */
const union g_addr *dplane_ctx_get_pw_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.dest);
}

/* Protocol-specific fields carried with the pseudowire. */
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fields);
}

/* Installed ('fib') nexthops captured for the pw destination. */
const struct nexthop_group *
dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.fib_nhg);
}

/* Primary nexthop group captured for the pw destination. */
const struct nexthop_group *
dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.primary_nhg);
}

/* Backup nexthop group captured for the pw destination. */
const struct nexthop_group *
dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.pw.backup_nhg);
}
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.metric;
}

void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.intf.metric = metric;
}
/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
}

bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
}

bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
}

void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
}

void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
}

void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
}
/* Interface address prefix carried in the context. */
const struct prefix *dplane_ctx_get_intf_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.prefix);
}

void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
			      const struct prefix *p)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.intf.prefix), p);
}

/* True when the DPLANE_INTF_HAS_DEST flag is set. */
bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
}

/* Destination-address prefix carried in the context. */
const struct prefix *dplane_ctx_get_intf_dest(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.intf.dest_prefix);
}

void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
			      const struct prefix *p)
{
	DPLANE_CTX_VALID(ctx);

	prefix_copy(&(ctx->u.intf.dest_prefix), p);
}

/* True when the DPLANE_INTF_HAS_LABEL flag is set. */
bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
}

/* Label string stored with the interface info; may be NULL. */
const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.intf.label;
}
1868 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx
*ctx
, const char *label
)
1872 DPLANE_CTX_VALID(ctx
);
1874 if (ctx
->u
.intf
.label
&& ctx
->u
.intf
.label
!= ctx
->u
.intf
.label_buf
)
1875 XFREE(MTYPE_DP_CTX
, ctx
->u
.intf
.label
);
1877 ctx
->u
.intf
.label
= NULL
;
1880 ctx
->u
.intf
.flags
|= DPLANE_INTF_HAS_LABEL
;
1882 /* Use embedded buffer if it's adequate; else allocate. */
1883 len
= strlen(label
);
1885 if (len
< sizeof(ctx
->u
.intf
.label_buf
)) {
1886 strlcpy(ctx
->u
.intf
.label_buf
, label
,
1887 sizeof(ctx
->u
.intf
.label_buf
));
1888 ctx
->u
.intf
.label
= ctx
->u
.intf
.label_buf
;
1890 ctx
->u
.intf
.label
= XSTRDUP(MTYPE_DP_CTX
, label
);
1893 ctx
->u
.intf
.flags
&= ~DPLANE_INTF_HAS_LABEL
;
/* Accessors for MAC information */
vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.vid;
}

bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.is_sticky;
}

uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.nhg_id;
}

uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.update_flags;
}

/* MAC address carried in the context. */
const struct ethaddr *dplane_ctx_mac_get_addr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.mac);
}

/* VTEP IP associated with the MAC update. */
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.macinfo.vtep_ip);
}

/* Bridge ifindex associated with the MAC update. */
ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.macinfo.br_ifindex;
}
/* Accessors for neighbor information */
const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.ip_addr);
}

/* Link-layer address when it is an IP (ctx->u.neigh.link union). */
const struct ipaddr *
dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.link.ip_addr);
}

/* Link-layer address when it is a MAC (ctx->u.neigh.link union). */
const struct ethaddr *dplane_ctx_neigh_get_mac(
	const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return &(ctx->u.neigh.link.mac);
}

uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.flags;
}

uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.state;
}

uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);
	return ctx->u.neigh.update_flags;
}
1982 /* Accessor for GRE set */
1984 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx
*ctx
)
1986 DPLANE_CTX_VALID(ctx
);
1988 return ctx
->u
.gre
.link_ifindex
;
1992 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx
*ctx
)
1994 DPLANE_CTX_VALID(ctx
);
1996 return ctx
->u
.gre
.mtu
;
1999 const struct zebra_l2info_gre
*
2000 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx
*ctx
)
2002 DPLANE_CTX_VALID(ctx
);
2004 return &ctx
->u
.gre
.info
;
/* Accessors for PBR rule information.  'new' holds the rule being
 * installed/updated; 'old' holds the prior rule for update operations.
 */
int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.sock;
}

const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.ifname;
}

int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.unique;
}

int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.seq;
}

uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.priority;
}

uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.priority;
}

uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.table;
}

uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.table;
}

uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.filter_bm;
}

uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.filter_bm;
}

uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.fwmark;
}

uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.fwmark;
}

uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.ip_proto;
}

uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.ip_proto;
}

uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.new.dsfield;
}

uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return ctx->u.rule.old.dsfield;
}

const struct prefix *
dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rule.new.src_ip);
}

const struct prefix *
dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rule.old.src_ip);
}

const struct prefix *
dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rule.new.dst_ip);
}

const struct prefix *
dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
{
	DPLANE_CTX_VALID(ctx);

	return &(ctx->u.rule.old.dst_ip);
}
2152 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx
*ctx
)
2154 DPLANE_CTX_VALID(ctx
);
2156 return ctx
->u
.br_port
.flags
;
2160 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx
*ctx
)
2162 DPLANE_CTX_VALID(ctx
);
2164 return ctx
->u
.br_port
.sph_filter_cnt
;
2167 const struct in_addr
*
2168 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx
*ctx
)
2170 DPLANE_CTX_VALID(ctx
);
2172 return ctx
->u
.br_port
.sph_filters
;
2176 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx
*ctx
)
2178 DPLANE_CTX_VALID(ctx
);
2180 return ctx
->u
.br_port
.backup_nhg_id
;
/* Accessors for PBR iptable information */

/* Copy the context's iptable struct into caller-supplied 'table'. */
void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
				struct zebra_pbr_iptable *table)
{
	DPLANE_CTX_VALID(ctx);

	memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
}
2192 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx
*ctx
,
2193 struct zebra_pbr_ipset
*ipset
)
2195 DPLANE_CTX_VALID(ctx
);
2199 if (ctx
->zd_op
== DPLANE_OP_IPSET_ENTRY_ADD
||
2200 ctx
->zd_op
== DPLANE_OP_IPSET_ENTRY_DELETE
) {
2201 memset(ipset
, 0, sizeof(struct zebra_pbr_ipset
));
2202 ipset
->type
= ctx
->u
.ipset_entry
.info
.type
;
2203 ipset
->family
= ctx
->u
.ipset_entry
.info
.family
;
2204 memcpy(&ipset
->ipset_name
, &ctx
->u
.ipset_entry
.info
.ipset_name
,
2205 ZEBRA_IPSET_NAME_SIZE
);
2207 memcpy(ipset
, &ctx
->u
.ipset
, sizeof(struct zebra_pbr_ipset
));
2210 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx
*ctx
,
2211 struct zebra_pbr_ipset_entry
*entry
)
2213 DPLANE_CTX_VALID(ctx
);
2217 memcpy(entry
, &ctx
->u
.ipset_entry
.entry
, sizeof(struct zebra_pbr_ipset_entry
));
2221 * End of dplane context accessors
2224 /* Optional extra info about interfaces in nexthops - a plugin must enable
/* First entry of the route update's extra-interface-info queue;
 * NULL when the queue is empty (collection is plugin-enabled).
 */
const struct dplane_intf_extra *
dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
{
	return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
}

/* Iterator: entry following 'ptr' in the extra-info queue; the ctx
 * argument is present for API symmetry but unused here.
 */
const struct dplane_intf_extra *
dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
			   const struct dplane_intf_extra *ptr)
{
	return TAILQ_NEXT(ptr, link);
}
2240 vrf_id_t
dplane_intf_extra_get_vrfid(const struct dplane_intf_extra
*ptr
)
2245 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra
*ptr
)
2247 return ptr
->ifindex
;
2250 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra
*ptr
)
2255 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra
*ptr
)
2261 * End of interface extra info accessors
2264 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx
*ctx
)
2266 DPLANE_CTX_VALID(ctx
);
2268 return ctx
->u
.neightable
.family
;
2272 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx
*ctx
)
2274 DPLANE_CTX_VALID(ctx
);
2276 return ctx
->u
.neightable
.app_probes
;
2280 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx
*ctx
)
2282 DPLANE_CTX_VALID(ctx
);
2284 return ctx
->u
.neightable
.ucast_probes
;
2288 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx
*ctx
)
2290 DPLANE_CTX_VALID(ctx
);
2292 return ctx
->u
.neightable
.mcast_probes
;
2295 ifindex_t
dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx
*ctx
)
2297 DPLANE_CTX_VALID(ctx
);
2299 return ctx
->u
.netconf
.ifindex
;
2302 ns_id_t
dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx
*ctx
)
2304 DPLANE_CTX_VALID(ctx
);
2306 return ctx
->u
.netconf
.ns_id
;
2309 void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx
*ctx
,
2312 DPLANE_CTX_VALID(ctx
);
2314 ctx
->u
.netconf
.ifindex
= ifindex
;
2317 void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx
*ctx
, ns_id_t ns_id
)
2319 DPLANE_CTX_VALID(ctx
);
2321 ctx
->u
.netconf
.ns_id
= ns_id
;
2324 enum dplane_netconf_status_e
2325 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx
*ctx
)
2327 DPLANE_CTX_VALID(ctx
);
2329 return ctx
->u
.netconf
.mpls_val
;
2332 enum dplane_netconf_status_e
2333 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx
*ctx
)
2335 DPLANE_CTX_VALID(ctx
);
2337 return ctx
->u
.netconf
.mcast_val
;
2340 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx
*ctx
,
2341 enum dplane_netconf_status_e val
)
2343 DPLANE_CTX_VALID(ctx
);
2345 ctx
->u
.netconf
.mpls_val
= val
;
2348 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx
*ctx
,
2349 enum dplane_netconf_status_e val
)
2351 DPLANE_CTX_VALID(ctx
);
2353 ctx
->u
.netconf
.mcast_val
= val
;
/*
 * Retrieve the limit on the number of pending, unprocessed updates.
 */
uint32_t dplane_get_in_queue_limit(void)
{
	/* Atomic read: the limit may be changed from another pthread. */
	return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
				    memory_order_relaxed);
}
2366 * Configure limit on the number of pending, queued updates.
2368 void dplane_set_in_queue_limit(uint32_t limit
, bool set
)
2370 /* Reset to default on 'unset' */
2372 limit
= DPLANE_DEFAULT_MAX_QUEUED
;
2374 atomic_store_explicit(&zdplane_info
.dg_max_queued_updates
, limit
,
2375 memory_order_relaxed
);
/*
 * Retrieve the current queue depth of incoming, unprocessed updates
 */
uint32_t dplane_get_in_queue_len(void)
{
	return atomic_load_explicit(&zdplane_info.dg_routes_queued,
				    memory_order_seq_cst);
}
2388 * Common dataplane context init with zebra namespace info.
2390 static int dplane_ctx_ns_init(struct zebra_dplane_ctx
*ctx
,
2391 struct zebra_ns
*zns
,
2394 dplane_info_from_zns(&(ctx
->zd_ns_info
), zns
);
2396 ctx
->zd_is_update
= is_update
;
2398 #if defined(HAVE_NETLINK)
2399 /* Increment message counter after copying to context struct - may need
2400 * two messages in some 'update' cases.
2403 zns
->netlink_dplane_out
.seq
+= 2;
2405 zns
->netlink_dplane_out
.seq
++;
2406 #endif /* HAVE_NETLINK */
2412 * Initialize a context block for a route update from zebra data structs.
2414 int dplane_ctx_route_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2415 struct route_node
*rn
, struct route_entry
*re
)
2418 const struct route_table
*table
= NULL
;
2419 const struct rib_table_info
*info
;
2420 const struct prefix
*p
, *src_p
;
2421 struct zebra_ns
*zns
;
2422 struct zebra_vrf
*zvrf
;
2423 struct nexthop
*nexthop
;
2424 struct zebra_l3vni
*zl3vni
;
2425 const struct interface
*ifp
;
2426 struct dplane_intf_extra
*if_extra
;
2428 if (!ctx
|| !rn
|| !re
)
2431 TAILQ_INIT(&ctx
->u
.rinfo
.intf_extra_q
);
2434 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2436 ctx
->u
.rinfo
.zd_type
= re
->type
;
2437 ctx
->u
.rinfo
.zd_old_type
= re
->type
;
2439 /* Prefixes: dest, and optional source */
2440 srcdest_rnode_prefixes(rn
, &p
, &src_p
);
2442 prefix_copy(&(ctx
->u
.rinfo
.zd_dest
), p
);
2445 prefix_copy(&(ctx
->u
.rinfo
.zd_src
), src_p
);
2447 memset(&(ctx
->u
.rinfo
.zd_src
), 0, sizeof(ctx
->u
.rinfo
.zd_src
));
2449 ctx
->zd_table_id
= re
->table
;
2451 ctx
->u
.rinfo
.zd_metric
= re
->metric
;
2452 ctx
->u
.rinfo
.zd_old_metric
= re
->metric
;
2453 ctx
->zd_vrf_id
= re
->vrf_id
;
2454 ctx
->u
.rinfo
.zd_mtu
= re
->mtu
;
2455 ctx
->u
.rinfo
.zd_nexthop_mtu
= re
->nexthop_mtu
;
2456 ctx
->u
.rinfo
.zd_instance
= re
->instance
;
2457 ctx
->u
.rinfo
.zd_tag
= re
->tag
;
2458 ctx
->u
.rinfo
.zd_old_tag
= re
->tag
;
2459 ctx
->u
.rinfo
.zd_distance
= re
->distance
;
2461 table
= srcdest_rnode_table(rn
);
2464 ctx
->u
.rinfo
.zd_afi
= info
->afi
;
2465 ctx
->u
.rinfo
.zd_safi
= info
->safi
;
2467 /* Copy nexthops; recursive info is included too */
2468 copy_nexthops(&(ctx
->u
.rinfo
.zd_ng
.nexthop
),
2469 re
->nhe
->nhg
.nexthop
, NULL
);
2470 ctx
->u
.rinfo
.zd_nhg_id
= re
->nhe
->id
;
2472 /* Copy backup nexthop info, if present */
2473 if (re
->nhe
->backup_info
&& re
->nhe
->backup_info
->nhe
) {
2474 copy_nexthops(&(ctx
->u
.rinfo
.backup_ng
.nexthop
),
2475 re
->nhe
->backup_info
->nhe
->nhg
.nexthop
, NULL
);
2479 * Ensure that the dplane nexthops' flags are clear and copy
2480 * encapsulation information.
2482 for (ALL_NEXTHOPS(ctx
->u
.rinfo
.zd_ng
, nexthop
)) {
2483 UNSET_FLAG(nexthop
->flags
, NEXTHOP_FLAG_FIB
);
2485 /* Optionally capture extra interface info while we're in the
2486 * main zebra pthread - a plugin has to ask for this info.
2488 if (dplane_collect_extra_intf_info
) {
2489 ifp
= if_lookup_by_index(nexthop
->ifindex
,
2495 sizeof(struct dplane_intf_extra
));
2496 if_extra
->vrf_id
= nexthop
->vrf_id
;
2497 if_extra
->ifindex
= nexthop
->ifindex
;
2498 if_extra
->flags
= ifp
->flags
;
2499 if_extra
->status
= ifp
->status
;
2501 TAILQ_INSERT_TAIL(&ctx
->u
.rinfo
.intf_extra_q
,
2506 /* Check for available evpn encapsulations. */
2507 if (!CHECK_FLAG(re
->flags
, ZEBRA_FLAG_EVPN_ROUTE
))
2510 zl3vni
= zl3vni_from_vrf(nexthop
->vrf_id
);
2511 if (zl3vni
&& is_l3vni_oper_up(zl3vni
)) {
2512 nexthop
->nh_encap_type
= NET_VXLAN
;
2513 nexthop
->nh_encap
.vni
= zl3vni
->vni
;
2517 /* Don't need some info when capturing a system notification */
2518 if (op
== DPLANE_OP_SYS_ROUTE_ADD
||
2519 op
== DPLANE_OP_SYS_ROUTE_DELETE
) {
2524 /* Extract ns info - can't use pointers to 'core' structs */
2525 zvrf
= vrf_info_lookup(re
->vrf_id
);
2527 dplane_ctx_ns_init(ctx
, zns
, (op
== DPLANE_OP_ROUTE_UPDATE
));
2531 struct nhg_hash_entry
*nhe
= zebra_nhg_resolve(re
->nhe
);
2533 ctx
->u
.rinfo
.nhe
.id
= nhe
->id
;
2534 ctx
->u
.rinfo
.nhe
.old_id
= 0;
2536 * Check if the nhe is installed/queued before doing anything
2539 * If its a delete we only use the prefix anyway, so this only
2540 * matters for INSTALL/UPDATE.
2542 if (zebra_nhg_kernel_nexthops_enabled()
2543 && (((op
== DPLANE_OP_ROUTE_INSTALL
)
2544 || (op
== DPLANE_OP_ROUTE_UPDATE
))
2545 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_INSTALLED
)
2546 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_QUEUED
))) {
2551 re
->nhe_installed_id
= nhe
->id
;
2553 #endif /* HAVE_NETLINK */
2555 /* Trying out the sequence number idea, so we can try to detect
2556 * when a result is stale.
2558 re
->dplane_sequence
= zebra_router_get_next_sequence();
2559 ctx
->zd_seq
= re
->dplane_sequence
;
2568 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2570 * @ctx: Dataplane context to init
2571 * @op: Operation being performed
2572 * @nhe: Nexthop group hash entry
2574 * Return: Result status
2576 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2577 struct nhg_hash_entry
*nhe
)
2579 struct zebra_vrf
*zvrf
= NULL
;
2580 struct zebra_ns
*zns
= NULL
;
2587 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2589 /* Copy over nhe info */
2590 ctx
->u
.rinfo
.nhe
.id
= nhe
->id
;
2591 ctx
->u
.rinfo
.nhe
.afi
= nhe
->afi
;
2592 ctx
->u
.rinfo
.nhe
.vrf_id
= nhe
->vrf_id
;
2593 ctx
->u
.rinfo
.nhe
.type
= nhe
->type
;
2595 nexthop_group_copy(&(ctx
->u
.rinfo
.nhe
.ng
), &(nhe
->nhg
));
2597 /* If this is a group, convert it to a grp array of ids */
2598 if (!zebra_nhg_depends_is_empty(nhe
)
2599 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_RECURSIVE
))
2600 ctx
->u
.rinfo
.nhe
.nh_grp_count
= zebra_nhg_nhe2grp(
2601 ctx
->u
.rinfo
.nhe
.nh_grp
, nhe
, MULTIPATH_NUM
);
2603 zvrf
= vrf_info_lookup(nhe
->vrf_id
);
2606 * Fallback to default namespace if the vrf got ripped out from under
2609 zns
= zvrf
? zvrf
->zns
: zebra_ns_lookup(NS_DEFAULT
);
2612 * TODO: Might not need to mark this as an update, since
2613 * it probably won't require two messages
2615 dplane_ctx_ns_init(ctx
, zns
, (op
== DPLANE_OP_NH_UPDATE
));
2624 * Capture information for an LSP update in a dplane context.
2626 int dplane_ctx_lsp_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2627 struct zebra_lsp
*lsp
)
2630 struct zebra_nhlfe
*nhlfe
, *new_nhlfe
;
2633 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2635 /* Capture namespace info */
2636 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
),
2637 (op
== DPLANE_OP_LSP_UPDATE
));
2639 memset(&ctx
->u
.lsp
, 0, sizeof(ctx
->u
.lsp
));
2641 nhlfe_list_init(&(ctx
->u
.lsp
.nhlfe_list
));
2642 nhlfe_list_init(&(ctx
->u
.lsp
.backup_nhlfe_list
));
2644 /* This may be called to create/init a dplane context, not necessarily
2645 * to copy an lsp object.
2652 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
2653 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2654 dplane_op2str(op
), lsp
->ile
.in_label
,
2657 ctx
->u
.lsp
.ile
= lsp
->ile
;
2658 ctx
->u
.lsp
.addr_family
= lsp
->addr_family
;
2659 ctx
->u
.lsp
.num_ecmp
= lsp
->num_ecmp
;
2660 ctx
->u
.lsp
.flags
= lsp
->flags
;
2662 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2663 frr_each(nhlfe_list
, &lsp
->nhlfe_list
, nhlfe
) {
2664 /* Not sure if this is meaningful... */
2665 if (nhlfe
->nexthop
== NULL
)
2668 new_nhlfe
= zebra_mpls_lsp_add_nh(&(ctx
->u
.lsp
), nhlfe
->type
,
2670 if (new_nhlfe
== NULL
|| new_nhlfe
->nexthop
== NULL
) {
2675 /* Need to copy flags and backup info too */
2676 new_nhlfe
->flags
= nhlfe
->flags
;
2677 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
2679 if (CHECK_FLAG(new_nhlfe
->nexthop
->flags
,
2680 NEXTHOP_FLAG_HAS_BACKUP
)) {
2681 new_nhlfe
->nexthop
->backup_num
=
2682 nhlfe
->nexthop
->backup_num
;
2683 memcpy(new_nhlfe
->nexthop
->backup_idx
,
2684 nhlfe
->nexthop
->backup_idx
,
2685 new_nhlfe
->nexthop
->backup_num
);
2688 if (nhlfe
== lsp
->best_nhlfe
)
2689 ctx
->u
.lsp
.best_nhlfe
= new_nhlfe
;
2695 /* Capture backup nhlfes/nexthops */
2696 frr_each(nhlfe_list
, &lsp
->backup_nhlfe_list
, nhlfe
) {
2697 /* Not sure if this is meaningful... */
2698 if (nhlfe
->nexthop
== NULL
)
2701 new_nhlfe
= zebra_mpls_lsp_add_backup_nh(&(ctx
->u
.lsp
),
2704 if (new_nhlfe
== NULL
|| new_nhlfe
->nexthop
== NULL
) {
2709 /* Need to copy flags too */
2710 new_nhlfe
->flags
= nhlfe
->flags
;
2711 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
2714 /* On error the ctx will be cleaned-up, so we don't need to
2715 * deal with any allocated nhlfe or nexthop structs here.
2723 * Capture information for an LSP update in a dplane context.
2725 static int dplane_ctx_pw_init(struct zebra_dplane_ctx
*ctx
,
2726 enum dplane_op_e op
,
2727 struct zebra_pw
*pw
)
2732 struct route_table
*table
;
2733 struct route_node
*rn
;
2734 struct route_entry
*re
;
2735 const struct nexthop_group
*nhg
;
2736 struct nexthop
*nh
, *newnh
, *last_nh
;
2738 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
2739 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2740 dplane_op2str(op
), pw
->ifname
, pw
->local_label
,
2744 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2746 /* Capture namespace info: no netlink support as of 12/18,
2747 * but just in case...
2749 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
2751 memset(&ctx
->u
.pw
, 0, sizeof(ctx
->u
.pw
));
2753 /* This name appears to be c-string, so we use string copy. */
2754 strlcpy(ctx
->zd_ifname
, pw
->ifname
, sizeof(ctx
->zd_ifname
));
2756 ctx
->zd_vrf_id
= pw
->vrf_id
;
2757 ctx
->zd_ifindex
= pw
->ifindex
;
2758 ctx
->u
.pw
.type
= pw
->type
;
2759 ctx
->u
.pw
.af
= pw
->af
;
2760 ctx
->u
.pw
.local_label
= pw
->local_label
;
2761 ctx
->u
.pw
.remote_label
= pw
->remote_label
;
2762 ctx
->u
.pw
.flags
= pw
->flags
;
2764 ctx
->u
.pw
.dest
= pw
->nexthop
;
2766 ctx
->u
.pw
.fields
= pw
->data
;
2768 /* Capture nexthop info for the pw destination. We need to look
2769 * up and use zebra datastructs, but we're running in the zebra
2770 * pthread here so that should be ok.
2772 memcpy(&p
.u
, &pw
->nexthop
, sizeof(pw
->nexthop
));
2774 p
.prefixlen
= ((pw
->af
== AF_INET
) ? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN
);
2776 afi
= (pw
->af
== AF_INET
) ? AFI_IP
: AFI_IP6
;
2777 table
= zebra_vrf_table(afi
, SAFI_UNICAST
, pw
->vrf_id
);
2781 rn
= route_node_match(table
, &p
);
2786 RNODE_FOREACH_RE(rn
, re
) {
2787 if (CHECK_FLAG(re
->flags
, ZEBRA_FLAG_SELECTED
))
2792 /* We'll capture a 'fib' list of nexthops that meet our
2793 * criteria: installed, and labelled.
2795 nhg
= rib_get_fib_nhg(re
);
2798 if (nhg
&& nhg
->nexthop
) {
2799 for (ALL_NEXTHOPS_PTR(nhg
, nh
)) {
2800 if (!CHECK_FLAG(nh
->flags
, NEXTHOP_FLAG_ACTIVE
)
2801 || CHECK_FLAG(nh
->flags
,
2802 NEXTHOP_FLAG_RECURSIVE
)
2803 || nh
->nh_label
== NULL
)
2806 newnh
= nexthop_dup(nh
, NULL
);
2809 NEXTHOP_APPEND(last_nh
, newnh
);
2811 ctx
->u
.pw
.fib_nhg
.nexthop
= newnh
;
2816 /* Include any installed backup nexthops also. */
2817 nhg
= rib_get_fib_backup_nhg(re
);
2818 if (nhg
&& nhg
->nexthop
) {
2819 for (ALL_NEXTHOPS_PTR(nhg
, nh
)) {
2820 if (!CHECK_FLAG(nh
->flags
, NEXTHOP_FLAG_ACTIVE
)
2821 || CHECK_FLAG(nh
->flags
,
2822 NEXTHOP_FLAG_RECURSIVE
)
2823 || nh
->nh_label
== NULL
)
2826 newnh
= nexthop_dup(nh
, NULL
);
2829 NEXTHOP_APPEND(last_nh
, newnh
);
2831 ctx
->u
.pw
.fib_nhg
.nexthop
= newnh
;
2836 /* Copy primary nexthops; recursive info is included too */
2837 assert(re
->nhe
!= NULL
); /* SA warning */
2838 copy_nexthops(&(ctx
->u
.pw
.primary_nhg
.nexthop
),
2839 re
->nhe
->nhg
.nexthop
, NULL
);
2840 ctx
->u
.pw
.nhg_id
= re
->nhe
->id
;
2842 /* Copy backup nexthop info, if present */
2843 if (re
->nhe
->backup_info
&& re
->nhe
->backup_info
->nhe
) {
2844 copy_nexthops(&(ctx
->u
.pw
.backup_nhg
.nexthop
),
2845 re
->nhe
->backup_info
->nhe
->nhg
.nexthop
,
2849 route_unlock_node(rn
);
/*
 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
 * PBR rule from a zebra rule struct.
 *
 * @dplane_rule: Dataplane internal representation of a rule
 * @rule: Zebra PBR rule to copy from
 */
static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
					struct zebra_pbr_rule *rule)
{
	/* Priority and target table from the rule's action. */
	dplane_rule->priority = rule->rule.priority;
	dplane_rule->table = rule->rule.action.table;

	/* Match/filter criteria. */
	dplane_rule->filter_bm = rule->rule.filter.filter_bm;
	dplane_rule->fwmark = rule->rule.filter.fwmark;
	dplane_rule->dsfield = rule->rule.filter.dsfield;
	dplane_rule->ip_proto = rule->rule.filter.ip_proto;
	prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
	prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);

	/* VLAN/queue action fields. */
	dplane_rule->action_pcp = rule->rule.action.pcp;
	dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
	dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
	dplane_rule->action_queue_id = rule->rule.action.queue_id;

	strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
}
2886 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
2888 * @ctx: Dataplane context to init
2889 * @op: Operation being performed
2890 * @new_rule: PBR rule
2892 * Return: Result status
2894 static int dplane_ctx_rule_init(struct zebra_dplane_ctx
*ctx
,
2895 enum dplane_op_e op
,
2896 struct zebra_pbr_rule
*new_rule
,
2897 struct zebra_pbr_rule
*old_rule
)
2899 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
2901 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
2902 dplane_op2str(op
), new_rule
->ifname
,
2903 new_rule
->rule
.priority
, new_rule
->rule
.filter
.fwmark
,
2904 &new_rule
->rule
.filter
.src_ip
,
2905 &new_rule
->rule
.filter
.dst_ip
,
2906 new_rule
->rule
.action
.table
);
2909 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2911 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
),
2912 op
== DPLANE_OP_RULE_UPDATE
);
2914 ctx
->zd_vrf_id
= new_rule
->vrf_id
;
2915 strlcpy(ctx
->zd_ifname
, new_rule
->ifname
, sizeof(ctx
->zd_ifname
));
2917 ctx
->u
.rule
.sock
= new_rule
->sock
;
2918 ctx
->u
.rule
.unique
= new_rule
->rule
.unique
;
2919 ctx
->u
.rule
.seq
= new_rule
->rule
.seq
;
2921 dplane_ctx_rule_init_single(&ctx
->u
.rule
.new, new_rule
);
2922 if (op
== DPLANE_OP_RULE_UPDATE
)
2923 dplane_ctx_rule_init_single(&ctx
->u
.rule
.old
, old_rule
);
2929 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
2932 * @ctx: Dataplane context to init
2933 * @op: Operation being performed
2934 * @new_rule: PBR iptable
2936 * Return: Result status
2938 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx
*ctx
,
2939 enum dplane_op_e op
,
2940 struct zebra_pbr_iptable
*iptable
)
2943 struct listnode
*node
;
2945 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
2947 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
2948 dplane_op2str(op
), iptable
->unique
, iptable
->fwmark
,
2949 family2str(iptable
->family
),
2950 iptable
->action
== ZEBRA_IPTABLES_DROP
? "Drop"
2955 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2957 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
2959 ctx
->zd_vrf_id
= iptable
->vrf_id
;
2960 memcpy(&ctx
->u
.iptable
, iptable
, sizeof(struct zebra_pbr_iptable
));
2961 ctx
->u
.iptable
.interface_name_list
= NULL
;
2962 if (iptable
->nb_interface
> 0) {
2963 ctx
->u
.iptable
.interface_name_list
= list_new();
2964 for (ALL_LIST_ELEMENTS_RO(iptable
->interface_name_list
, node
,
2966 listnode_add(ctx
->u
.iptable
.interface_name_list
,
2967 XSTRDUP(MTYPE_DP_NETFILTER
, ifname
));
2974 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
2976 * @ctx: Dataplane context to init
2977 * @op: Operation being performed
2978 * @new_rule: PBR ipset
2980 * Return: Result status
2982 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx
*ctx
,
2983 enum dplane_op_e op
,
2984 struct zebra_pbr_ipset
*ipset
)
2986 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
2987 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
2988 dplane_op2str(op
), ipset
->ipset_name
, ipset
->unique
,
2989 family2str(ipset
->family
),
2990 zebra_pbr_ipset_type2str(ipset
->type
));
2994 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2996 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
2998 ctx
->zd_vrf_id
= ipset
->vrf_id
;
3000 memcpy(&ctx
->u
.ipset
, ipset
, sizeof(struct zebra_pbr_ipset
));
3005 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3008 * @ctx: Dataplane context to init
3009 * @op: Operation being performed
3010 * @new_rule: PBR ipset
3012 * Return: Result status
3015 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
3016 struct zebra_pbr_ipset_entry
*ipset_entry
)
3018 struct zebra_pbr_ipset
*ipset
;
3020 ipset
= ipset_entry
->backpointer
;
3021 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
3022 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3023 dplane_op2str(op
), ipset
->ipset_name
,
3024 ipset_entry
->unique
, ipset_entry
->filter_bm
);
3028 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3030 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
3032 ctx
->zd_vrf_id
= ipset
->vrf_id
;
3034 memcpy(&ctx
->u
.ipset_entry
.entry
, ipset_entry
,
3035 sizeof(struct zebra_pbr_ipset_entry
));
3036 ctx
->u
.ipset_entry
.entry
.backpointer
= NULL
;
3037 ctx
->u
.ipset_entry
.info
.type
= ipset
->type
;
3038 ctx
->u
.ipset_entry
.info
.family
= ipset
->family
;
3039 memcpy(&ctx
->u
.ipset_entry
.info
.ipset_name
, &ipset
->ipset_name
,
3040 ZEBRA_IPSET_NAME_SIZE
);
3047 * Enqueue a new update,
3048 * and ensure an event is active for the dataplane pthread.
3050 static int dplane_update_enqueue(struct zebra_dplane_ctx
*ctx
)
3053 uint32_t high
, curr
;
3055 /* Enqueue for processing by the dataplane pthread */
3058 TAILQ_INSERT_TAIL(&zdplane_info
.dg_update_ctx_q
, ctx
,
3063 curr
= atomic_fetch_add_explicit(
3064 &(zdplane_info
.dg_routes_queued
),
3065 1, memory_order_seq_cst
);
3067 curr
++; /* We got the pre-incremented value */
3069 /* Maybe update high-water counter also */
3070 high
= atomic_load_explicit(&zdplane_info
.dg_routes_queued_max
,
3071 memory_order_seq_cst
);
3072 while (high
< curr
) {
3073 if (atomic_compare_exchange_weak_explicit(
3074 &zdplane_info
.dg_routes_queued_max
,
3076 memory_order_seq_cst
,
3077 memory_order_seq_cst
))
3081 /* Ensure that an event for the dataplane thread is active */
3082 ret
= dplane_provider_work_ready();
3088 * Utility that prepares a route update and enqueues it for processing
3090 static enum zebra_dplane_result
3091 dplane_route_update_internal(struct route_node
*rn
,
3092 struct route_entry
*re
,
3093 struct route_entry
*old_re
,
3094 enum dplane_op_e op
)
3096 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3098 struct zebra_dplane_ctx
*ctx
= NULL
;
3100 /* Obtain context block */
3101 ctx
= dplane_ctx_alloc();
3103 /* Init context with info from zebra data structs */
3104 ret
= dplane_ctx_route_init(ctx
, op
, rn
, re
);
3106 /* Capture some extra info for update case
3107 * where there's a different 'old' route.
3109 if ((op
== DPLANE_OP_ROUTE_UPDATE
) &&
3110 old_re
&& (old_re
!= re
)) {
3112 old_re
->dplane_sequence
=
3113 zebra_router_get_next_sequence();
3114 ctx
->zd_old_seq
= old_re
->dplane_sequence
;
3116 ctx
->u
.rinfo
.zd_old_tag
= old_re
->tag
;
3117 ctx
->u
.rinfo
.zd_old_type
= old_re
->type
;
3118 ctx
->u
.rinfo
.zd_old_instance
= old_re
->instance
;
3119 ctx
->u
.rinfo
.zd_old_distance
= old_re
->distance
;
3120 ctx
->u
.rinfo
.zd_old_metric
= old_re
->metric
;
3121 ctx
->u
.rinfo
.nhe
.old_id
= old_re
->nhe
->id
;
3123 #ifndef HAVE_NETLINK
3124 /* For bsd, capture previous re's nexthops too, sigh.
3125 * We'll need these to do per-nexthop deletes.
3127 copy_nexthops(&(ctx
->u
.rinfo
.zd_old_ng
.nexthop
),
3128 old_re
->nhe
->nhg
.nexthop
, NULL
);
3130 if (zebra_nhg_get_backup_nhg(old_re
->nhe
) != NULL
) {
3131 struct nexthop_group
*nhg
;
3132 struct nexthop
**nh
;
3134 nhg
= zebra_nhg_get_backup_nhg(old_re
->nhe
);
3135 nh
= &(ctx
->u
.rinfo
.old_backup_ng
.nexthop
);
3138 copy_nexthops(nh
, nhg
->nexthop
, NULL
);
3140 #endif /* !HAVE_NETLINK */
3144 * If the old and new context type, and nexthop group id
3145 * are the same there is no need to send down a route replace
3146 * as that we know we have sent a nexthop group replace
3147 * or an upper level protocol has sent us the exact
3150 if ((dplane_ctx_get_type(ctx
) == dplane_ctx_get_old_type(ctx
))
3151 && (dplane_ctx_get_nhe_id(ctx
)
3152 == dplane_ctx_get_old_nhe_id(ctx
))
3153 && (dplane_ctx_get_nhe_id(ctx
) >= ZEBRA_NHG_PROTO_LOWER
)) {
3154 struct nexthop
*nexthop
;
3156 if (IS_ZEBRA_DEBUG_DPLANE
)
3158 "%s: Ignoring Route exactly the same",
3161 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx
),
3163 if (CHECK_FLAG(nexthop
->flags
,
3164 NEXTHOP_FLAG_RECURSIVE
))
3167 if (CHECK_FLAG(nexthop
->flags
,
3168 NEXTHOP_FLAG_ACTIVE
))
3169 SET_FLAG(nexthop
->flags
,
3173 dplane_ctx_free(&ctx
);
3174 return ZEBRA_DPLANE_REQUEST_SUCCESS
;
3177 /* Enqueue context for processing */
3178 ret
= dplane_update_enqueue(ctx
);
3181 /* Update counter */
3182 atomic_fetch_add_explicit(&zdplane_info
.dg_routes_in
, 1,
3183 memory_order_relaxed
);
3186 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3188 atomic_fetch_add_explicit(&zdplane_info
.dg_route_errors
, 1,
3189 memory_order_relaxed
);
3191 dplane_ctx_free(&ctx
);
3198 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3200 * @nhe: Nexthop group hash entry where the change occured
3201 * @op: The operation to be enqued
3203 * Return: Result of the change
3205 static enum zebra_dplane_result
3206 dplane_nexthop_update_internal(struct nhg_hash_entry
*nhe
, enum dplane_op_e op
)
3208 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3210 struct zebra_dplane_ctx
*ctx
= NULL
;
3212 /* Obtain context block */
3213 ctx
= dplane_ctx_alloc();
3219 ret
= dplane_ctx_nexthop_init(ctx
, op
, nhe
);
3221 ret
= dplane_update_enqueue(ctx
);
3224 /* Update counter */
3225 atomic_fetch_add_explicit(&zdplane_info
.dg_nexthops_in
, 1,
3226 memory_order_relaxed
);
3229 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3231 atomic_fetch_add_explicit(&zdplane_info
.dg_nexthop_errors
, 1,
3232 memory_order_relaxed
);
3234 dplane_ctx_free(&ctx
);
3241 * Enqueue a route 'add' for the dataplane.
3243 enum zebra_dplane_result
dplane_route_add(struct route_node
*rn
,
3244 struct route_entry
*re
)
3246 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3248 if (rn
== NULL
|| re
== NULL
)
3251 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3252 DPLANE_OP_ROUTE_INSTALL
);
3259 * Enqueue a route update for the dataplane.
3261 enum zebra_dplane_result
dplane_route_update(struct route_node
*rn
,
3262 struct route_entry
*re
,
3263 struct route_entry
*old_re
)
3265 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3267 if (rn
== NULL
|| re
== NULL
)
3270 ret
= dplane_route_update_internal(rn
, re
, old_re
,
3271 DPLANE_OP_ROUTE_UPDATE
);
3277 * Enqueue a route removal for the dataplane.
3279 enum zebra_dplane_result
dplane_route_delete(struct route_node
*rn
,
3280 struct route_entry
*re
)
3282 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3284 if (rn
== NULL
|| re
== NULL
)
3287 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3288 DPLANE_OP_ROUTE_DELETE
);
3295 * Notify the dplane when system/connected routes change.
3297 enum zebra_dplane_result
dplane_sys_route_add(struct route_node
*rn
,
3298 struct route_entry
*re
)
3300 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3302 /* Ignore this event unless a provider plugin has requested it. */
3303 if (!zdplane_info
.dg_sys_route_notifs
) {
3304 ret
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3308 if (rn
== NULL
|| re
== NULL
)
3311 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3312 DPLANE_OP_SYS_ROUTE_ADD
);
3319 * Notify the dplane when system/connected routes are deleted.
3321 enum zebra_dplane_result
dplane_sys_route_del(struct route_node
*rn
,
3322 struct route_entry
*re
)
3324 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3326 /* Ignore this event unless a provider plugin has requested it. */
3327 if (!zdplane_info
.dg_sys_route_notifs
) {
3328 ret
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3332 if (rn
== NULL
|| re
== NULL
)
3335 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3336 DPLANE_OP_SYS_ROUTE_DELETE
);
3343 * Update from an async notification, to bring other fibs up-to-date.
3345 enum zebra_dplane_result
3346 dplane_route_notif_update(struct route_node
*rn
,
3347 struct route_entry
*re
,
3348 enum dplane_op_e op
,
3349 struct zebra_dplane_ctx
*ctx
)
3351 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3353 struct zebra_dplane_ctx
*new_ctx
= NULL
;
3354 struct nexthop
*nexthop
;
3355 struct nexthop_group
*nhg
;
3357 if (rn
== NULL
|| re
== NULL
)
3360 new_ctx
= dplane_ctx_alloc();
3361 if (new_ctx
== NULL
)
3364 /* Init context with info from zebra data structs */
3365 dplane_ctx_route_init(new_ctx
, op
, rn
, re
);
3367 /* For add/update, need to adjust the nexthops so that we match
3368 * the notification state, which may not be the route-entry/RIB
3371 if (op
== DPLANE_OP_ROUTE_UPDATE
||
3372 op
== DPLANE_OP_ROUTE_INSTALL
) {
3374 nexthops_free(new_ctx
->u
.rinfo
.zd_ng
.nexthop
);
3375 new_ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
3377 nhg
= rib_get_fib_nhg(re
);
3378 if (nhg
&& nhg
->nexthop
)
3379 copy_nexthops(&(new_ctx
->u
.rinfo
.zd_ng
.nexthop
),
3380 nhg
->nexthop
, NULL
);
3382 /* Check for installed backup nexthops also */
3383 nhg
= rib_get_fib_backup_nhg(re
);
3384 if (nhg
&& nhg
->nexthop
) {
3385 copy_nexthops(&(new_ctx
->u
.rinfo
.zd_ng
.nexthop
),
3386 nhg
->nexthop
, NULL
);
3389 for (ALL_NEXTHOPS(new_ctx
->u
.rinfo
.zd_ng
, nexthop
))
3390 UNSET_FLAG(nexthop
->flags
, NEXTHOP_FLAG_FIB
);
3394 /* Capture info about the source of the notification, in 'ctx' */
3395 dplane_ctx_set_notif_provider(new_ctx
,
3396 dplane_ctx_get_notif_provider(ctx
));
3398 ret
= dplane_update_enqueue(new_ctx
);
3402 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3404 dplane_ctx_free(&new_ctx
);
3410 * Enqueue a nexthop add for the dataplane.
3412 enum zebra_dplane_result
dplane_nexthop_add(struct nhg_hash_entry
*nhe
)
3414 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3417 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_INSTALL
);
3422 * Enqueue a nexthop update for the dataplane.
3424 * Might not need this func since zebra's nexthop objects should be immutable?
3426 enum zebra_dplane_result
dplane_nexthop_update(struct nhg_hash_entry
*nhe
)
3428 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3431 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_UPDATE
);
3436 * Enqueue a nexthop removal for the dataplane.
3438 enum zebra_dplane_result
dplane_nexthop_delete(struct nhg_hash_entry
*nhe
)
3440 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3443 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_DELETE
);
3449 * Enqueue LSP add for the dataplane.
3451 enum zebra_dplane_result
dplane_lsp_add(struct zebra_lsp
*lsp
)
3453 enum zebra_dplane_result ret
=
3454 lsp_update_internal(lsp
, DPLANE_OP_LSP_INSTALL
);
3460 * Enqueue LSP update for the dataplane.
3462 enum zebra_dplane_result
dplane_lsp_update(struct zebra_lsp
*lsp
)
3464 enum zebra_dplane_result ret
=
3465 lsp_update_internal(lsp
, DPLANE_OP_LSP_UPDATE
);
3471 * Enqueue LSP delete for the dataplane.
3473 enum zebra_dplane_result
dplane_lsp_delete(struct zebra_lsp
*lsp
)
3475 enum zebra_dplane_result ret
=
3476 lsp_update_internal(lsp
, DPLANE_OP_LSP_DELETE
);
3481 /* Update or un-install resulting from an async notification */
3482 enum zebra_dplane_result
3483 dplane_lsp_notif_update(struct zebra_lsp
*lsp
, enum dplane_op_e op
,
3484 struct zebra_dplane_ctx
*notif_ctx
)
3486 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3488 struct zebra_dplane_ctx
*ctx
= NULL
;
3489 struct nhlfe_list_head
*head
;
3490 struct zebra_nhlfe
*nhlfe
, *new_nhlfe
;
3492 /* Obtain context block */
3493 ctx
= dplane_ctx_alloc();
3499 /* Copy info from zebra LSP */
3500 ret
= dplane_ctx_lsp_init(ctx
, op
, lsp
);
3504 /* Add any installed backup nhlfes */
3505 head
= &(ctx
->u
.lsp
.backup_nhlfe_list
);
3506 frr_each(nhlfe_list
, head
, nhlfe
) {
3508 if (CHECK_FLAG(nhlfe
->flags
, NHLFE_FLAG_INSTALLED
) &&
3509 CHECK_FLAG(nhlfe
->nexthop
->flags
, NEXTHOP_FLAG_FIB
)) {
3510 new_nhlfe
= zebra_mpls_lsp_add_nh(&(ctx
->u
.lsp
),
3514 /* Need to copy flags too */
3515 new_nhlfe
->flags
= nhlfe
->flags
;
3516 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
3520 /* Capture info about the source of the notification */
3521 dplane_ctx_set_notif_provider(
3523 dplane_ctx_get_notif_provider(notif_ctx
));
3525 ret
= dplane_update_enqueue(ctx
);
3528 /* Update counter */
3529 atomic_fetch_add_explicit(&zdplane_info
.dg_lsps_in
, 1,
3530 memory_order_relaxed
);
3533 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3535 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
, 1,
3536 memory_order_relaxed
);
3538 dplane_ctx_free(&ctx
);
3544 * Enqueue pseudowire install for the dataplane.
3546 enum zebra_dplane_result
dplane_pw_install(struct zebra_pw
*pw
)
3548 return pw_update_internal(pw
, DPLANE_OP_PW_INSTALL
);
3552 * Enqueue pseudowire un-install for the dataplane.
3554 enum zebra_dplane_result
dplane_pw_uninstall(struct zebra_pw
*pw
)
3556 return pw_update_internal(pw
, DPLANE_OP_PW_UNINSTALL
);
3560 * Common internal LSP update utility
3562 static enum zebra_dplane_result
lsp_update_internal(struct zebra_lsp
*lsp
,
3563 enum dplane_op_e op
)
3565 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3567 struct zebra_dplane_ctx
*ctx
= NULL
;
3569 /* Obtain context block */
3570 ctx
= dplane_ctx_alloc();
3572 ret
= dplane_ctx_lsp_init(ctx
, op
, lsp
);
3576 ret
= dplane_update_enqueue(ctx
);
3579 /* Update counter */
3580 atomic_fetch_add_explicit(&zdplane_info
.dg_lsps_in
, 1,
3581 memory_order_relaxed
);
3584 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3586 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
, 1,
3587 memory_order_relaxed
);
3588 dplane_ctx_free(&ctx
);
3595 * Internal, common handler for pseudowire updates.
3597 static enum zebra_dplane_result
pw_update_internal(struct zebra_pw
*pw
,
3598 enum dplane_op_e op
)
3600 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3602 struct zebra_dplane_ctx
*ctx
= NULL
;
3604 ctx
= dplane_ctx_alloc();
3606 ret
= dplane_ctx_pw_init(ctx
, op
, pw
);
3610 ret
= dplane_update_enqueue(ctx
);
3613 /* Update counter */
3614 atomic_fetch_add_explicit(&zdplane_info
.dg_pws_in
, 1,
3615 memory_order_relaxed
);
3618 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3620 atomic_fetch_add_explicit(&zdplane_info
.dg_pw_errors
, 1,
3621 memory_order_relaxed
);
3622 dplane_ctx_free(&ctx
);
3629 * Enqueue access br_port update.
3631 enum zebra_dplane_result
3632 dplane_br_port_update(const struct interface
*ifp
, bool non_df
,
3633 uint32_t sph_filter_cnt
,
3634 const struct in_addr
*sph_filters
, uint32_t backup_nhg_id
)
3636 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3639 struct zebra_dplane_ctx
*ctx
= NULL
;
3640 struct zebra_ns
*zns
;
3641 enum dplane_op_e op
= DPLANE_OP_BR_PORT_UPDATE
;
3644 flags
|= DPLANE_BR_PORT_NON_DF
;
3646 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
|| IS_ZEBRA_DEBUG_EVPN_MH_ES
) {
3648 char vtep_str
[ES_VTEP_LIST_STR_SZ
];
3651 for (i
= 0; i
< sph_filter_cnt
; ++i
) {
3652 snprintfrr(vtep_str
+ strlen(vtep_str
),
3653 sizeof(vtep_str
) - strlen(vtep_str
), "%pI4 ",
3657 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3658 dplane_op2str(op
), ifp
->name
, flags
, backup_nhg_id
,
3662 ctx
= dplane_ctx_alloc();
3665 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3666 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
3668 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
3669 dplane_ctx_ns_init(ctx
, zns
, false);
3671 ctx
->zd_ifindex
= ifp
->ifindex
;
3672 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
3674 /* Init the br-port-specific data area */
3675 memset(&ctx
->u
.br_port
, 0, sizeof(ctx
->u
.br_port
));
3677 ctx
->u
.br_port
.flags
= flags
;
3678 ctx
->u
.br_port
.backup_nhg_id
= backup_nhg_id
;
3679 ctx
->u
.br_port
.sph_filter_cnt
= sph_filter_cnt
;
3680 memcpy(ctx
->u
.br_port
.sph_filters
, sph_filters
,
3681 sizeof(ctx
->u
.br_port
.sph_filters
[0]) * sph_filter_cnt
);
3683 /* Enqueue for processing on the dplane pthread */
3684 ret
= dplane_update_enqueue(ctx
);
3686 /* Increment counter */
3687 atomic_fetch_add_explicit(&zdplane_info
.dg_br_port_in
, 1,
3688 memory_order_relaxed
);
3691 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3694 atomic_fetch_add_explicit(&zdplane_info
.dg_br_port_errors
, 1,
3695 memory_order_relaxed
);
3696 dplane_ctx_free(&ctx
);
3703 * Enqueue interface address add for the dataplane.
3705 enum zebra_dplane_result
dplane_intf_addr_set(const struct interface
*ifp
,
3706 const struct connected
*ifc
)
3708 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3709 /* Extra checks for this OS path. */
3711 /* Don't configure PtP addresses on broadcast ifs or reverse */
3712 if (!(ifp
->flags
& IFF_POINTOPOINT
) != !CONNECTED_PEER(ifc
)) {
3713 if (IS_ZEBRA_DEBUG_KERNEL
|| IS_ZEBRA_DEBUG_DPLANE
)
3714 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3716 return ZEBRA_DPLANE_REQUEST_FAILURE
;
3720 return intf_addr_update_internal(ifp
, ifc
, DPLANE_OP_ADDR_INSTALL
);
3724 * Enqueue interface address remove/uninstall for the dataplane.
3726 enum zebra_dplane_result
dplane_intf_addr_unset(const struct interface
*ifp
,
3727 const struct connected
*ifc
)
3729 return intf_addr_update_internal(ifp
, ifc
, DPLANE_OP_ADDR_UNINSTALL
);
3732 static enum zebra_dplane_result
intf_addr_update_internal(
3733 const struct interface
*ifp
, const struct connected
*ifc
,
3734 enum dplane_op_e op
)
3736 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3738 struct zebra_dplane_ctx
*ctx
= NULL
;
3739 struct zebra_ns
*zns
;
3741 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
3742 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3743 dplane_op2str(op
), ifp
->ifindex
, ifp
->vrf
->vrf_id
,
3746 ctx
= dplane_ctx_alloc();
3749 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3750 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
3752 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
3753 dplane_ctx_ns_init(ctx
, zns
, false);
3755 /* Init the interface-addr-specific area */
3756 memset(&ctx
->u
.intf
, 0, sizeof(ctx
->u
.intf
));
3758 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
3759 ctx
->zd_ifindex
= ifp
->ifindex
;
3760 ctx
->u
.intf
.prefix
= *(ifc
->address
);
3762 if (if_is_broadcast(ifp
))
3763 ctx
->u
.intf
.flags
|= DPLANE_INTF_BROADCAST
;
3765 if (CONNECTED_PEER(ifc
)) {
3766 ctx
->u
.intf
.dest_prefix
= *(ifc
->destination
);
3767 ctx
->u
.intf
.flags
|=
3768 (DPLANE_INTF_CONNECTED
| DPLANE_INTF_HAS_DEST
);
3771 if (CHECK_FLAG(ifc
->flags
, ZEBRA_IFA_SECONDARY
))
3772 ctx
->u
.intf
.flags
|= DPLANE_INTF_SECONDARY
;
3777 ctx
->u
.intf
.flags
|= DPLANE_INTF_HAS_LABEL
;
3779 /* Use embedded buffer if it's adequate; else allocate. */
3780 len
= strlen(ifc
->label
);
3782 if (len
< sizeof(ctx
->u
.intf
.label_buf
)) {
3783 strlcpy(ctx
->u
.intf
.label_buf
, ifc
->label
,
3784 sizeof(ctx
->u
.intf
.label_buf
));
3785 ctx
->u
.intf
.label
= ctx
->u
.intf
.label_buf
;
3787 ctx
->u
.intf
.label
= XSTRDUP(MTYPE_DP_CTX
, ifc
->label
);
3791 ret
= dplane_update_enqueue(ctx
);
3793 /* Increment counter */
3794 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_addrs_in
, 1,
3795 memory_order_relaxed
);
3798 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3801 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_addr_errors
,
3802 1, memory_order_relaxed
);
3803 dplane_ctx_free(&ctx
);
3810 * Enqueue vxlan/evpn mac add (or update).
3812 enum zebra_dplane_result
dplane_rem_mac_add(const struct interface
*ifp
,
3813 const struct interface
*bridge_ifp
,
3815 const struct ethaddr
*mac
,
3816 struct in_addr vtep_ip
,
3821 enum zebra_dplane_result result
;
3822 uint32_t update_flags
= 0;
3824 update_flags
|= DPLANE_MAC_REMOTE
;
3826 update_flags
|= DPLANE_MAC_WAS_STATIC
;
3828 /* Use common helper api */
3829 result
= mac_update_common(DPLANE_OP_MAC_INSTALL
, ifp
, bridge_ifp
,
3830 vid
, mac
, vtep_ip
, sticky
, nhg_id
, update_flags
);
3835 * Enqueue vxlan/evpn mac delete.
3837 enum zebra_dplane_result
dplane_rem_mac_del(const struct interface
*ifp
,
3838 const struct interface
*bridge_ifp
,
3840 const struct ethaddr
*mac
,
3841 struct in_addr vtep_ip
)
3843 enum zebra_dplane_result result
;
3844 uint32_t update_flags
= 0;
3846 update_flags
|= DPLANE_MAC_REMOTE
;
3848 /* Use common helper api */
3849 result
= mac_update_common(DPLANE_OP_MAC_DELETE
, ifp
, bridge_ifp
,
3850 vid
, mac
, vtep_ip
, false, 0, update_flags
);
3855 * API to configure link local with either MAC address or IP information
3857 enum zebra_dplane_result
dplane_neigh_ip_update(enum dplane_op_e op
,
3858 const struct interface
*ifp
,
3859 struct ipaddr
*link_ip
,
3861 uint32_t ndm_state
, int protocol
)
3863 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3865 uint32_t update_flags
;
3867 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
3868 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
3869 __func__
, dplane_op2str(op
), ifp
->name
, link_ip
, ip
);
3871 if (ndm_state
== ZEBRA_NEIGH_STATE_REACHABLE
)
3872 state
= DPLANE_NUD_REACHABLE
;
3873 else if (ndm_state
== ZEBRA_NEIGH_STATE_FAILED
)
3874 state
= DPLANE_NUD_FAILED
;
3876 update_flags
= DPLANE_NEIGH_NO_EXTENSION
;
3878 result
= neigh_update_internal(op
, ifp
, (const void *)link_ip
,
3879 ipaddr_family(link_ip
), ip
, 0, state
,
3880 update_flags
, protocol
);
3886 * Enqueue local mac add (or update).
3888 enum zebra_dplane_result
dplane_local_mac_add(const struct interface
*ifp
,
3889 const struct interface
*bridge_ifp
,
3891 const struct ethaddr
*mac
,
3893 uint32_t set_static
,
3894 uint32_t set_inactive
)
3896 enum zebra_dplane_result result
;
3897 uint32_t update_flags
= 0;
3898 struct in_addr vtep_ip
;
3901 update_flags
|= DPLANE_MAC_SET_STATIC
;
3904 update_flags
|= DPLANE_MAC_SET_INACTIVE
;
3908 /* Use common helper api */
3909 result
= mac_update_common(DPLANE_OP_MAC_INSTALL
, ifp
, bridge_ifp
,
3910 vid
, mac
, vtep_ip
, sticky
, 0,
3916 * Enqueue local mac del
3918 enum zebra_dplane_result
3919 dplane_local_mac_del(const struct interface
*ifp
,
3920 const struct interface
*bridge_ifp
, vlanid_t vid
,
3921 const struct ethaddr
*mac
)
3923 enum zebra_dplane_result result
;
3924 struct in_addr vtep_ip
;
3928 /* Use common helper api */
3929 result
= mac_update_common(DPLANE_OP_MAC_DELETE
, ifp
, bridge_ifp
, vid
,
3930 mac
, vtep_ip
, false, 0, 0);
3934 * Public api to init an empty context - either newly-allocated or
3935 * reset/cleared - for a MAC update.
3937 void dplane_mac_init(struct zebra_dplane_ctx
*ctx
,
3938 const struct interface
*ifp
,
3939 const struct interface
*br_ifp
,
3941 const struct ethaddr
*mac
,
3942 struct in_addr vtep_ip
,
3945 uint32_t update_flags
)
3947 struct zebra_ns
*zns
;
3949 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3950 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
3952 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
3953 dplane_ctx_ns_init(ctx
, zns
, false);
3955 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
3956 ctx
->zd_ifindex
= ifp
->ifindex
;
3958 /* Init the mac-specific data area */
3959 memset(&ctx
->u
.macinfo
, 0, sizeof(ctx
->u
.macinfo
));
3961 ctx
->u
.macinfo
.br_ifindex
= br_ifp
->ifindex
;
3962 ctx
->u
.macinfo
.vtep_ip
= vtep_ip
;
3963 ctx
->u
.macinfo
.mac
= *mac
;
3964 ctx
->u
.macinfo
.vid
= vid
;
3965 ctx
->u
.macinfo
.is_sticky
= sticky
;
3966 ctx
->u
.macinfo
.nhg_id
= nhg_id
;
3967 ctx
->u
.macinfo
.update_flags
= update_flags
;
3971 * Common helper api for MAC address/vxlan updates
3973 static enum zebra_dplane_result
3974 mac_update_common(enum dplane_op_e op
,
3975 const struct interface
*ifp
,
3976 const struct interface
*br_ifp
,
3978 const struct ethaddr
*mac
,
3979 struct in_addr vtep_ip
,
3982 uint32_t update_flags
)
3984 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3986 struct zebra_dplane_ctx
*ctx
= NULL
;
3988 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
3989 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
3990 dplane_op2str(op
), mac
, ifp
->name
, &vtep_ip
);
3992 ctx
= dplane_ctx_alloc();
3995 /* Common init for the ctx */
3996 dplane_mac_init(ctx
, ifp
, br_ifp
, vid
, mac
, vtep_ip
, sticky
,
3997 nhg_id
, update_flags
);
3999 /* Enqueue for processing on the dplane pthread */
4000 ret
= dplane_update_enqueue(ctx
);
4002 /* Increment counter */
4003 atomic_fetch_add_explicit(&zdplane_info
.dg_macs_in
, 1,
4004 memory_order_relaxed
);
4007 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4010 atomic_fetch_add_explicit(&zdplane_info
.dg_mac_errors
, 1,
4011 memory_order_relaxed
);
4012 dplane_ctx_free(&ctx
);
4019 * Enqueue evpn neighbor add for the dataplane.
4021 enum zebra_dplane_result
dplane_rem_neigh_add(const struct interface
*ifp
,
4022 const struct ipaddr
*ip
,
4023 const struct ethaddr
*mac
,
4024 uint32_t flags
, bool was_static
)
4026 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4027 uint32_t update_flags
= 0;
4029 update_flags
|= DPLANE_NEIGH_REMOTE
;
4032 update_flags
|= DPLANE_NEIGH_WAS_STATIC
;
4034 result
= neigh_update_internal(
4035 DPLANE_OP_NEIGH_INSTALL
, ifp
, (const void *)mac
, AF_ETHERNET
,
4036 ip
, flags
, DPLANE_NUD_NOARP
, update_flags
, 0);
4042 * Enqueue local neighbor add for the dataplane.
4044 enum zebra_dplane_result
dplane_local_neigh_add(const struct interface
*ifp
,
4045 const struct ipaddr
*ip
,
4046 const struct ethaddr
*mac
,
4047 bool set_router
, bool set_static
,
4050 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4051 uint32_t update_flags
= 0;
4056 update_flags
|= DPLANE_NEIGH_SET_STATIC
;
4059 update_flags
|= DPLANE_NEIGH_SET_INACTIVE
;
4060 state
= DPLANE_NUD_STALE
;
4062 state
= DPLANE_NUD_REACHABLE
;
4066 ntf
|= DPLANE_NTF_ROUTER
;
4068 result
= neigh_update_internal(DPLANE_OP_NEIGH_INSTALL
, ifp
,
4069 (const void *)mac
, AF_ETHERNET
, ip
, ntf
,
4070 state
, update_flags
, 0);
4076 * Enqueue evpn neighbor delete for the dataplane.
4078 enum zebra_dplane_result
dplane_rem_neigh_delete(const struct interface
*ifp
,
4079 const struct ipaddr
*ip
)
4081 enum zebra_dplane_result result
;
4082 uint32_t update_flags
= 0;
4084 update_flags
|= DPLANE_NEIGH_REMOTE
;
4086 result
= neigh_update_internal(DPLANE_OP_NEIGH_DELETE
, ifp
, NULL
,
4087 AF_ETHERNET
, ip
, 0, 0, update_flags
, 0);
4093 * Enqueue evpn VTEP add for the dataplane.
4095 enum zebra_dplane_result
dplane_vtep_add(const struct interface
*ifp
,
4096 const struct in_addr
*ip
,
4099 enum zebra_dplane_result result
;
4100 struct ethaddr mac
= { {0, 0, 0, 0, 0, 0} };
4103 if (IS_ZEBRA_DEBUG_VXLAN
)
4104 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4105 ip
, vni
, ifp
->name
, ifp
->ifindex
);
4107 SET_IPADDR_V4(&addr
);
4108 addr
.ipaddr_v4
= *ip
;
4110 result
= neigh_update_internal(DPLANE_OP_VTEP_ADD
, ifp
, &mac
,
4111 AF_ETHERNET
, &addr
, 0, 0, 0, 0);
4117 * Enqueue evpn VTEP add for the dataplane.
4119 enum zebra_dplane_result
dplane_vtep_delete(const struct interface
*ifp
,
4120 const struct in_addr
*ip
,
4123 enum zebra_dplane_result result
;
4124 struct ethaddr mac
= { {0, 0, 0, 0, 0, 0} };
4127 if (IS_ZEBRA_DEBUG_VXLAN
)
4129 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4130 ip
, vni
, ifp
->name
, ifp
->ifindex
);
4132 SET_IPADDR_V4(&addr
);
4133 addr
.ipaddr_v4
= *ip
;
4135 result
= neigh_update_internal(DPLANE_OP_VTEP_DELETE
, ifp
,
4136 (const void *)&mac
, AF_ETHERNET
, &addr
,
4142 enum zebra_dplane_result
dplane_neigh_discover(const struct interface
*ifp
,
4143 const struct ipaddr
*ip
)
4145 enum zebra_dplane_result result
;
4147 result
= neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER
, ifp
, NULL
,
4148 AF_ETHERNET
, ip
, DPLANE_NTF_USE
,
4149 DPLANE_NUD_INCOMPLETE
, 0, 0);
4154 enum zebra_dplane_result
dplane_neigh_table_update(const struct interface
*ifp
,
4155 const uint8_t family
,
4156 const uint32_t app_probes
,
4157 const uint32_t ucast_probes
,
4158 const uint32_t mcast_probes
)
4160 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4162 struct zebra_dplane_ctx
*ctx
= NULL
;
4163 struct zebra_ns
*zns
;
4164 enum dplane_op_e op
= DPLANE_OP_NEIGH_TABLE_UPDATE
;
4166 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4167 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4168 dplane_op2str(op
), ifp
->name
, family2str(family
));
4171 ctx
= dplane_ctx_alloc();
4174 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4175 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4177 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4178 dplane_ctx_ns_init(ctx
, zns
, false);
4180 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
4181 ctx
->zd_ifindex
= ifp
->ifindex
;
4183 /* Init the neighbor-specific data area */
4184 memset(&ctx
->u
.neightable
, 0, sizeof(ctx
->u
.neightable
));
4186 ctx
->u
.neightable
.family
= family
;
4187 ctx
->u
.neightable
.app_probes
= app_probes
;
4188 ctx
->u
.neightable
.ucast_probes
= ucast_probes
;
4189 ctx
->u
.neightable
.mcast_probes
= mcast_probes
;
4191 /* Enqueue for processing on the dplane pthread */
4192 ret
= dplane_update_enqueue(ctx
);
4194 /* Increment counter */
4195 atomic_fetch_add_explicit(&zdplane_info
.dg_neightable_in
, 1,
4196 memory_order_relaxed
);
4199 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4202 atomic_fetch_add_explicit(&zdplane_info
.dg_neightable_errors
, 1,
4203 memory_order_relaxed
);
4204 dplane_ctx_free(&ctx
);
4211 * Common helper api for neighbor updates
4213 static enum zebra_dplane_result
4214 neigh_update_internal(enum dplane_op_e op
, const struct interface
*ifp
,
4215 const void *link
, const int link_family
,
4216 const struct ipaddr
*ip
, uint32_t flags
, uint16_t state
,
4217 uint32_t update_flags
, int protocol
)
4219 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4221 struct zebra_dplane_ctx
*ctx
= NULL
;
4222 struct zebra_ns
*zns
;
4223 const struct ethaddr
*mac
= NULL
;
4224 const struct ipaddr
*link_ip
= NULL
;
4226 if (link_family
== AF_ETHERNET
)
4227 mac
= (const struct ethaddr
*)link
;
4229 link_ip
= (const struct ipaddr
*)link
;
4231 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4232 char buf1
[PREFIX_STRLEN
];
4235 if (link_family
== AF_ETHERNET
)
4236 prefix_mac2str(mac
, buf1
, sizeof(buf1
));
4238 ipaddr2str(link_ip
, buf1
, sizeof(buf1
));
4239 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4240 dplane_op2str(op
), ifp
->name
,
4241 link_family
== AF_ETHERNET
? "mac " : "link ",
4245 ctx
= dplane_ctx_alloc();
4248 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4249 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4250 dplane_ctx_set_type(ctx
, protocol
);
4252 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4253 dplane_ctx_ns_init(ctx
, zns
, false);
4255 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
4256 ctx
->zd_ifindex
= ifp
->ifindex
;
4258 /* Init the neighbor-specific data area */
4259 memset(&ctx
->u
.neigh
, 0, sizeof(ctx
->u
.neigh
));
4261 ctx
->u
.neigh
.ip_addr
= *ip
;
4263 ctx
->u
.neigh
.link
.mac
= *mac
;
4265 ctx
->u
.neigh
.link
.ip_addr
= *link_ip
;
4267 ctx
->u
.neigh
.flags
= flags
;
4268 ctx
->u
.neigh
.state
= state
;
4269 ctx
->u
.neigh
.update_flags
= update_flags
;
4271 /* Enqueue for processing on the dplane pthread */
4272 ret
= dplane_update_enqueue(ctx
);
4274 /* Increment counter */
4275 atomic_fetch_add_explicit(&zdplane_info
.dg_neighs_in
, 1,
4276 memory_order_relaxed
);
4279 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4282 atomic_fetch_add_explicit(&zdplane_info
.dg_neigh_errors
, 1,
4283 memory_order_relaxed
);
4284 dplane_ctx_free(&ctx
);
4291 * Common helper api for PBR rule updates
4293 static enum zebra_dplane_result
4294 rule_update_internal(enum dplane_op_e op
, struct zebra_pbr_rule
*new_rule
,
4295 struct zebra_pbr_rule
*old_rule
)
4297 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4298 struct zebra_dplane_ctx
*ctx
;
4301 ctx
= dplane_ctx_alloc();
4303 ret
= dplane_ctx_rule_init(ctx
, op
, new_rule
, old_rule
);
4307 ret
= dplane_update_enqueue(ctx
);
4310 atomic_fetch_add_explicit(&zdplane_info
.dg_rules_in
, 1,
4311 memory_order_relaxed
);
4314 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4316 atomic_fetch_add_explicit(&zdplane_info
.dg_rule_errors
, 1,
4317 memory_order_relaxed
);
4318 dplane_ctx_free(&ctx
);
4324 enum zebra_dplane_result
dplane_pbr_rule_add(struct zebra_pbr_rule
*rule
)
4326 return rule_update_internal(DPLANE_OP_RULE_ADD
, rule
, NULL
);
4329 enum zebra_dplane_result
dplane_pbr_rule_delete(struct zebra_pbr_rule
*rule
)
4331 return rule_update_internal(DPLANE_OP_RULE_DELETE
, rule
, NULL
);
4334 enum zebra_dplane_result
dplane_pbr_rule_update(struct zebra_pbr_rule
*old_rule
,
4335 struct zebra_pbr_rule
*new_rule
)
4337 return rule_update_internal(DPLANE_OP_RULE_UPDATE
, new_rule
, old_rule
);
4340 * Common helper api for iptable updates
4342 static enum zebra_dplane_result
4343 iptable_update_internal(enum dplane_op_e op
, struct zebra_pbr_iptable
*iptable
)
4345 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4346 struct zebra_dplane_ctx
*ctx
;
4349 ctx
= dplane_ctx_alloc();
4351 ret
= dplane_ctx_iptable_init(ctx
, op
, iptable
);
4355 ret
= dplane_update_enqueue(ctx
);
4358 atomic_fetch_add_explicit(&zdplane_info
.dg_iptable_in
, 1,
4359 memory_order_relaxed
);
4362 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4364 atomic_fetch_add_explicit(&zdplane_info
.dg_iptable_errors
, 1,
4365 memory_order_relaxed
);
4366 dplane_ctx_free(&ctx
);
4372 enum zebra_dplane_result
4373 dplane_pbr_iptable_add(struct zebra_pbr_iptable
*iptable
)
4375 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD
, iptable
);
4378 enum zebra_dplane_result
4379 dplane_pbr_iptable_delete(struct zebra_pbr_iptable
*iptable
)
4381 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE
, iptable
);
4385 * Common helper api for ipset updates
4387 static enum zebra_dplane_result
4388 ipset_update_internal(enum dplane_op_e op
, struct zebra_pbr_ipset
*ipset
)
4390 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4391 struct zebra_dplane_ctx
*ctx
;
4394 ctx
= dplane_ctx_alloc();
4396 ret
= dplane_ctx_ipset_init(ctx
, op
, ipset
);
4400 ret
= dplane_update_enqueue(ctx
);
4403 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_in
, 1,
4404 memory_order_relaxed
);
4407 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4409 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_errors
, 1,
4410 memory_order_relaxed
);
4411 dplane_ctx_free(&ctx
);
4417 enum zebra_dplane_result
dplane_pbr_ipset_add(struct zebra_pbr_ipset
*ipset
)
4419 return ipset_update_internal(DPLANE_OP_IPSET_ADD
, ipset
);
4422 enum zebra_dplane_result
dplane_pbr_ipset_delete(struct zebra_pbr_ipset
*ipset
)
4424 return ipset_update_internal(DPLANE_OP_IPSET_DELETE
, ipset
);
4428 * Common helper api for ipset updates
4430 static enum zebra_dplane_result
4431 ipset_entry_update_internal(enum dplane_op_e op
,
4432 struct zebra_pbr_ipset_entry
*ipset_entry
)
4434 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4435 struct zebra_dplane_ctx
*ctx
;
4438 ctx
= dplane_ctx_alloc();
4440 ret
= dplane_ctx_ipset_entry_init(ctx
, op
, ipset_entry
);
4444 ret
= dplane_update_enqueue(ctx
);
4447 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_entry_in
, 1,
4448 memory_order_relaxed
);
4451 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4453 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_entry_errors
,
4454 1, memory_order_relaxed
);
4455 dplane_ctx_free(&ctx
);
4461 enum zebra_dplane_result
4462 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry
*ipset
)
4464 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD
, ipset
);
4467 enum zebra_dplane_result
4468 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry
*ipset
)
4470 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE
, ipset
);
4474 * Common helper api for GRE set
4476 enum zebra_dplane_result
4477 dplane_gre_set(struct interface
*ifp
, struct interface
*ifp_link
,
4478 unsigned int mtu
, const struct zebra_l2info_gre
*gre_info
)
4480 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4481 struct zebra_dplane_ctx
*ctx
;
4482 enum dplane_op_e op
= DPLANE_OP_GRE_SET
;
4484 struct zebra_ns
*zns
;
4486 ctx
= dplane_ctx_alloc();
4491 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4492 zlog_debug("init dplane ctx %s: if %s link %s%s",
4493 dplane_op2str(op
), ifp
->name
,
4494 ifp_link
? "set" : "unset", ifp_link
?
4495 ifp_link
->name
: "");
4499 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4500 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4503 dplane_ctx_ns_init(ctx
, zns
, false);
4505 dplane_ctx_set_ifname(ctx
, ifp
->name
);
4506 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4507 ctx
->zd_ifindex
= ifp
->ifindex
;
4509 ctx
->u
.gre
.link_ifindex
= ifp_link
->ifindex
;
4511 ctx
->u
.gre
.link_ifindex
= 0;
4513 memcpy(&ctx
->u
.gre
.info
, gre_info
, sizeof(ctx
->u
.gre
.info
));
4514 ctx
->u
.gre
.mtu
= mtu
;
4516 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4518 /* Enqueue context for processing */
4519 ret
= dplane_update_enqueue(ctx
);
4521 /* Update counter */
4522 atomic_fetch_add_explicit(&zdplane_info
.dg_gre_set_in
, 1,
4523 memory_order_relaxed
);
4526 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4528 atomic_fetch_add_explicit(
4529 &zdplane_info
.dg_gre_set_errors
, 1,
4530 memory_order_relaxed
);
4532 dplane_ctx_free(&ctx
);
4533 result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4539 * Handler for 'show dplane'
4541 int dplane_show_helper(struct vty
*vty
, bool detailed
)
4543 uint64_t queued
, queue_max
, limit
, errs
, incoming
, yields
,
4546 /* Using atomics because counters are being changed in different
4549 incoming
= atomic_load_explicit(&zdplane_info
.dg_routes_in
,
4550 memory_order_relaxed
);
4551 limit
= atomic_load_explicit(&zdplane_info
.dg_max_queued_updates
,
4552 memory_order_relaxed
);
4553 queued
= atomic_load_explicit(&zdplane_info
.dg_routes_queued
,
4554 memory_order_relaxed
);
4555 queue_max
= atomic_load_explicit(&zdplane_info
.dg_routes_queued_max
,
4556 memory_order_relaxed
);
4557 errs
= atomic_load_explicit(&zdplane_info
.dg_route_errors
,
4558 memory_order_relaxed
);
4559 yields
= atomic_load_explicit(&zdplane_info
.dg_update_yields
,
4560 memory_order_relaxed
);
4561 other_errs
= atomic_load_explicit(&zdplane_info
.dg_other_errors
,
4562 memory_order_relaxed
);
4564 vty_out(vty
, "Zebra dataplane:\nRoute updates: %"PRIu64
"\n",
4566 vty_out(vty
, "Route update errors: %"PRIu64
"\n", errs
);
4567 vty_out(vty
, "Other errors : %"PRIu64
"\n", other_errs
);
4568 vty_out(vty
, "Route update queue limit: %"PRIu64
"\n", limit
);
4569 vty_out(vty
, "Route update queue depth: %"PRIu64
"\n", queued
);
4570 vty_out(vty
, "Route update queue max: %"PRIu64
"\n", queue_max
);
4571 vty_out(vty
, "Dplane update yields: %"PRIu64
"\n", yields
);
4573 incoming
= atomic_load_explicit(&zdplane_info
.dg_lsps_in
,
4574 memory_order_relaxed
);
4575 errs
= atomic_load_explicit(&zdplane_info
.dg_lsp_errors
,
4576 memory_order_relaxed
);
4577 vty_out(vty
, "LSP updates: %"PRIu64
"\n", incoming
);
4578 vty_out(vty
, "LSP update errors: %"PRIu64
"\n", errs
);
4580 incoming
= atomic_load_explicit(&zdplane_info
.dg_pws_in
,
4581 memory_order_relaxed
);
4582 errs
= atomic_load_explicit(&zdplane_info
.dg_pw_errors
,
4583 memory_order_relaxed
);
4584 vty_out(vty
, "PW updates: %"PRIu64
"\n", incoming
);
4585 vty_out(vty
, "PW update errors: %"PRIu64
"\n", errs
);
4587 incoming
= atomic_load_explicit(&zdplane_info
.dg_intf_addrs_in
,
4588 memory_order_relaxed
);
4589 errs
= atomic_load_explicit(&zdplane_info
.dg_intf_addr_errors
,
4590 memory_order_relaxed
);
4591 vty_out(vty
, "Intf addr updates: %"PRIu64
"\n", incoming
);
4592 vty_out(vty
, "Intf addr errors: %"PRIu64
"\n", errs
);
4594 incoming
= atomic_load_explicit(&zdplane_info
.dg_macs_in
,
4595 memory_order_relaxed
);
4596 errs
= atomic_load_explicit(&zdplane_info
.dg_mac_errors
,
4597 memory_order_relaxed
);
4598 vty_out(vty
, "EVPN MAC updates: %"PRIu64
"\n", incoming
);
4599 vty_out(vty
, "EVPN MAC errors: %"PRIu64
"\n", errs
);
4601 incoming
= atomic_load_explicit(&zdplane_info
.dg_neighs_in
,
4602 memory_order_relaxed
);
4603 errs
= atomic_load_explicit(&zdplane_info
.dg_neigh_errors
,
4604 memory_order_relaxed
);
4605 vty_out(vty
, "EVPN neigh updates: %"PRIu64
"\n", incoming
);
4606 vty_out(vty
, "EVPN neigh errors: %"PRIu64
"\n", errs
);
4608 incoming
= atomic_load_explicit(&zdplane_info
.dg_rules_in
,
4609 memory_order_relaxed
);
4610 errs
= atomic_load_explicit(&zdplane_info
.dg_rule_errors
,
4611 memory_order_relaxed
);
4612 vty_out(vty
, "Rule updates: %" PRIu64
"\n", incoming
);
4613 vty_out(vty
, "Rule errors: %" PRIu64
"\n", errs
);
4615 incoming
= atomic_load_explicit(&zdplane_info
.dg_br_port_in
,
4616 memory_order_relaxed
);
4617 errs
= atomic_load_explicit(&zdplane_info
.dg_br_port_errors
,
4618 memory_order_relaxed
);
4619 vty_out(vty
, "Bridge port updates: %" PRIu64
"\n", incoming
);
4620 vty_out(vty
, "Bridge port errors: %" PRIu64
"\n", errs
);
4622 incoming
= atomic_load_explicit(&zdplane_info
.dg_iptable_in
,
4623 memory_order_relaxed
);
4624 errs
= atomic_load_explicit(&zdplane_info
.dg_iptable_errors
,
4625 memory_order_relaxed
);
4626 vty_out(vty
, "IPtable updates: %" PRIu64
"\n", incoming
);
4627 vty_out(vty
, "IPtable errors: %" PRIu64
"\n", errs
);
4628 incoming
= atomic_load_explicit(&zdplane_info
.dg_ipset_in
,
4629 memory_order_relaxed
);
4630 errs
= atomic_load_explicit(&zdplane_info
.dg_ipset_errors
,
4631 memory_order_relaxed
);
4632 vty_out(vty
, "IPset updates: %" PRIu64
"\n", incoming
);
4633 vty_out(vty
, "IPset errors: %" PRIu64
"\n", errs
);
4634 incoming
= atomic_load_explicit(&zdplane_info
.dg_ipset_entry_in
,
4635 memory_order_relaxed
);
4636 errs
= atomic_load_explicit(&zdplane_info
.dg_ipset_entry_errors
,
4637 memory_order_relaxed
);
4638 vty_out(vty
, "IPset entry updates: %" PRIu64
"\n", incoming
);
4639 vty_out(vty
, "IPset entry errors: %" PRIu64
"\n", errs
);
4641 incoming
= atomic_load_explicit(&zdplane_info
.dg_neightable_in
,
4642 memory_order_relaxed
);
4643 errs
= atomic_load_explicit(&zdplane_info
.dg_neightable_errors
,
4644 memory_order_relaxed
);
4645 vty_out(vty
, "Neighbor Table updates: %"PRIu64
"\n", incoming
);
4646 vty_out(vty
, "Neighbor Table errors: %"PRIu64
"\n", errs
);
4648 incoming
= atomic_load_explicit(&zdplane_info
.dg_gre_set_in
,
4649 memory_order_relaxed
);
4650 errs
= atomic_load_explicit(&zdplane_info
.dg_gre_set_errors
,
4651 memory_order_relaxed
);
4652 vty_out(vty
, "GRE set updates: %"PRIu64
"\n", incoming
);
4653 vty_out(vty
, "GRE set errors: %"PRIu64
"\n", errs
);
4658 * Handler for 'show dplane providers'
4660 int dplane_show_provs_helper(struct vty
*vty
, bool detailed
)
4662 struct zebra_dplane_provider
*prov
;
4663 uint64_t in
, in_q
, in_max
, out
, out_q
, out_max
;
4665 vty_out(vty
, "Zebra dataplane providers:\n");
4668 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
4671 /* Show counters, useful info from each registered provider */
4674 in
= atomic_load_explicit(&prov
->dp_in_counter
,
4675 memory_order_relaxed
);
4676 in_q
= atomic_load_explicit(&prov
->dp_in_queued
,
4677 memory_order_relaxed
);
4678 in_max
= atomic_load_explicit(&prov
->dp_in_max
,
4679 memory_order_relaxed
);
4680 out
= atomic_load_explicit(&prov
->dp_out_counter
,
4681 memory_order_relaxed
);
4682 out_q
= atomic_load_explicit(&prov
->dp_out_queued
,
4683 memory_order_relaxed
);
4684 out_max
= atomic_load_explicit(&prov
->dp_out_max
,
4685 memory_order_relaxed
);
4687 vty_out(vty
, "%s (%u): in: %"PRIu64
", q: %"PRIu64
", q_max: %"PRIu64
", out: %"PRIu64
", q: %"PRIu64
", q_max: %"PRIu64
"\n",
4688 prov
->dp_name
, prov
->dp_id
, in
, in_q
, in_max
,
4689 out
, out_q
, out_max
);
4692 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
4700 * Helper for 'show run' etc.
4702 int dplane_config_write_helper(struct vty
*vty
)
4704 if (zdplane_info
.dg_max_queued_updates
!= DPLANE_DEFAULT_MAX_QUEUED
)
4705 vty_out(vty
, "zebra dplane limit %u\n",
4706 zdplane_info
.dg_max_queued_updates
);
4712 * Provider registration
4714 int dplane_provider_register(const char *name
,
4715 enum dplane_provider_prio prio
,
4717 int (*start_fp
)(struct zebra_dplane_provider
*),
4718 int (*fp
)(struct zebra_dplane_provider
*),
4719 int (*fini_fp
)(struct zebra_dplane_provider
*,
4722 struct zebra_dplane_provider
**prov_p
)
4725 struct zebra_dplane_provider
*p
= NULL
, *last
;
4733 if (prio
<= DPLANE_PRIO_NONE
||
4734 prio
> DPLANE_PRIO_LAST
) {
4739 /* Allocate and init new provider struct */
4740 p
= XCALLOC(MTYPE_DP_PROV
, sizeof(struct zebra_dplane_provider
));
4742 pthread_mutex_init(&(p
->dp_mutex
), NULL
);
4743 TAILQ_INIT(&(p
->dp_ctx_in_q
));
4744 TAILQ_INIT(&(p
->dp_ctx_out_q
));
4746 p
->dp_flags
= flags
;
4747 p
->dp_priority
= prio
;
4749 p
->dp_start
= start_fp
;
4750 p
->dp_fini
= fini_fp
;
4753 /* Lock - the dplane pthread may be running */
4756 p
->dp_id
= ++zdplane_info
.dg_provider_id
;
4759 strlcpy(p
->dp_name
, name
, DPLANE_PROVIDER_NAMELEN
);
4761 snprintf(p
->dp_name
, DPLANE_PROVIDER_NAMELEN
,
4762 "provider-%u", p
->dp_id
);
4764 /* Insert into list ordered by priority */
4765 TAILQ_FOREACH(last
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
4766 if (last
->dp_priority
> p
->dp_priority
)
4771 TAILQ_INSERT_BEFORE(last
, p
, dp_prov_link
);
4773 TAILQ_INSERT_TAIL(&zdplane_info
.dg_providers_q
, p
,
4779 if (IS_ZEBRA_DEBUG_DPLANE
)
4780 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
4781 p
->dp_name
, p
->dp_id
, p
->dp_priority
);
4790 /* Accessors for provider attributes */
4791 const char *dplane_provider_get_name(const struct zebra_dplane_provider
*prov
)
4793 return prov
->dp_name
;
4796 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider
*prov
)
4801 void *dplane_provider_get_data(const struct zebra_dplane_provider
*prov
)
4803 return prov
->dp_data
;
4806 int dplane_provider_get_work_limit(const struct zebra_dplane_provider
*prov
)
4808 return zdplane_info
.dg_updates_per_cycle
;
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
/* Unlock a provider's mutex - iff the provider is THREADED */
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
4827 * Dequeue and maintain associated counter
4829 struct zebra_dplane_ctx
*dplane_provider_dequeue_in_ctx(
4830 struct zebra_dplane_provider
*prov
)
4832 struct zebra_dplane_ctx
*ctx
= NULL
;
4834 dplane_provider_lock(prov
);
4836 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
4838 TAILQ_REMOVE(&(prov
->dp_ctx_in_q
), ctx
, zd_q_entries
);
4840 atomic_fetch_sub_explicit(&prov
->dp_in_queued
, 1,
4841 memory_order_relaxed
);
4844 dplane_provider_unlock(prov
);
4850 * Dequeue work to a list, return count
4852 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider
*prov
,
4853 struct dplane_ctx_q
*listp
)
4856 struct zebra_dplane_ctx
*ctx
;
4858 limit
= zdplane_info
.dg_updates_per_cycle
;
4860 dplane_provider_lock(prov
);
4862 for (ret
= 0; ret
< limit
; ret
++) {
4863 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
4865 TAILQ_REMOVE(&(prov
->dp_ctx_in_q
), ctx
, zd_q_entries
);
4867 TAILQ_INSERT_TAIL(listp
, ctx
, zd_q_entries
);
4874 atomic_fetch_sub_explicit(&prov
->dp_in_queued
, ret
,
4875 memory_order_relaxed
);
4877 dplane_provider_unlock(prov
);
4882 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider
*prov
)
4884 return atomic_load_explicit(&(prov
->dp_out_counter
),
4885 memory_order_relaxed
);
4889 * Enqueue and maintain associated counter
4891 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider
*prov
,
4892 struct zebra_dplane_ctx
*ctx
)
4894 uint64_t curr
, high
;
4896 dplane_provider_lock(prov
);
4898 TAILQ_INSERT_TAIL(&(prov
->dp_ctx_out_q
), ctx
,
4901 /* Maintain out-queue counters */
4902 atomic_fetch_add_explicit(&(prov
->dp_out_queued
), 1,
4903 memory_order_relaxed
);
4904 curr
= atomic_load_explicit(&prov
->dp_out_queued
,
4905 memory_order_relaxed
);
4906 high
= atomic_load_explicit(&prov
->dp_out_max
,
4907 memory_order_relaxed
);
4909 atomic_store_explicit(&prov
->dp_out_max
, curr
,
4910 memory_order_relaxed
);
4912 dplane_provider_unlock(prov
);
4914 atomic_fetch_add_explicit(&(prov
->dp_out_counter
), 1,
4915 memory_order_relaxed
);
4919 * Accessor for provider object
4921 bool dplane_provider_is_threaded(const struct zebra_dplane_provider
*prov
)
4923 return (prov
->dp_flags
& DPLANE_PROV_FLAG_THREADED
);
4927 * Internal helper that copies information from a zebra ns object; this is
4928 * called in the zebra main pthread context as part of dplane ctx init.
4930 static void dplane_info_from_zns(struct zebra_dplane_info
*ns_info
,
4931 struct zebra_ns
*zns
)
4933 ns_info
->ns_id
= zns
->ns_id
;
4935 #if defined(HAVE_NETLINK)
4936 ns_info
->is_cmd
= true;
4937 ns_info
->sock
= zns
->netlink_dplane_out
.sock
;
4938 #endif /* NETLINK */
#if defined(HAVE_NETLINK)
/*
 * Callback when an OS (netlink) incoming event read is ready. This runs
 * in the dplane pthread.
 */
static void dplane_incoming_read(struct thread *event)
{
	struct dplane_zns_info *zi = THREAD_ARG(event);

	kernel_dplane_read(&zi->info);

	/* Re-start read task */
	thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
			zi->info.sock, &zi->t_read);
}
#endif /* HAVE_NETLINK */
4959 * Notify dplane when namespaces are enabled and disabled. The dplane
4960 * needs to start and stop reading incoming events from the zns. In the
4961 * common case where vrfs are _not_ namespaces, there will only be one
4964 * This is called in the main pthread.
4966 void zebra_dplane_ns_enable(struct zebra_ns
*zns
, bool enabled
)
4968 struct dplane_zns_info
*zi
;
4970 if (IS_ZEBRA_DEBUG_DPLANE
)
4971 zlog_debug("%s: %s for nsid %u", __func__
,
4972 (enabled
? "ENABLED" : "DISABLED"), zns
->ns_id
);
4974 /* Search for an existing zns info entry */
4975 frr_each (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
4976 if (zi
->info
.ns_id
== zns
->ns_id
)
4981 /* Create a new entry if necessary; start reading. */
4983 zi
= XCALLOC(MTYPE_DP_NS
, sizeof(*zi
));
4985 zi
->info
.ns_id
= zns
->ns_id
;
4987 zns_info_list_add_tail(&zdplane_info
.dg_zns_list
, zi
);
4989 if (IS_ZEBRA_DEBUG_DPLANE
)
4990 zlog_debug("%s: nsid %u, new zi %p", __func__
,
4994 /* Make sure we're up-to-date with the zns object */
4995 #if defined(HAVE_NETLINK)
4996 zi
->info
.is_cmd
= false;
4997 zi
->info
.sock
= zns
->netlink_dplane_in
.sock
;
4999 /* Start read task for the dplane pthread. */
5000 if (zdplane_info
.dg_master
)
5001 thread_add_read(zdplane_info
.dg_master
,
5002 dplane_incoming_read
, zi
, zi
->info
.sock
,
5006 if (IS_ZEBRA_DEBUG_DPLANE
)
5007 zlog_debug("%s: nsid %u, deleting zi %p", __func__
,
5010 /* Stop reading, free memory */
5011 zns_info_list_del(&zdplane_info
.dg_zns_list
, zi
);
5013 if (zdplane_info
.dg_master
)
5014 thread_cancel_async(zdplane_info
.dg_master
, &zi
->t_read
,
5017 XFREE(MTYPE_DP_NS
, zi
);
5022 * Provider api to signal that work/events are available
5023 * for the dataplane pthread.
5025 int dplane_provider_work_ready(void)
5027 /* Note that during zebra startup, we may be offered work before
5028 * the dataplane pthread (and thread-master) are ready. We want to
5029 * enqueue the work, but the event-scheduling machinery may not be
5032 if (zdplane_info
.dg_run
) {
5033 thread_add_event(zdplane_info
.dg_master
,
5034 dplane_thread_loop
, NULL
, 0,
5035 &zdplane_info
.dg_t_update
);
5042 * Enqueue a context directly to zebra main.
5044 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx
*ctx
)
5046 struct dplane_ctx_q temp_list
;
5048 /* Zebra's api takes a list, so we need to use a temporary list */
5049 TAILQ_INIT(&temp_list
);
5051 TAILQ_INSERT_TAIL(&temp_list
, ctx
, zd_q_entries
);
5052 (zdplane_info
.dg_results_cb
)(&temp_list
);
5056 * Kernel dataplane provider
5059 static void kernel_dplane_log_detail(struct zebra_dplane_ctx
*ctx
)
5061 char buf
[PREFIX_STRLEN
];
5063 switch (dplane_ctx_get_op(ctx
)) {
5065 case DPLANE_OP_ROUTE_INSTALL
:
5066 case DPLANE_OP_ROUTE_UPDATE
:
5067 case DPLANE_OP_ROUTE_DELETE
:
5068 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5069 dplane_ctx_get_vrf(ctx
), dplane_ctx_get_dest(ctx
),
5070 ctx
, dplane_op2str(dplane_ctx_get_op(ctx
)));
5073 case DPLANE_OP_NH_INSTALL
:
5074 case DPLANE_OP_NH_UPDATE
:
5075 case DPLANE_OP_NH_DELETE
:
5076 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5077 dplane_ctx_get_nhe_id(ctx
), ctx
,
5078 dplane_op2str(dplane_ctx_get_op(ctx
)));
5081 case DPLANE_OP_LSP_INSTALL
:
5082 case DPLANE_OP_LSP_UPDATE
:
5083 case DPLANE_OP_LSP_DELETE
:
5086 case DPLANE_OP_PW_INSTALL
:
5087 case DPLANE_OP_PW_UNINSTALL
:
5088 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5089 dplane_ctx_get_ifname(ctx
),
5090 dplane_op2str(ctx
->zd_op
), dplane_ctx_get_pw_af(ctx
),
5091 dplane_ctx_get_pw_local_label(ctx
),
5092 dplane_ctx_get_pw_remote_label(ctx
));
5095 case DPLANE_OP_ADDR_INSTALL
:
5096 case DPLANE_OP_ADDR_UNINSTALL
:
5097 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5098 dplane_op2str(dplane_ctx_get_op(ctx
)),
5099 dplane_ctx_get_ifindex(ctx
),
5100 dplane_ctx_get_intf_addr(ctx
));
5103 case DPLANE_OP_MAC_INSTALL
:
5104 case DPLANE_OP_MAC_DELETE
:
5105 prefix_mac2str(dplane_ctx_mac_get_addr(ctx
), buf
,
5108 zlog_debug("Dplane %s, mac %s, ifindex %u",
5109 dplane_op2str(dplane_ctx_get_op(ctx
)),
5110 buf
, dplane_ctx_get_ifindex(ctx
));
5113 case DPLANE_OP_NEIGH_INSTALL
:
5114 case DPLANE_OP_NEIGH_UPDATE
:
5115 case DPLANE_OP_NEIGH_DELETE
:
5116 case DPLANE_OP_VTEP_ADD
:
5117 case DPLANE_OP_VTEP_DELETE
:
5118 case DPLANE_OP_NEIGH_DISCOVER
:
5119 case DPLANE_OP_NEIGH_IP_INSTALL
:
5120 case DPLANE_OP_NEIGH_IP_DELETE
:
5121 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx
), buf
,
5124 zlog_debug("Dplane %s, ip %s, ifindex %u",
5125 dplane_op2str(dplane_ctx_get_op(ctx
)),
5126 buf
, dplane_ctx_get_ifindex(ctx
));
5129 case DPLANE_OP_RULE_ADD
:
5130 case DPLANE_OP_RULE_DELETE
:
5131 case DPLANE_OP_RULE_UPDATE
:
5132 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5133 dplane_op2str(dplane_ctx_get_op(ctx
)),
5134 dplane_ctx_get_ifname(ctx
),
5135 dplane_ctx_get_ifindex(ctx
), ctx
);
5138 case DPLANE_OP_SYS_ROUTE_ADD
:
5139 case DPLANE_OP_SYS_ROUTE_DELETE
:
5140 case DPLANE_OP_ROUTE_NOTIFY
:
5141 case DPLANE_OP_LSP_NOTIFY
:
5142 case DPLANE_OP_BR_PORT_UPDATE
:
5144 case DPLANE_OP_NONE
:
5147 case DPLANE_OP_IPTABLE_ADD
:
5148 case DPLANE_OP_IPTABLE_DELETE
: {
5149 struct zebra_pbr_iptable ipt
;
5151 dplane_ctx_get_pbr_iptable(ctx
, &ipt
);
5152 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5153 dplane_op2str(dplane_ctx_get_op(ctx
)), ipt
.unique
,
5156 case DPLANE_OP_IPSET_ADD
:
5157 case DPLANE_OP_IPSET_DELETE
: {
5158 struct zebra_pbr_ipset ipset
;
5160 dplane_ctx_get_pbr_ipset(ctx
, &ipset
);
5161 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5162 dplane_op2str(dplane_ctx_get_op(ctx
)), ipset
.unique
,
5165 case DPLANE_OP_IPSET_ENTRY_ADD
:
5166 case DPLANE_OP_IPSET_ENTRY_DELETE
: {
5167 struct zebra_pbr_ipset_entry ipent
;
5169 dplane_ctx_get_pbr_ipset_entry(ctx
, &ipent
);
5171 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5172 dplane_op2str(dplane_ctx_get_op(ctx
)), ipent
.unique
,
5175 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
5176 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5177 dplane_op2str(dplane_ctx_get_op(ctx
)),
5178 dplane_ctx_get_ifname(ctx
),
5179 family2str(dplane_ctx_neightable_get_family(ctx
)));
5181 case DPLANE_OP_GRE_SET
:
5182 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5183 dplane_op2str(dplane_ctx_get_op(ctx
)),
5184 dplane_ctx_get_ifname(ctx
),
5185 ctx
->u
.gre
.link_ifindex
);
5188 case DPLANE_OP_INTF_ADDR_ADD
:
5189 case DPLANE_OP_INTF_ADDR_DEL
:
5190 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5191 dplane_op2str(dplane_ctx_get_op(ctx
)),
5192 dplane_ctx_get_ifname(ctx
),
5193 dplane_ctx_get_intf_addr(ctx
));
5196 case DPLANE_OP_INTF_NETCONFIG
:
5197 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5198 dplane_op2str(dplane_ctx_get_op(ctx
)),
5199 dplane_ctx_get_netconf_ifindex(ctx
),
5200 dplane_ctx_get_netconf_mpls(ctx
),
5201 dplane_ctx_get_netconf_mcast(ctx
));
5206 static void kernel_dplane_handle_result(struct zebra_dplane_ctx
*ctx
)
5208 enum zebra_dplane_result res
= dplane_ctx_get_status(ctx
);
5210 switch (dplane_ctx_get_op(ctx
)) {
5212 case DPLANE_OP_ROUTE_INSTALL
:
5213 case DPLANE_OP_ROUTE_UPDATE
:
5214 case DPLANE_OP_ROUTE_DELETE
:
5215 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5216 atomic_fetch_add_explicit(&zdplane_info
.dg_route_errors
,
5217 1, memory_order_relaxed
);
5219 if ((dplane_ctx_get_op(ctx
) != DPLANE_OP_ROUTE_DELETE
)
5220 && (res
== ZEBRA_DPLANE_REQUEST_SUCCESS
)) {
5221 struct nexthop
*nexthop
;
5223 /* Update installed nexthops to signal which have been
5226 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx
),
5228 if (CHECK_FLAG(nexthop
->flags
,
5229 NEXTHOP_FLAG_RECURSIVE
))
5232 if (CHECK_FLAG(nexthop
->flags
,
5233 NEXTHOP_FLAG_ACTIVE
)) {
5234 SET_FLAG(nexthop
->flags
,
5241 case DPLANE_OP_NH_INSTALL
:
5242 case DPLANE_OP_NH_UPDATE
:
5243 case DPLANE_OP_NH_DELETE
:
5244 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5245 atomic_fetch_add_explicit(
5246 &zdplane_info
.dg_nexthop_errors
, 1,
5247 memory_order_relaxed
);
5250 case DPLANE_OP_LSP_INSTALL
:
5251 case DPLANE_OP_LSP_UPDATE
:
5252 case DPLANE_OP_LSP_DELETE
:
5253 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5254 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
,
5255 1, memory_order_relaxed
);
5258 case DPLANE_OP_PW_INSTALL
:
5259 case DPLANE_OP_PW_UNINSTALL
:
5260 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5261 atomic_fetch_add_explicit(&zdplane_info
.dg_pw_errors
, 1,
5262 memory_order_relaxed
);
5265 case DPLANE_OP_ADDR_INSTALL
:
5266 case DPLANE_OP_ADDR_UNINSTALL
:
5267 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5268 atomic_fetch_add_explicit(
5269 &zdplane_info
.dg_intf_addr_errors
, 1,
5270 memory_order_relaxed
);
5273 case DPLANE_OP_MAC_INSTALL
:
5274 case DPLANE_OP_MAC_DELETE
:
5275 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5276 atomic_fetch_add_explicit(&zdplane_info
.dg_mac_errors
,
5277 1, memory_order_relaxed
);
5280 case DPLANE_OP_NEIGH_INSTALL
:
5281 case DPLANE_OP_NEIGH_UPDATE
:
5282 case DPLANE_OP_NEIGH_DELETE
:
5283 case DPLANE_OP_VTEP_ADD
:
5284 case DPLANE_OP_VTEP_DELETE
:
5285 case DPLANE_OP_NEIGH_DISCOVER
:
5286 case DPLANE_OP_NEIGH_IP_INSTALL
:
5287 case DPLANE_OP_NEIGH_IP_DELETE
:
5288 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5289 atomic_fetch_add_explicit(&zdplane_info
.dg_neigh_errors
,
5290 1, memory_order_relaxed
);
5293 case DPLANE_OP_RULE_ADD
:
5294 case DPLANE_OP_RULE_DELETE
:
5295 case DPLANE_OP_RULE_UPDATE
:
5296 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5297 atomic_fetch_add_explicit(&zdplane_info
.dg_rule_errors
,
5298 1, memory_order_relaxed
);
5301 case DPLANE_OP_IPTABLE_ADD
:
5302 case DPLANE_OP_IPTABLE_DELETE
:
5303 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5304 atomic_fetch_add_explicit(
5305 &zdplane_info
.dg_iptable_errors
, 1,
5306 memory_order_relaxed
);
5309 case DPLANE_OP_IPSET_ADD
:
5310 case DPLANE_OP_IPSET_DELETE
:
5311 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5312 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_errors
,
5313 1, memory_order_relaxed
);
5316 case DPLANE_OP_IPSET_ENTRY_ADD
:
5317 case DPLANE_OP_IPSET_ENTRY_DELETE
:
5318 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5319 atomic_fetch_add_explicit(
5320 &zdplane_info
.dg_ipset_entry_errors
, 1,
5321 memory_order_relaxed
);
5324 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
5325 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5326 atomic_fetch_add_explicit(
5327 &zdplane_info
.dg_neightable_errors
, 1,
5328 memory_order_relaxed
);
5331 case DPLANE_OP_GRE_SET
:
5332 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5333 atomic_fetch_add_explicit(
5334 &zdplane_info
.dg_gre_set_errors
, 1,
5335 memory_order_relaxed
);
5337 /* Ignore 'notifications' - no-op */
5338 case DPLANE_OP_SYS_ROUTE_ADD
:
5339 case DPLANE_OP_SYS_ROUTE_DELETE
:
5340 case DPLANE_OP_ROUTE_NOTIFY
:
5341 case DPLANE_OP_LSP_NOTIFY
:
5342 case DPLANE_OP_BR_PORT_UPDATE
:
5345 /* TODO -- error counters for incoming events? */
5346 case DPLANE_OP_INTF_ADDR_ADD
:
5347 case DPLANE_OP_INTF_ADDR_DEL
:
5348 case DPLANE_OP_INTF_NETCONFIG
:
5351 case DPLANE_OP_NONE
:
5352 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5353 atomic_fetch_add_explicit(&zdplane_info
.dg_other_errors
,
5354 1, memory_order_relaxed
);
/* Handle one iptable update directly (not batched through the kernel
 * multi-update path), then hand the context to the provider's out queue.
 */
static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
					  struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_iptable(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
/* Handle one ipset update directly (not batched through the kernel
 * multi-update path), then hand the context to the provider's out queue.
 */
static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
					struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_ipset(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
/* Handle one ipset-entry update directly, then hand the context to the
 * provider's out queue.
 */
static void
kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
				  struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_ipset_entry(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
5382 * Kernel provider callback
5384 static int kernel_dplane_process_func(struct zebra_dplane_provider
*prov
)
5386 struct zebra_dplane_ctx
*ctx
, *tctx
;
5387 struct dplane_ctx_q work_list
;
5390 TAILQ_INIT(&work_list
);
5392 limit
= dplane_provider_get_work_limit(prov
);
5394 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5395 zlog_debug("dplane provider '%s': processing",
5396 dplane_provider_get_name(prov
));
5398 for (counter
= 0; counter
< limit
; counter
++) {
5399 ctx
= dplane_provider_dequeue_in_ctx(prov
);
5402 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5403 kernel_dplane_log_detail(ctx
);
5405 if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPTABLE_ADD
5406 || dplane_ctx_get_op(ctx
) == DPLANE_OP_IPTABLE_DELETE
))
5407 kernel_dplane_process_iptable(prov
, ctx
);
5408 else if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_ADD
5409 || dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_DELETE
))
5410 kernel_dplane_process_ipset(prov
, ctx
);
5411 else if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_ENTRY_ADD
5412 || dplane_ctx_get_op(ctx
)
5413 == DPLANE_OP_IPSET_ENTRY_DELETE
))
5414 kernel_dplane_process_ipset_entry(prov
, ctx
);
5416 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
5419 kernel_update_multi(&work_list
);
5421 TAILQ_FOREACH_SAFE (ctx
, &work_list
, zd_q_entries
, tctx
) {
5422 kernel_dplane_handle_result(ctx
);
5424 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
5425 dplane_provider_enqueue_out_ctx(prov
, ctx
);
5428 /* Ensure that we'll run the work loop again if there's still
5431 if (counter
>= limit
) {
5432 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5433 zlog_debug("dplane provider '%s' reached max updates %d",
5434 dplane_provider_get_name(prov
), counter
);
5436 atomic_fetch_add_explicit(&zdplane_info
.dg_update_yields
,
5437 1, memory_order_relaxed
);
5439 dplane_provider_work_ready();
5445 #ifdef DPLANE_TEST_PROVIDER
5448 * Test dataplane provider plugin
5452 * Test provider process callback
5454 static int test_dplane_process_func(struct zebra_dplane_provider
*prov
)
5456 struct zebra_dplane_ctx
*ctx
;
5459 /* Just moving from 'in' queue to 'out' queue */
5461 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5462 zlog_debug("dplane provider '%s': processing",
5463 dplane_provider_get_name(prov
));
5465 limit
= dplane_provider_get_work_limit(prov
);
5467 for (counter
= 0; counter
< limit
; counter
++) {
5468 ctx
= dplane_provider_dequeue_in_ctx(prov
);
5472 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5473 zlog_debug("dplane provider '%s': op %s",
5474 dplane_provider_get_name(prov
),
5475 dplane_op2str(dplane_ctx_get_op(ctx
)));
5477 dplane_ctx_set_status(ctx
, ZEBRA_DPLANE_REQUEST_SUCCESS
);
5479 dplane_provider_enqueue_out_ctx(prov
, ctx
);
5482 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5483 zlog_debug("dplane provider '%s': processed %d",
5484 dplane_provider_get_name(prov
), counter
);
5486 /* Ensure that we'll run the work loop again if there's still
5489 if (counter
>= limit
)
5490 dplane_provider_work_ready();
5496 * Test provider shutdown/fini callback
5498 static int test_dplane_shutdown_func(struct zebra_dplane_provider
*prov
,
5501 if (IS_ZEBRA_DEBUG_DPLANE
)
5502 zlog_debug("dplane provider '%s': %sshutdown",
5503 dplane_provider_get_name(prov
),
5504 early
? "early " : "");
5508 #endif /* DPLANE_TEST_PROVIDER */
5511 * Register default kernel provider
5513 static void dplane_provider_init(void)
5517 ret
= dplane_provider_register("Kernel",
5519 DPLANE_PROV_FLAGS_DEFAULT
, NULL
,
5520 kernel_dplane_process_func
,
5525 zlog_err("Unable to register kernel dplane provider: %d",
5528 #ifdef DPLANE_TEST_PROVIDER
5529 /* Optional test provider ... */
5530 ret
= dplane_provider_register("Test",
5531 DPLANE_PRIO_PRE_KERNEL
,
5532 DPLANE_PROV_FLAGS_DEFAULT
, NULL
,
5533 test_dplane_process_func
,
5534 test_dplane_shutdown_func
,
5535 NULL
/* data */, NULL
);
5538 zlog_err("Unable to register test dplane provider: %d",
5540 #endif /* DPLANE_TEST_PROVIDER */
5544 * Allow zebra code to walk the queue of pending contexts, evaluate each one
5545 * using a callback function. If the function returns 'true', the context
5546 * will be dequeued and freed without being processed.
5548 int dplane_clean_ctx_queue(bool (*context_cb
)(struct zebra_dplane_ctx
*ctx
,
5549 void *arg
), void *val
)
5551 struct zebra_dplane_ctx
*ctx
, *temp
;
5552 struct dplane_ctx_q work_list
;
5554 TAILQ_INIT(&work_list
);
5556 if (context_cb
== NULL
)
5559 /* Walk the pending context queue under the dplane lock. */
5562 TAILQ_FOREACH_SAFE(ctx
, &zdplane_info
.dg_update_ctx_q
, zd_q_entries
,
5564 if (context_cb(ctx
, val
)) {
5565 TAILQ_REMOVE(&zdplane_info
.dg_update_ctx_q
, ctx
,
5567 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
5573 /* Now free any contexts selected by the caller, without holding
5576 TAILQ_FOREACH_SAFE(ctx
, &work_list
, zd_q_entries
, temp
) {
5577 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
5578 dplane_ctx_fini(&ctx
);
5586 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5587 * simplified or skipped during shutdown processing.
5589 bool dplane_is_in_shutdown(void)
5591 return zdplane_info
.dg_is_shutdown
;
5595 * Enable collection of extra info about interfaces in route updates.
5597 void dplane_enable_intf_extra_info(void)
5599 dplane_collect_extra_intf_info
= true;
5603 * Early or pre-shutdown, de-init notification api. This runs pretty
5604 * early during zebra shutdown, as a signal to stop new work and prepare
5605 * for updates generated by shutdown/cleanup activity, as zebra tries to
5606 * remove everything it's responsible for.
5607 * NB: This runs in the main zebra pthread context.
5609 void zebra_dplane_pre_finish(void)
5611 struct zebra_dplane_provider
*prov
;
5613 if (IS_ZEBRA_DEBUG_DPLANE
)
5614 zlog_debug("Zebra dataplane pre-finish called");
5616 zdplane_info
.dg_is_shutdown
= true;
5618 /* Notify provider(s) of pending shutdown. */
5619 TAILQ_FOREACH(prov
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
5620 if (prov
->dp_fini
== NULL
)
5623 prov
->dp_fini(prov
, true /* early */);
5628 * Utility to determine whether work remains enqueued within the dplane;
5629 * used during system shutdown processing.
5631 static bool dplane_work_pending(void)
5634 struct zebra_dplane_ctx
*ctx
;
5635 struct zebra_dplane_provider
*prov
;
5637 /* TODO -- just checking incoming/pending work for now, must check
5642 ctx
= TAILQ_FIRST(&zdplane_info
.dg_update_ctx_q
);
5643 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
5654 dplane_provider_lock(prov
);
5656 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
5658 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_out_q
));
5660 dplane_provider_unlock(prov
);
5666 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
5678 * Shutdown-time intermediate callback, used to determine when all pending
5679 * in-flight updates are done. If there's still work to do, reschedules itself.
5680 * If all work is done, schedules an event to the main zebra thread for
5681 * final zebra shutdown.
5682 * This runs in the dplane pthread context.
5684 static void dplane_check_shutdown_status(struct thread
*event
)
5686 struct dplane_zns_info
*zi
;
5688 if (IS_ZEBRA_DEBUG_DPLANE
)
5689 zlog_debug("Zebra dataplane shutdown status check called");
5691 /* Remove any zns info entries as we stop the dplane pthread. */
5692 frr_each_safe (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
5693 zns_info_list_del(&zdplane_info
.dg_zns_list
, zi
);
5695 if (zdplane_info
.dg_master
)
5696 thread_cancel(&zi
->t_read
);
5698 XFREE(MTYPE_DP_NS
, zi
);
5701 if (dplane_work_pending()) {
5702 /* Reschedule dplane check on a short timer */
5703 thread_add_timer_msec(zdplane_info
.dg_master
,
5704 dplane_check_shutdown_status
,
5706 &zdplane_info
.dg_t_shutdown_check
);
5708 /* TODO - give up and stop waiting after a short time? */
5711 /* We appear to be done - schedule a final callback event
5712 * for the zebra main pthread.
5714 thread_add_event(zrouter
.master
, zebra_finalize
, NULL
, 0, NULL
);
5719 * Shutdown, de-init api. This runs pretty late during shutdown,
5720 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5721 * At this point, dplane work may still remain to be done, so we can't just
5722 * blindly terminate. If there's still work to do, we'll periodically check
5723 * and when done, we'll enqueue a task to the zebra main thread for final
5724 * termination processing.
5726 * NB: This runs in the main zebra thread context.
5728 void zebra_dplane_finish(void)
5730 if (IS_ZEBRA_DEBUG_DPLANE
)
5731 zlog_debug("Zebra dataplane fini called");
5733 thread_add_event(zdplane_info
.dg_master
,
5734 dplane_check_shutdown_status
, NULL
, 0,
5735 &zdplane_info
.dg_t_shutdown_check
);
5739 * Main dataplane pthread event loop. The thread takes new incoming work
5740 * and offers it to the first provider. It then iterates through the
5741 * providers, taking complete work from each one and offering it
5742 * to the next in order. At each step, a limited number of updates are
5743 * processed during a cycle in order to provide some fairness.
5745 * This loop through the providers is only run once, so that the dataplane
5746 * pthread can look for other pending work - such as i/o work on behalf of
5749 static void dplane_thread_loop(struct thread
*event
)
5751 struct dplane_ctx_q work_list
;
5752 struct dplane_ctx_q error_list
;
5753 struct zebra_dplane_provider
*prov
;
5754 struct zebra_dplane_ctx
*ctx
, *tctx
;
5755 int limit
, counter
, error_counter
;
5756 uint64_t curr
, high
;
5757 bool reschedule
= false;
5759 /* Capture work limit per cycle */
5760 limit
= zdplane_info
.dg_updates_per_cycle
;
5762 /* Init temporary lists used to move contexts among providers */
5763 TAILQ_INIT(&work_list
);
5764 TAILQ_INIT(&error_list
);
5767 /* Check for zebra shutdown */
5768 if (!zdplane_info
.dg_run
)
5771 /* Dequeue some incoming work from zebra (if any) onto the temporary
5776 /* Locate initial registered provider */
5777 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
5779 /* Move new work from incoming list to temp list */
5780 for (counter
= 0; counter
< limit
; counter
++) {
5781 ctx
= TAILQ_FIRST(&zdplane_info
.dg_update_ctx_q
);
5783 TAILQ_REMOVE(&zdplane_info
.dg_update_ctx_q
, ctx
,
5786 ctx
->zd_provider
= prov
->dp_id
;
5788 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
5796 atomic_fetch_sub_explicit(&zdplane_info
.dg_routes_queued
, counter
,
5797 memory_order_relaxed
);
5799 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5800 zlog_debug("dplane: incoming new work counter: %d", counter
);
5802 /* Iterate through the registered providers, offering new incoming
5803 * work. If the provider has outgoing work in its queue, take that
5804 * work for the next provider
5808 /* At each iteration, the temporary work list has 'counter'
5811 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5812 zlog_debug("dplane enqueues %d new work to provider '%s'",
5813 counter
, dplane_provider_get_name(prov
));
5815 /* Capture current provider id in each context; check for
5818 TAILQ_FOREACH_SAFE(ctx
, &work_list
, zd_q_entries
, tctx
) {
5819 if (dplane_ctx_get_status(ctx
) ==
5820 ZEBRA_DPLANE_REQUEST_SUCCESS
) {
5821 ctx
->zd_provider
= prov
->dp_id
;
5824 * TODO -- improve error-handling: recirc
5825 * errors backwards so that providers can
5826 * 'undo' their work (if they want to)
5829 /* Move to error list; will be returned
5832 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
5833 TAILQ_INSERT_TAIL(&error_list
,
5839 /* Enqueue new work to the provider */
5840 dplane_provider_lock(prov
);
5842 if (TAILQ_FIRST(&work_list
))
5843 TAILQ_CONCAT(&(prov
->dp_ctx_in_q
), &work_list
,
5846 atomic_fetch_add_explicit(&prov
->dp_in_counter
, counter
,
5847 memory_order_relaxed
);
5848 atomic_fetch_add_explicit(&prov
->dp_in_queued
, counter
,
5849 memory_order_relaxed
);
5850 curr
= atomic_load_explicit(&prov
->dp_in_queued
,
5851 memory_order_relaxed
);
5852 high
= atomic_load_explicit(&prov
->dp_in_max
,
5853 memory_order_relaxed
);
5855 atomic_store_explicit(&prov
->dp_in_max
, curr
,
5856 memory_order_relaxed
);
5858 dplane_provider_unlock(prov
);
5860 /* Reset the temp list (though the 'concat' may have done this
5861 * already), and the counter
5863 TAILQ_INIT(&work_list
);
5866 /* Call into the provider code. Note that this is
5867 * unconditional: we offer to do work even if we don't enqueue
5870 (*prov
->dp_fp
)(prov
);
5872 /* Check for zebra shutdown */
5873 if (!zdplane_info
.dg_run
)
5876 /* Dequeue completed work from the provider */
5877 dplane_provider_lock(prov
);
5879 while (counter
< limit
) {
5880 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_out_q
));
5882 TAILQ_REMOVE(&(prov
->dp_ctx_out_q
), ctx
,
5885 TAILQ_INSERT_TAIL(&work_list
,
5892 dplane_provider_unlock(prov
);
5894 if (counter
>= limit
)
5897 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5898 zlog_debug("dplane dequeues %d completed work from provider %s",
5899 counter
, dplane_provider_get_name(prov
));
5901 /* Locate next provider */
5903 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
5908 * We hit the work limit while processing at least one provider's
5909 * output queue - ensure we come back and finish it.
5912 dplane_provider_work_ready();
5914 /* After all providers have been serviced, enqueue any completed
5915 * work and any errors back to zebra so it can process the results.
5917 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5918 zlog_debug("dplane has %d completed, %d errors, for zebra main",
5919 counter
, error_counter
);
5922 * Hand lists through the api to zebra main,
5923 * to reduce the number of lock/unlock cycles
5926 /* Call through to zebra main */
5927 (zdplane_info
.dg_results_cb
)(&error_list
);
5929 TAILQ_INIT(&error_list
);
5931 /* Call through to zebra main */
5932 (zdplane_info
.dg_results_cb
)(&work_list
);
5934 TAILQ_INIT(&work_list
);
5938 * Final phase of shutdown, after all work enqueued to dplane has been
5939 * processed. This is called from the zebra main pthread context.
5941 void zebra_dplane_shutdown(void)
5943 struct zebra_dplane_provider
*dp
;
5945 if (IS_ZEBRA_DEBUG_DPLANE
)
5946 zlog_debug("Zebra dataplane shutdown called");
5948 /* Stop dplane thread, if it's running */
5950 zdplane_info
.dg_run
= false;
5952 if (zdplane_info
.dg_t_update
)
5953 thread_cancel_async(zdplane_info
.dg_t_update
->master
,
5954 &zdplane_info
.dg_t_update
, NULL
);
5956 frr_pthread_stop(zdplane_info
.dg_pthread
, NULL
);
5958 /* Destroy pthread */
5959 frr_pthread_destroy(zdplane_info
.dg_pthread
);
5960 zdplane_info
.dg_pthread
= NULL
;
5961 zdplane_info
.dg_master
= NULL
;
5963 /* Notify provider(s) of final shutdown.
5964 * Note that this call is in the main pthread, so providers must
5965 * be prepared for that.
5967 TAILQ_FOREACH(dp
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
5968 if (dp
->dp_fini
== NULL
)
5971 dp
->dp_fini(dp
, false);
5974 /* TODO -- Clean-up provider objects */
5976 /* TODO -- Clean queue(s), free memory */
5980 * Initialize the dataplane module during startup, internal/private version
5982 static void zebra_dplane_init_internal(void)
5984 memset(&zdplane_info
, 0, sizeof(zdplane_info
));
5986 pthread_mutex_init(&zdplane_info
.dg_mutex
, NULL
);
5988 TAILQ_INIT(&zdplane_info
.dg_update_ctx_q
);
5989 TAILQ_INIT(&zdplane_info
.dg_providers_q
);
5990 zns_info_list_init(&zdplane_info
.dg_zns_list
);
5992 zdplane_info
.dg_updates_per_cycle
= DPLANE_DEFAULT_NEW_WORK
;
5994 zdplane_info
.dg_max_queued_updates
= DPLANE_DEFAULT_MAX_QUEUED
;
5996 /* Register default kernel 'provider' during init */
5997 dplane_provider_init();
6001 * Start the dataplane pthread. This step needs to be run later than the
6002 * 'init' step, in case zebra has fork-ed.
6004 void zebra_dplane_start(void)
6006 struct dplane_zns_info
*zi
;
6007 struct zebra_dplane_provider
*prov
;
6008 struct frr_pthread_attr pattr
= {
6009 .start
= frr_pthread_attr_default
.start
,
6010 .stop
= frr_pthread_attr_default
.stop
6013 /* Start dataplane pthread */
6015 zdplane_info
.dg_pthread
= frr_pthread_new(&pattr
, "Zebra dplane thread",
6018 zdplane_info
.dg_master
= zdplane_info
.dg_pthread
->master
;
6020 zdplane_info
.dg_run
= true;
6022 /* Enqueue an initial event for the dataplane pthread */
6023 thread_add_event(zdplane_info
.dg_master
, dplane_thread_loop
, NULL
, 0,
6024 &zdplane_info
.dg_t_update
);
6026 /* Enqueue reads if necessary */
6027 frr_each (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
6028 #if defined(HAVE_NETLINK)
6029 thread_add_read(zdplane_info
.dg_master
, dplane_incoming_read
,
6030 zi
, zi
->info
.sock
, &zi
->t_read
);
6034 /* Call start callbacks for registered providers */
6037 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
6043 (prov
->dp_start
)(prov
);
6045 /* Locate next provider */
6047 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
6051 frr_pthread_run(zdplane_info
.dg_pthread
, NULL
);
6055 * Initialize the dataplane module at startup; called by zebra rib_init()
6057 void zebra_dplane_init(int (*results_fp
)(struct dplane_ctx_q
*))
6059 zebra_dplane_init_internal();
6060 zdplane_info
.dg_results_cb
= results_fp
;