2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/netconf_netlink.h"
32 #include "zebra/zebra_router.h"
33 #include "zebra/zebra_dplane.h"
34 #include "zebra/zebra_vxlan_private.h"
35 #include "zebra/zebra_mpls.h"
37 #include "zebra/debug.h"
38 #include "zebra/zebra_pbr.h"
42 DEFINE_MTYPE_STATIC(ZEBRA
, DP_CTX
, "Zebra DPlane Ctx");
43 DEFINE_MTYPE_STATIC(ZEBRA
, DP_INTF
, "Zebra DPlane Intf");
44 DEFINE_MTYPE_STATIC(ZEBRA
, DP_PROV
, "Zebra DPlane Provider");
45 DEFINE_MTYPE_STATIC(ZEBRA
, DP_NETFILTER
, "Zebra Netfilter Internal Object");
46 DEFINE_MTYPE_STATIC(ZEBRA
, DP_NS
, "DPlane NSes");
52 /* Control for collection of extra interface info with route updates; a plugin
53 * can enable the extra info via a dplane api.
55 static bool dplane_collect_extra_intf_info
;
57 /* Enable test dataplane provider */
58 /*#define DPLANE_TEST_PROVIDER 1 */
/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;

/* Default value for new work per cycle */
const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
66 /* Validation check macro for context blocks */
67 /* #define DPLANE_DEBUG 1 */
71 # define DPLANE_CTX_VALID(p) \
76 # define DPLANE_CTX_VALID(p)
78 #endif /* DPLANE_DEBUG */
81 * Nexthop information captured for nexthop/nexthop group updates
83 struct dplane_nexthop_info
{
90 struct nexthop_group ng
;
91 struct nh_grp nh_grp
[MULTIPATH_NUM
];
96 * Optional extra info about interfaces used in route updates' nexthops.
98 struct dplane_intf_extra
{
104 TAILQ_ENTRY(dplane_intf_extra
) link
;
108 * Route information captured for route updates.
110 struct dplane_route_info
{
112 /* Dest and (optional) source prefixes */
113 struct prefix zd_dest
;
114 struct prefix zd_src
;
123 route_tag_t zd_old_tag
;
125 uint32_t zd_old_metric
;
127 uint16_t zd_instance
;
128 uint16_t zd_old_instance
;
131 uint8_t zd_old_distance
;
134 uint32_t zd_nexthop_mtu
;
136 /* Nexthop hash entry info */
137 struct dplane_nexthop_info nhe
;
141 struct nexthop_group zd_ng
;
143 /* Backup nexthops (if present) */
144 struct nexthop_group backup_ng
;
146 /* "Previous" nexthops, used only in route updates without netlink */
147 struct nexthop_group zd_old_ng
;
148 struct nexthop_group old_backup_ng
;
150 /* Optional list of extra interface info */
151 TAILQ_HEAD(dp_intf_extra_q
, dplane_intf_extra
) intf_extra_q
;
155 * Pseudowire info for the dataplane
157 struct dplane_pw_info
{
164 mpls_label_t local_label
;
165 mpls_label_t remote_label
;
167 /* Nexthops that are valid and installed */
168 struct nexthop_group fib_nhg
;
170 /* Primary and backup nexthop sets, copied from the resolving route. */
171 struct nexthop_group primary_nhg
;
172 struct nexthop_group backup_nhg
;
174 union pw_protocol_fields fields
;
178 * Bridge port info for the dataplane
180 struct dplane_br_port_info
{
181 uint32_t sph_filter_cnt
;
182 struct in_addr sph_filters
[ES_VTEP_MAX_CNT
];
183 /* DPLANE_BR_PORT_XXX - see zebra_dplane.h*/
185 uint32_t backup_nhg_id
;
189 * Interface/prefix info for the dataplane
191 struct dplane_intf_info
{
199 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
200 #define DPLANE_INTF_SECONDARY (1 << 1)
201 #define DPLANE_INTF_BROADCAST (1 << 2)
202 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
203 #define DPLANE_INTF_HAS_LABEL (1 << 4)
205 /* Interface address/prefix */
206 struct prefix prefix
;
208 /* Dest address, for p2p, or broadcast prefix */
209 struct prefix dest_prefix
;
216 * EVPN MAC address info for the dataplane.
218 struct dplane_mac_info
{
220 ifindex_t br_ifindex
;
222 struct in_addr vtep_ip
;
225 uint32_t update_flags
;
229 * Neighbor info for the dataplane
231 struct dplane_neigh_info
{
232 struct ipaddr ip_addr
;
235 struct ipaddr ip_addr
;
239 uint32_t update_flags
;
245 struct dplane_neigh_table
{
248 uint32_t ucast_probes
;
249 uint32_t mcast_probes
;
253 * Policy based routing rule info for the dataplane
255 struct dplane_ctx_rule
{
258 /* The route table pointed by this rule */
261 /* Filter criteria */
265 struct prefix src_ip
;
266 struct prefix dst_ip
;
270 uint16_t action_vlan_id
;
271 uint16_t action_vlan_flags
;
273 uint32_t action_queue_id
;
275 char ifname
[INTERFACE_NAMSIZ
+ 1];
278 struct dplane_rule_info
{
280 * Originating zclient sock fd, so we can know who to send
288 struct dplane_ctx_rule
new;
289 struct dplane_ctx_rule old
;
292 struct dplane_gre_ctx
{
293 uint32_t link_ifindex
;
295 struct zebra_l2info_gre info
;
300 * Network interface configuration info - aligned with netlink's NETCONF
301 * info. The flags values are public, in the dplane.h file...
303 struct dplane_netconf_info
{
306 enum dplane_netconf_status_e mpls_val
;
307 enum dplane_netconf_status_e mcast_val
;
311 * The context block used to exchange info about route updates across
312 * the boundary between the zebra main context (and pthread) and the
313 * dataplane layer (and pthread).
315 struct zebra_dplane_ctx
{
318 enum dplane_op_e zd_op
;
320 /* Status on return */
321 enum zebra_dplane_result zd_status
;
323 /* Dplane provider id */
324 uint32_t zd_provider
;
326 /* Flags - used by providers, e.g. */
334 /* Some updates may be generated by notifications: allow the
335 * plugin to notice and ignore results from its own notifications.
337 uint32_t zd_notif_provider
;
339 /* TODO -- internal/sub-operation status? */
340 enum zebra_dplane_result zd_remote_status
;
341 enum zebra_dplane_result zd_kernel_status
;
344 uint32_t zd_table_id
;
346 char zd_ifname
[INTERFACE_NAMSIZ
];
347 ifindex_t zd_ifindex
;
349 /* Support info for different kinds of updates */
351 struct dplane_route_info rinfo
;
352 struct zebra_lsp lsp
;
353 struct dplane_pw_info pw
;
354 struct dplane_br_port_info br_port
;
355 struct dplane_intf_info intf
;
356 struct dplane_mac_info macinfo
;
357 struct dplane_neigh_info neigh
;
358 struct dplane_rule_info rule
;
359 struct zebra_pbr_iptable iptable
;
360 struct zebra_pbr_ipset ipset
;
362 struct zebra_pbr_ipset_entry entry
;
363 struct zebra_pbr_ipset_info info
;
365 struct dplane_neigh_table neightable
;
366 struct dplane_gre_ctx gre
;
367 struct dplane_netconf_info netconf
;
370 /* Namespace info, used especially for netlink kernel communication */
371 struct zebra_dplane_info zd_ns_info
;
373 /* Embedded list linkage */
374 TAILQ_ENTRY(zebra_dplane_ctx
) zd_q_entries
;
377 /* Flag that can be set by a pre-kernel provider as a signal that an update
378 * should bypass the kernel.
380 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
384 * Registration block for one dataplane provider.
386 struct zebra_dplane_provider
{
388 char dp_name
[DPLANE_PROVIDER_NAMELEN
+ 1];
390 /* Priority, for ordering among providers */
397 pthread_mutex_t dp_mutex
;
399 /* Plugin-provided extra data */
405 int (*dp_start
)(struct zebra_dplane_provider
*prov
);
407 int (*dp_fp
)(struct zebra_dplane_provider
*prov
);
409 int (*dp_fini
)(struct zebra_dplane_provider
*prov
, bool early_p
);
411 _Atomic
uint32_t dp_in_counter
;
412 _Atomic
uint32_t dp_in_queued
;
413 _Atomic
uint32_t dp_in_max
;
414 _Atomic
uint32_t dp_out_counter
;
415 _Atomic
uint32_t dp_out_queued
;
416 _Atomic
uint32_t dp_out_max
;
417 _Atomic
uint32_t dp_error_counter
;
419 /* Queue of contexts inbound to the provider */
420 struct dplane_ctx_q dp_ctx_in_q
;
422 /* Queue of completed contexts outbound from the provider back
423 * towards the dataplane module.
425 struct dplane_ctx_q dp_ctx_out_q
;
427 /* Embedded list linkage for provider objects */
428 TAILQ_ENTRY(zebra_dplane_provider
) dp_prov_link
;
431 /* Declare types for list of zns info objects */
432 PREDECL_DLIST(zns_info_list
);
434 struct dplane_zns_info
{
435 struct zebra_dplane_info info
;
437 /* Request data from the OS */
438 struct thread
*t_request
;
441 struct thread
*t_read
;
444 struct zns_info_list_item link
;
450 static struct zebra_dplane_globals
{
451 /* Mutex to control access to dataplane components */
452 pthread_mutex_t dg_mutex
;
454 /* Results callback registered by zebra 'core' */
455 int (*dg_results_cb
)(struct dplane_ctx_q
*ctxlist
);
457 /* Sentinel for beginning of shutdown */
458 volatile bool dg_is_shutdown
;
460 /* Sentinel for end of shutdown */
461 volatile bool dg_run
;
463 /* Update context queue inbound to the dataplane */
464 TAILQ_HEAD(zdg_ctx_q
, zebra_dplane_ctx
) dg_update_ctx_q
;
466 /* Ordered list of providers */
467 TAILQ_HEAD(zdg_prov_q
, zebra_dplane_provider
) dg_providers_q
;
469 /* List of info about each zns */
470 struct zns_info_list_head dg_zns_list
;
472 /* Counter used to assign internal ids to providers */
473 uint32_t dg_provider_id
;
475 /* Limit number of pending, unprocessed updates */
476 _Atomic
uint32_t dg_max_queued_updates
;
478 /* Control whether system route notifications should be produced. */
479 bool dg_sys_route_notifs
;
481 /* Limit number of new updates dequeued at once, to pace an
484 uint32_t dg_updates_per_cycle
;
486 _Atomic
uint32_t dg_routes_in
;
487 _Atomic
uint32_t dg_routes_queued
;
488 _Atomic
uint32_t dg_routes_queued_max
;
489 _Atomic
uint32_t dg_route_errors
;
490 _Atomic
uint32_t dg_other_errors
;
492 _Atomic
uint32_t dg_nexthops_in
;
493 _Atomic
uint32_t dg_nexthop_errors
;
495 _Atomic
uint32_t dg_lsps_in
;
496 _Atomic
uint32_t dg_lsp_errors
;
498 _Atomic
uint32_t dg_pws_in
;
499 _Atomic
uint32_t dg_pw_errors
;
501 _Atomic
uint32_t dg_br_port_in
;
502 _Atomic
uint32_t dg_br_port_errors
;
504 _Atomic
uint32_t dg_intf_addrs_in
;
505 _Atomic
uint32_t dg_intf_addr_errors
;
507 _Atomic
uint32_t dg_macs_in
;
508 _Atomic
uint32_t dg_mac_errors
;
510 _Atomic
uint32_t dg_neighs_in
;
511 _Atomic
uint32_t dg_neigh_errors
;
513 _Atomic
uint32_t dg_rules_in
;
514 _Atomic
uint32_t dg_rule_errors
;
516 _Atomic
uint32_t dg_update_yields
;
518 _Atomic
uint32_t dg_iptable_in
;
519 _Atomic
uint32_t dg_iptable_errors
;
521 _Atomic
uint32_t dg_ipset_in
;
522 _Atomic
uint32_t dg_ipset_errors
;
523 _Atomic
uint32_t dg_ipset_entry_in
;
524 _Atomic
uint32_t dg_ipset_entry_errors
;
526 _Atomic
uint32_t dg_neightable_in
;
527 _Atomic
uint32_t dg_neightable_errors
;
529 _Atomic
uint32_t dg_gre_set_in
;
530 _Atomic
uint32_t dg_gre_set_errors
;
532 _Atomic
uint32_t dg_intfs_in
;
533 _Atomic
uint32_t dg_intf_errors
;
535 /* Dataplane pthread */
536 struct frr_pthread
*dg_pthread
;
538 /* Event-delivery context 'master' for the dplane */
539 struct thread_master
*dg_master
;
541 /* Event/'thread' pointer for queued updates */
542 struct thread
*dg_t_update
;
544 /* Event pointer for pending shutdown check loop */
545 struct thread
*dg_t_shutdown_check
;
549 /* Instantiate zns list type */
550 DECLARE_DLIST(zns_info_list
, struct dplane_zns_info
, link
);
553 * Lock and unlock for interactions with the zebra 'core' pthread
555 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
556 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
560 * Lock and unlock for individual providers
562 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
563 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
566 static void dplane_thread_loop(struct thread
*event
);
567 static enum zebra_dplane_result
lsp_update_internal(struct zebra_lsp
*lsp
,
568 enum dplane_op_e op
);
569 static enum zebra_dplane_result
pw_update_internal(struct zebra_pw
*pw
,
570 enum dplane_op_e op
);
571 static enum zebra_dplane_result
intf_addr_update_internal(
572 const struct interface
*ifp
, const struct connected
*ifc
,
573 enum dplane_op_e op
);
574 static enum zebra_dplane_result
mac_update_common(
575 enum dplane_op_e op
, const struct interface
*ifp
,
576 const struct interface
*br_ifp
,
577 vlanid_t vid
, const struct ethaddr
*mac
,
578 struct in_addr vtep_ip
, bool sticky
, uint32_t nhg_id
,
579 uint32_t update_flags
);
580 static enum zebra_dplane_result
581 neigh_update_internal(enum dplane_op_e op
, const struct interface
*ifp
,
582 const void *link
, int link_family
,
583 const struct ipaddr
*ip
, uint32_t flags
, uint16_t state
,
584 uint32_t update_flags
, int protocol
);
590 /* Obtain thread_master for dataplane thread */
591 struct thread_master
*dplane_get_thread_master(void)
593 return zdplane_info
.dg_master
;
597 * Allocate a dataplane update context
599 struct zebra_dplane_ctx
*dplane_ctx_alloc(void)
601 struct zebra_dplane_ctx
*p
;
603 /* TODO -- just alloc'ing memory, but would like to maintain
606 p
= XCALLOC(MTYPE_DP_CTX
, sizeof(struct zebra_dplane_ctx
));
611 /* Enable system route notifications */
612 void dplane_enable_sys_route_notifs(void)
614 zdplane_info
.dg_sys_route_notifs
= true;
618 * Clean up dependent/internal allocations inside a context object
620 static void dplane_ctx_free_internal(struct zebra_dplane_ctx
*ctx
)
622 struct dplane_intf_extra
*if_extra
, *if_tmp
;
625 * Some internal allocations may need to be freed, depending on
626 * the type of info captured in the ctx.
628 switch (ctx
->zd_op
) {
629 case DPLANE_OP_ROUTE_INSTALL
:
630 case DPLANE_OP_ROUTE_UPDATE
:
631 case DPLANE_OP_ROUTE_DELETE
:
632 case DPLANE_OP_SYS_ROUTE_ADD
:
633 case DPLANE_OP_SYS_ROUTE_DELETE
:
634 case DPLANE_OP_ROUTE_NOTIFY
:
636 /* Free allocated nexthops */
637 if (ctx
->u
.rinfo
.zd_ng
.nexthop
) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx
->u
.rinfo
.zd_ng
.nexthop
);
641 ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
644 /* Free backup info also (if present) */
645 if (ctx
->u
.rinfo
.backup_ng
.nexthop
) {
646 /* This deals with recursive nexthops too */
647 nexthops_free(ctx
->u
.rinfo
.backup_ng
.nexthop
);
649 ctx
->u
.rinfo
.backup_ng
.nexthop
= NULL
;
652 if (ctx
->u
.rinfo
.zd_old_ng
.nexthop
) {
653 /* This deals with recursive nexthops too */
654 nexthops_free(ctx
->u
.rinfo
.zd_old_ng
.nexthop
);
656 ctx
->u
.rinfo
.zd_old_ng
.nexthop
= NULL
;
659 if (ctx
->u
.rinfo
.old_backup_ng
.nexthop
) {
660 /* This deals with recursive nexthops too */
661 nexthops_free(ctx
->u
.rinfo
.old_backup_ng
.nexthop
);
663 ctx
->u
.rinfo
.old_backup_ng
.nexthop
= NULL
;
666 /* Optional extra interface info */
667 TAILQ_FOREACH_SAFE(if_extra
, &ctx
->u
.rinfo
.intf_extra_q
,
669 TAILQ_REMOVE(&ctx
->u
.rinfo
.intf_extra_q
, if_extra
,
671 XFREE(MTYPE_DP_INTF
, if_extra
);
676 case DPLANE_OP_NH_INSTALL
:
677 case DPLANE_OP_NH_UPDATE
:
678 case DPLANE_OP_NH_DELETE
: {
679 if (ctx
->u
.rinfo
.nhe
.ng
.nexthop
) {
680 /* This deals with recursive nexthops too */
681 nexthops_free(ctx
->u
.rinfo
.nhe
.ng
.nexthop
);
683 ctx
->u
.rinfo
.nhe
.ng
.nexthop
= NULL
;
688 case DPLANE_OP_LSP_INSTALL
:
689 case DPLANE_OP_LSP_UPDATE
:
690 case DPLANE_OP_LSP_DELETE
:
691 case DPLANE_OP_LSP_NOTIFY
:
693 struct zebra_nhlfe
*nhlfe
;
695 /* Unlink and free allocated NHLFEs */
696 frr_each_safe(nhlfe_list
, &ctx
->u
.lsp
.nhlfe_list
, nhlfe
) {
697 nhlfe_list_del(&ctx
->u
.lsp
.nhlfe_list
, nhlfe
);
698 zebra_mpls_nhlfe_free(nhlfe
);
701 /* Unlink and free allocated backup NHLFEs, if present */
702 frr_each_safe(nhlfe_list
,
703 &(ctx
->u
.lsp
.backup_nhlfe_list
), nhlfe
) {
704 nhlfe_list_del(&ctx
->u
.lsp
.backup_nhlfe_list
,
706 zebra_mpls_nhlfe_free(nhlfe
);
709 /* Clear pointers in lsp struct, in case we're caching
710 * free context structs.
712 nhlfe_list_init(&ctx
->u
.lsp
.nhlfe_list
);
713 ctx
->u
.lsp
.best_nhlfe
= NULL
;
714 nhlfe_list_init(&ctx
->u
.lsp
.backup_nhlfe_list
);
719 case DPLANE_OP_PW_INSTALL
:
720 case DPLANE_OP_PW_UNINSTALL
:
721 /* Free allocated nexthops */
722 if (ctx
->u
.pw
.fib_nhg
.nexthop
) {
723 /* This deals with recursive nexthops too */
724 nexthops_free(ctx
->u
.pw
.fib_nhg
.nexthop
);
726 ctx
->u
.pw
.fib_nhg
.nexthop
= NULL
;
728 if (ctx
->u
.pw
.primary_nhg
.nexthop
) {
729 nexthops_free(ctx
->u
.pw
.primary_nhg
.nexthop
);
731 ctx
->u
.pw
.primary_nhg
.nexthop
= NULL
;
733 if (ctx
->u
.pw
.backup_nhg
.nexthop
) {
734 nexthops_free(ctx
->u
.pw
.backup_nhg
.nexthop
);
736 ctx
->u
.pw
.backup_nhg
.nexthop
= NULL
;
740 case DPLANE_OP_ADDR_INSTALL
:
741 case DPLANE_OP_ADDR_UNINSTALL
:
742 case DPLANE_OP_INTF_ADDR_ADD
:
743 case DPLANE_OP_INTF_ADDR_DEL
:
744 /* Maybe free label string, if allocated */
745 if (ctx
->u
.intf
.label
!= NULL
&&
746 ctx
->u
.intf
.label
!= ctx
->u
.intf
.label_buf
) {
747 XFREE(MTYPE_DP_CTX
, ctx
->u
.intf
.label
);
748 ctx
->u
.intf
.label
= NULL
;
752 case DPLANE_OP_MAC_INSTALL
:
753 case DPLANE_OP_MAC_DELETE
:
754 case DPLANE_OP_NEIGH_INSTALL
:
755 case DPLANE_OP_NEIGH_UPDATE
:
756 case DPLANE_OP_NEIGH_DELETE
:
757 case DPLANE_OP_VTEP_ADD
:
758 case DPLANE_OP_VTEP_DELETE
:
759 case DPLANE_OP_RULE_ADD
:
760 case DPLANE_OP_RULE_DELETE
:
761 case DPLANE_OP_RULE_UPDATE
:
762 case DPLANE_OP_NEIGH_DISCOVER
:
763 case DPLANE_OP_BR_PORT_UPDATE
:
764 case DPLANE_OP_NEIGH_IP_INSTALL
:
765 case DPLANE_OP_NEIGH_IP_DELETE
:
767 case DPLANE_OP_IPSET_ADD
:
768 case DPLANE_OP_IPSET_DELETE
:
769 case DPLANE_OP_INTF_INSTALL
:
770 case DPLANE_OP_INTF_UPDATE
:
771 case DPLANE_OP_INTF_DELETE
:
774 case DPLANE_OP_IPSET_ENTRY_ADD
:
775 case DPLANE_OP_IPSET_ENTRY_DELETE
:
777 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
779 case DPLANE_OP_IPTABLE_ADD
:
780 case DPLANE_OP_IPTABLE_DELETE
:
781 if (ctx
->u
.iptable
.interface_name_list
) {
782 struct listnode
*node
, *nnode
;
785 for (ALL_LIST_ELEMENTS(
786 ctx
->u
.iptable
.interface_name_list
, node
,
789 ctx
->u
.iptable
.interface_name_list
,
791 XFREE(MTYPE_DP_NETFILTER
, ifname
);
793 list_delete(&ctx
->u
.iptable
.interface_name_list
);
796 case DPLANE_OP_GRE_SET
:
797 case DPLANE_OP_INTF_NETCONFIG
:
803 * Free a dataplane results context.
805 static void dplane_ctx_free(struct zebra_dplane_ctx
**pctx
)
810 DPLANE_CTX_VALID(*pctx
);
812 /* TODO -- just freeing memory, but would like to maintain
816 /* Some internal allocations may need to be freed, depending on
817 * the type of info captured in the ctx.
819 dplane_ctx_free_internal(*pctx
);
821 XFREE(MTYPE_DP_CTX
, *pctx
);
825 * Reset an allocated context object for re-use. All internal allocations are
826 * freed and the context is memset.
828 void dplane_ctx_reset(struct zebra_dplane_ctx
*ctx
)
830 dplane_ctx_free_internal(ctx
);
831 memset(ctx
, 0, sizeof(*ctx
));
835 * Return a context block to the dplane module after processing
837 void dplane_ctx_fini(struct zebra_dplane_ctx
**pctx
)
839 /* TODO -- maintain pool; for now, just free */
840 dplane_ctx_free(pctx
);
843 /* Enqueue a context block */
844 void dplane_ctx_enqueue_tail(struct dplane_ctx_q
*q
,
845 const struct zebra_dplane_ctx
*ctx
)
847 TAILQ_INSERT_TAIL(q
, (struct zebra_dplane_ctx
*)ctx
, zd_q_entries
);
850 /* Append a list of context blocks to another list */
851 void dplane_ctx_list_append(struct dplane_ctx_q
*to_list
,
852 struct dplane_ctx_q
*from_list
)
854 if (TAILQ_FIRST(from_list
)) {
855 TAILQ_CONCAT(to_list
, from_list
, zd_q_entries
);
857 /* And clear 'from' list */
858 TAILQ_INIT(from_list
);
862 struct zebra_dplane_ctx
*dplane_ctx_get_head(struct dplane_ctx_q
*q
)
864 struct zebra_dplane_ctx
*ctx
= TAILQ_FIRST(q
);
869 /* Dequeue a context block from the head of a list */
870 struct zebra_dplane_ctx
*dplane_ctx_dequeue(struct dplane_ctx_q
*q
)
872 struct zebra_dplane_ctx
*ctx
= TAILQ_FIRST(q
);
875 TAILQ_REMOVE(q
, ctx
, zd_q_entries
);
881 * Accessors for information from the context object
883 enum zebra_dplane_result
dplane_ctx_get_status(
884 const struct zebra_dplane_ctx
*ctx
)
886 DPLANE_CTX_VALID(ctx
);
888 return ctx
->zd_status
;
891 void dplane_ctx_set_status(struct zebra_dplane_ctx
*ctx
,
892 enum zebra_dplane_result status
)
894 DPLANE_CTX_VALID(ctx
);
896 ctx
->zd_status
= status
;
899 /* Retrieve last/current provider id */
900 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx
*ctx
)
902 DPLANE_CTX_VALID(ctx
);
903 return ctx
->zd_provider
;
906 /* Providers run before the kernel can control whether a kernel
907 * update should be done.
909 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx
*ctx
)
911 DPLANE_CTX_VALID(ctx
);
913 SET_FLAG(ctx
->zd_flags
, DPLANE_CTX_FLAG_NO_KERNEL
);
916 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx
*ctx
)
918 DPLANE_CTX_VALID(ctx
);
920 return CHECK_FLAG(ctx
->zd_flags
, DPLANE_CTX_FLAG_NO_KERNEL
);
923 void dplane_ctx_set_op(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
)
925 DPLANE_CTX_VALID(ctx
);
929 enum dplane_op_e
dplane_ctx_get_op(const struct zebra_dplane_ctx
*ctx
)
931 DPLANE_CTX_VALID(ctx
);
936 const char *dplane_op2str(enum dplane_op_e op
)
938 const char *ret
= "UNKNOWN";
946 case DPLANE_OP_ROUTE_INSTALL
:
947 ret
= "ROUTE_INSTALL";
949 case DPLANE_OP_ROUTE_UPDATE
:
950 ret
= "ROUTE_UPDATE";
952 case DPLANE_OP_ROUTE_DELETE
:
953 ret
= "ROUTE_DELETE";
955 case DPLANE_OP_ROUTE_NOTIFY
:
956 ret
= "ROUTE_NOTIFY";
960 case DPLANE_OP_NH_INSTALL
:
963 case DPLANE_OP_NH_UPDATE
:
966 case DPLANE_OP_NH_DELETE
:
970 case DPLANE_OP_LSP_INSTALL
:
973 case DPLANE_OP_LSP_UPDATE
:
976 case DPLANE_OP_LSP_DELETE
:
979 case DPLANE_OP_LSP_NOTIFY
:
983 case DPLANE_OP_PW_INSTALL
:
986 case DPLANE_OP_PW_UNINSTALL
:
987 ret
= "PW_UNINSTALL";
990 case DPLANE_OP_SYS_ROUTE_ADD
:
991 ret
= "SYS_ROUTE_ADD";
993 case DPLANE_OP_SYS_ROUTE_DELETE
:
994 ret
= "SYS_ROUTE_DEL";
997 case DPLANE_OP_BR_PORT_UPDATE
:
998 ret
= "BR_PORT_UPDATE";
1001 case DPLANE_OP_ADDR_INSTALL
:
1002 ret
= "ADDR_INSTALL";
1004 case DPLANE_OP_ADDR_UNINSTALL
:
1005 ret
= "ADDR_UNINSTALL";
1008 case DPLANE_OP_MAC_INSTALL
:
1009 ret
= "MAC_INSTALL";
1011 case DPLANE_OP_MAC_DELETE
:
1015 case DPLANE_OP_NEIGH_INSTALL
:
1016 ret
= "NEIGH_INSTALL";
1018 case DPLANE_OP_NEIGH_UPDATE
:
1019 ret
= "NEIGH_UPDATE";
1021 case DPLANE_OP_NEIGH_DELETE
:
1022 ret
= "NEIGH_DELETE";
1024 case DPLANE_OP_VTEP_ADD
:
1027 case DPLANE_OP_VTEP_DELETE
:
1028 ret
= "VTEP_DELETE";
1031 case DPLANE_OP_RULE_ADD
:
1034 case DPLANE_OP_RULE_DELETE
:
1035 ret
= "RULE_DELETE";
1037 case DPLANE_OP_RULE_UPDATE
:
1038 ret
= "RULE_UPDATE";
1041 case DPLANE_OP_NEIGH_DISCOVER
:
1042 ret
= "NEIGH_DISCOVER";
1045 case DPLANE_OP_IPTABLE_ADD
:
1046 ret
= "IPTABLE_ADD";
1048 case DPLANE_OP_IPTABLE_DELETE
:
1049 ret
= "IPTABLE_DELETE";
1051 case DPLANE_OP_IPSET_ADD
:
1054 case DPLANE_OP_IPSET_DELETE
:
1055 ret
= "IPSET_DELETE";
1057 case DPLANE_OP_IPSET_ENTRY_ADD
:
1058 ret
= "IPSET_ENTRY_ADD";
1060 case DPLANE_OP_IPSET_ENTRY_DELETE
:
1061 ret
= "IPSET_ENTRY_DELETE";
1063 case DPLANE_OP_NEIGH_IP_INSTALL
:
1064 ret
= "NEIGH_IP_INSTALL";
1066 case DPLANE_OP_NEIGH_IP_DELETE
:
1067 ret
= "NEIGH_IP_DELETE";
1069 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
1070 ret
= "NEIGH_TABLE_UPDATE";
1073 case DPLANE_OP_GRE_SET
:
1077 case DPLANE_OP_INTF_ADDR_ADD
:
1078 return "INTF_ADDR_ADD";
1080 case DPLANE_OP_INTF_ADDR_DEL
:
1081 return "INTF_ADDR_DEL";
1083 case DPLANE_OP_INTF_NETCONFIG
:
1084 return "INTF_NETCONFIG";
1086 case DPLANE_OP_INTF_INSTALL
:
1087 ret
= "INTF_INSTALL";
1089 case DPLANE_OP_INTF_UPDATE
:
1090 ret
= "INTF_UPDATE";
1092 case DPLANE_OP_INTF_DELETE
:
1093 ret
= "INTF_DELETE";
1100 const char *dplane_res2str(enum zebra_dplane_result res
)
1102 const char *ret
= "<Unknown>";
1105 case ZEBRA_DPLANE_REQUEST_FAILURE
:
1108 case ZEBRA_DPLANE_REQUEST_QUEUED
:
1111 case ZEBRA_DPLANE_REQUEST_SUCCESS
:
1119 void dplane_ctx_set_dest(struct zebra_dplane_ctx
*ctx
,
1120 const struct prefix
*dest
)
1122 DPLANE_CTX_VALID(ctx
);
1124 prefix_copy(&(ctx
->u
.rinfo
.zd_dest
), dest
);
1127 const struct prefix
*dplane_ctx_get_dest(const struct zebra_dplane_ctx
*ctx
)
1129 DPLANE_CTX_VALID(ctx
);
1131 return &(ctx
->u
.rinfo
.zd_dest
);
1134 void dplane_ctx_set_src(struct zebra_dplane_ctx
*ctx
, const struct prefix
*src
)
1136 DPLANE_CTX_VALID(ctx
);
1139 prefix_copy(&(ctx
->u
.rinfo
.zd_src
), src
);
1141 memset(&(ctx
->u
.rinfo
.zd_src
), 0, sizeof(struct prefix
));
1144 /* Source prefix is a little special - return NULL for "no src prefix" */
1145 const struct prefix
*dplane_ctx_get_src(const struct zebra_dplane_ctx
*ctx
)
1147 DPLANE_CTX_VALID(ctx
);
1149 if (ctx
->u
.rinfo
.zd_src
.prefixlen
== 0 &&
1150 IN6_IS_ADDR_UNSPECIFIED(&(ctx
->u
.rinfo
.zd_src
.u
.prefix6
))) {
1153 return &(ctx
->u
.rinfo
.zd_src
);
1157 bool dplane_ctx_is_update(const struct zebra_dplane_ctx
*ctx
)
1159 DPLANE_CTX_VALID(ctx
);
1161 return ctx
->zd_is_update
;
1164 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx
*ctx
)
1166 DPLANE_CTX_VALID(ctx
);
1171 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx
*ctx
)
1173 DPLANE_CTX_VALID(ctx
);
1175 return ctx
->zd_old_seq
;
1178 void dplane_ctx_set_vrf(struct zebra_dplane_ctx
*ctx
, vrf_id_t vrf
)
1180 DPLANE_CTX_VALID(ctx
);
1182 ctx
->zd_vrf_id
= vrf
;
1185 vrf_id_t
dplane_ctx_get_vrf(const struct zebra_dplane_ctx
*ctx
)
1187 DPLANE_CTX_VALID(ctx
);
1189 return ctx
->zd_vrf_id
;
1192 /* In some paths we have only a namespace id */
1193 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx
*ctx
, ns_id_t nsid
)
1195 DPLANE_CTX_VALID(ctx
);
1197 ctx
->zd_ns_info
.ns_id
= nsid
;
1200 ns_id_t
dplane_ctx_get_ns_id(const struct zebra_dplane_ctx
*ctx
)
1202 DPLANE_CTX_VALID(ctx
);
1204 return ctx
->zd_ns_info
.ns_id
;
1207 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx
*ctx
)
1209 DPLANE_CTX_VALID(ctx
);
1211 return (ctx
->zd_notif_provider
!= 0);
1214 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx
*ctx
)
1216 DPLANE_CTX_VALID(ctx
);
1218 return ctx
->zd_notif_provider
;
1221 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx
*ctx
,
1224 DPLANE_CTX_VALID(ctx
);
1226 ctx
->zd_notif_provider
= id
;
1229 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx
*ctx
)
1231 DPLANE_CTX_VALID(ctx
);
1233 return ctx
->zd_ifname
;
1236 void dplane_ctx_set_ifname(struct zebra_dplane_ctx
*ctx
, const char *ifname
)
1238 DPLANE_CTX_VALID(ctx
);
1243 strlcpy(ctx
->zd_ifname
, ifname
, sizeof(ctx
->zd_ifname
));
1246 ifindex_t
dplane_ctx_get_ifindex(const struct zebra_dplane_ctx
*ctx
)
1248 DPLANE_CTX_VALID(ctx
);
1250 return ctx
->zd_ifindex
;
1253 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx
*ctx
, ifindex_t ifindex
)
1255 DPLANE_CTX_VALID(ctx
);
1257 ctx
->zd_ifindex
= ifindex
;
1260 void dplane_ctx_set_type(struct zebra_dplane_ctx
*ctx
, int type
)
1262 DPLANE_CTX_VALID(ctx
);
1264 ctx
->u
.rinfo
.zd_type
= type
;
1267 int dplane_ctx_get_type(const struct zebra_dplane_ctx
*ctx
)
1269 DPLANE_CTX_VALID(ctx
);
1271 return ctx
->u
.rinfo
.zd_type
;
1274 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx
*ctx
)
1276 DPLANE_CTX_VALID(ctx
);
1278 return ctx
->u
.rinfo
.zd_old_type
;
1281 void dplane_ctx_set_afi(struct zebra_dplane_ctx
*ctx
, afi_t afi
)
1283 DPLANE_CTX_VALID(ctx
);
1285 ctx
->u
.rinfo
.zd_afi
= afi
;
1288 afi_t
dplane_ctx_get_afi(const struct zebra_dplane_ctx
*ctx
)
1290 DPLANE_CTX_VALID(ctx
);
1292 return ctx
->u
.rinfo
.zd_afi
;
1295 void dplane_ctx_set_safi(struct zebra_dplane_ctx
*ctx
, safi_t safi
)
1297 DPLANE_CTX_VALID(ctx
);
1299 ctx
->u
.rinfo
.zd_safi
= safi
;
1302 safi_t
dplane_ctx_get_safi(const struct zebra_dplane_ctx
*ctx
)
1304 DPLANE_CTX_VALID(ctx
);
1306 return ctx
->u
.rinfo
.zd_safi
;
1309 void dplane_ctx_set_table(struct zebra_dplane_ctx
*ctx
, uint32_t table
)
1311 DPLANE_CTX_VALID(ctx
);
1313 ctx
->zd_table_id
= table
;
1316 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx
*ctx
)
1318 DPLANE_CTX_VALID(ctx
);
1320 return ctx
->zd_table_id
;
1323 route_tag_t
dplane_ctx_get_tag(const struct zebra_dplane_ctx
*ctx
)
1325 DPLANE_CTX_VALID(ctx
);
1327 return ctx
->u
.rinfo
.zd_tag
;
1330 void dplane_ctx_set_tag(struct zebra_dplane_ctx
*ctx
, route_tag_t tag
)
1332 DPLANE_CTX_VALID(ctx
);
1334 ctx
->u
.rinfo
.zd_tag
= tag
;
1337 route_tag_t
dplane_ctx_get_old_tag(const struct zebra_dplane_ctx
*ctx
)
1339 DPLANE_CTX_VALID(ctx
);
1341 return ctx
->u
.rinfo
.zd_old_tag
;
1344 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx
*ctx
)
1346 DPLANE_CTX_VALID(ctx
);
1348 return ctx
->u
.rinfo
.zd_instance
;
1351 void dplane_ctx_set_instance(struct zebra_dplane_ctx
*ctx
, uint16_t instance
)
1353 DPLANE_CTX_VALID(ctx
);
1355 ctx
->u
.rinfo
.zd_instance
= instance
;
1358 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx
*ctx
)
1360 DPLANE_CTX_VALID(ctx
);
1362 return ctx
->u
.rinfo
.zd_old_instance
;
1365 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx
*ctx
)
1367 DPLANE_CTX_VALID(ctx
);
1369 return ctx
->u
.rinfo
.zd_metric
;
1372 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx
*ctx
)
1374 DPLANE_CTX_VALID(ctx
);
1376 return ctx
->u
.rinfo
.zd_old_metric
;
1379 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx
*ctx
)
1381 DPLANE_CTX_VALID(ctx
);
1383 return ctx
->u
.rinfo
.zd_mtu
;
1386 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx
*ctx
)
1388 DPLANE_CTX_VALID(ctx
);
1390 return ctx
->u
.rinfo
.zd_nexthop_mtu
;
1393 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx
*ctx
)
1395 DPLANE_CTX_VALID(ctx
);
1397 return ctx
->u
.rinfo
.zd_distance
;
1400 void dplane_ctx_set_distance(struct zebra_dplane_ctx
*ctx
, uint8_t distance
)
1402 DPLANE_CTX_VALID(ctx
);
1404 ctx
->u
.rinfo
.zd_distance
= distance
;
1407 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx
*ctx
)
1409 DPLANE_CTX_VALID(ctx
);
1411 return ctx
->u
.rinfo
.zd_old_distance
;
1415 * Set the nexthops associated with a context: note that processing code
1416 * may well expect that nexthops are in canonical (sorted) order, so we
1417 * will enforce that here.
1419 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx
*ctx
, struct nexthop
*nh
)
1421 DPLANE_CTX_VALID(ctx
);
1423 if (ctx
->u
.rinfo
.zd_ng
.nexthop
) {
1424 nexthops_free(ctx
->u
.rinfo
.zd_ng
.nexthop
);
1425 ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
1427 nexthop_group_copy_nh_sorted(&(ctx
->u
.rinfo
.zd_ng
), nh
);
1431 * Set the list of backup nexthops; their ordering is preserved (they're not
1434 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx
*ctx
,
1435 const struct nexthop_group
*nhg
)
1437 struct nexthop
*nh
, *last_nh
, *nexthop
;
1439 DPLANE_CTX_VALID(ctx
);
1441 if (ctx
->u
.rinfo
.backup_ng
.nexthop
) {
1442 nexthops_free(ctx
->u
.rinfo
.backup_ng
.nexthop
);
1443 ctx
->u
.rinfo
.backup_ng
.nexthop
= NULL
;
1448 /* Be careful to preserve the order of the backup list */
1449 for (nh
= nhg
->nexthop
; nh
; nh
= nh
->next
) {
1450 nexthop
= nexthop_dup(nh
, NULL
);
1453 NEXTHOP_APPEND(last_nh
, nexthop
);
1455 ctx
->u
.rinfo
.backup_ng
.nexthop
= nexthop
;
1461 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx
*ctx
)
1463 DPLANE_CTX_VALID(ctx
);
1464 return ctx
->u
.rinfo
.zd_nhg_id
;
1467 const struct nexthop_group
*dplane_ctx_get_ng(
1468 const struct zebra_dplane_ctx
*ctx
)
1470 DPLANE_CTX_VALID(ctx
);
1472 return &(ctx
->u
.rinfo
.zd_ng
);
1475 const struct nexthop_group
*
1476 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx
*ctx
)
1478 DPLANE_CTX_VALID(ctx
);
1480 return &(ctx
->u
.rinfo
.backup_ng
);
1483 const struct nexthop_group
*
1484 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx
*ctx
)
1486 DPLANE_CTX_VALID(ctx
);
1488 return &(ctx
->u
.rinfo
.zd_old_ng
);
1491 const struct nexthop_group
*
1492 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx
*ctx
)
1494 DPLANE_CTX_VALID(ctx
);
1496 return &(ctx
->u
.rinfo
.old_backup_ng
);
1499 const struct zebra_dplane_info
*dplane_ctx_get_ns(
1500 const struct zebra_dplane_ctx
*ctx
)
1502 DPLANE_CTX_VALID(ctx
);
1504 return &(ctx
->zd_ns_info
);
1507 int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx
*ctx
)
1509 DPLANE_CTX_VALID(ctx
);
1512 return ctx
->zd_ns_info
.sock
;
1518 /* Accessors for nexthop information */
1519 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx
*ctx
)
1521 DPLANE_CTX_VALID(ctx
);
1522 return ctx
->u
.rinfo
.nhe
.id
;
1525 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx
*ctx
)
1527 DPLANE_CTX_VALID(ctx
);
1528 return ctx
->u
.rinfo
.nhe
.old_id
;
1531 afi_t
dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx
*ctx
)
1533 DPLANE_CTX_VALID(ctx
);
1534 return ctx
->u
.rinfo
.nhe
.afi
;
1537 vrf_id_t
dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx
*ctx
)
1539 DPLANE_CTX_VALID(ctx
);
1540 return ctx
->u
.rinfo
.nhe
.vrf_id
;
1543 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx
*ctx
)
1545 DPLANE_CTX_VALID(ctx
);
1546 return ctx
->u
.rinfo
.nhe
.type
;
1549 const struct nexthop_group
*
1550 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx
*ctx
)
1552 DPLANE_CTX_VALID(ctx
);
1553 return &(ctx
->u
.rinfo
.nhe
.ng
);
1556 const struct nh_grp
*
1557 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx
*ctx
)
1559 DPLANE_CTX_VALID(ctx
);
1560 return ctx
->u
.rinfo
.nhe
.nh_grp
;
1563 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx
*ctx
)
1565 DPLANE_CTX_VALID(ctx
);
1566 return ctx
->u
.rinfo
.nhe
.nh_grp_count
;
1569 /* Accessors for LSP information */
1571 mpls_label_t
dplane_ctx_get_in_label(const struct zebra_dplane_ctx
*ctx
)
1573 DPLANE_CTX_VALID(ctx
);
1575 return ctx
->u
.lsp
.ile
.in_label
;
1578 void dplane_ctx_set_in_label(struct zebra_dplane_ctx
*ctx
, mpls_label_t label
)
1580 DPLANE_CTX_VALID(ctx
);
1582 ctx
->u
.lsp
.ile
.in_label
= label
;
1585 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx
*ctx
)
1587 DPLANE_CTX_VALID(ctx
);
1589 return ctx
->u
.lsp
.addr_family
;
1592 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx
*ctx
,
1595 DPLANE_CTX_VALID(ctx
);
1597 ctx
->u
.lsp
.addr_family
= family
;
1600 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx
*ctx
)
1602 DPLANE_CTX_VALID(ctx
);
1604 return ctx
->u
.lsp
.flags
;
1607 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx
*ctx
,
1610 DPLANE_CTX_VALID(ctx
);
1612 ctx
->u
.lsp
.flags
= flags
;
1615 const struct nhlfe_list_head
*dplane_ctx_get_nhlfe_list(
1616 const struct zebra_dplane_ctx
*ctx
)
1618 DPLANE_CTX_VALID(ctx
);
1619 return &(ctx
->u
.lsp
.nhlfe_list
);
1622 const struct nhlfe_list_head
*dplane_ctx_get_backup_nhlfe_list(
1623 const struct zebra_dplane_ctx
*ctx
)
1625 DPLANE_CTX_VALID(ctx
);
1626 return &(ctx
->u
.lsp
.backup_nhlfe_list
);
1629 struct zebra_nhlfe
*dplane_ctx_add_nhlfe(struct zebra_dplane_ctx
*ctx
,
1630 enum lsp_types_t lsp_type
,
1631 enum nexthop_types_t nh_type
,
1632 const union g_addr
*gate
,
1633 ifindex_t ifindex
, uint8_t num_labels
,
1634 mpls_label_t
*out_labels
)
1636 struct zebra_nhlfe
*nhlfe
;
1638 DPLANE_CTX_VALID(ctx
);
1640 nhlfe
= zebra_mpls_lsp_add_nhlfe(&(ctx
->u
.lsp
),
1641 lsp_type
, nh_type
, gate
,
1642 ifindex
, num_labels
, out_labels
);
1647 struct zebra_nhlfe
*dplane_ctx_add_backup_nhlfe(
1648 struct zebra_dplane_ctx
*ctx
, enum lsp_types_t lsp_type
,
1649 enum nexthop_types_t nh_type
, const union g_addr
*gate
,
1650 ifindex_t ifindex
, uint8_t num_labels
, mpls_label_t
*out_labels
)
1652 struct zebra_nhlfe
*nhlfe
;
1654 DPLANE_CTX_VALID(ctx
);
1656 nhlfe
= zebra_mpls_lsp_add_backup_nhlfe(&(ctx
->u
.lsp
),
1657 lsp_type
, nh_type
, gate
,
1658 ifindex
, num_labels
,
1664 const struct zebra_nhlfe
*
1665 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx
*ctx
)
1667 DPLANE_CTX_VALID(ctx
);
1669 return ctx
->u
.lsp
.best_nhlfe
;
1672 const struct zebra_nhlfe
*
1673 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx
*ctx
,
1674 struct zebra_nhlfe
*nhlfe
)
1676 DPLANE_CTX_VALID(ctx
);
1678 ctx
->u
.lsp
.best_nhlfe
= nhlfe
;
1679 return ctx
->u
.lsp
.best_nhlfe
;
1682 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx
*ctx
)
1684 DPLANE_CTX_VALID(ctx
);
1686 return ctx
->u
.lsp
.num_ecmp
;
1689 mpls_label_t
dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx
*ctx
)
1691 DPLANE_CTX_VALID(ctx
);
1693 return ctx
->u
.pw
.local_label
;
1696 mpls_label_t
dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx
*ctx
)
1698 DPLANE_CTX_VALID(ctx
);
1700 return ctx
->u
.pw
.remote_label
;
1703 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx
*ctx
)
1705 DPLANE_CTX_VALID(ctx
);
1707 return ctx
->u
.pw
.type
;
1710 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx
*ctx
)
1712 DPLANE_CTX_VALID(ctx
);
1714 return ctx
->u
.pw
.af
;
1717 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx
*ctx
)
1719 DPLANE_CTX_VALID(ctx
);
1721 return ctx
->u
.pw
.flags
;
1724 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx
*ctx
)
1726 DPLANE_CTX_VALID(ctx
);
1728 return ctx
->u
.pw
.status
;
1731 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx
*ctx
, int status
)
1733 DPLANE_CTX_VALID(ctx
);
1735 ctx
->u
.pw
.status
= status
;
1738 const union g_addr
*dplane_ctx_get_pw_dest(
1739 const struct zebra_dplane_ctx
*ctx
)
1741 DPLANE_CTX_VALID(ctx
);
1743 return &(ctx
->u
.pw
.dest
);
1746 const union pw_protocol_fields
*dplane_ctx_get_pw_proto(
1747 const struct zebra_dplane_ctx
*ctx
)
1749 DPLANE_CTX_VALID(ctx
);
1751 return &(ctx
->u
.pw
.fields
);
1754 const struct nexthop_group
*
1755 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx
*ctx
)
1757 DPLANE_CTX_VALID(ctx
);
1759 return &(ctx
->u
.pw
.fib_nhg
);
1762 const struct nexthop_group
*
1763 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx
*ctx
)
1765 DPLANE_CTX_VALID(ctx
);
1767 return &(ctx
->u
.pw
.primary_nhg
);
1770 const struct nexthop_group
*
1771 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx
*ctx
)
1773 DPLANE_CTX_VALID(ctx
);
1775 return &(ctx
->u
.pw
.backup_nhg
);
1778 /* Accessors for interface information */
1779 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx
*ctx
)
1781 DPLANE_CTX_VALID(ctx
);
1783 return ctx
->u
.intf
.metric
;
1786 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx
*ctx
, uint32_t metric
)
1788 DPLANE_CTX_VALID(ctx
);
1790 ctx
->u
.intf
.metric
= metric
;
1793 uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx
*ctx
)
1795 DPLANE_CTX_VALID(ctx
);
1797 return ctx
->u
.intf
.pd_reason_val
;
1800 void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx
*ctx
, bool val
)
1802 DPLANE_CTX_VALID(ctx
);
1804 ctx
->u
.intf
.pd_reason_val
= val
;
1807 bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx
*ctx
)
1809 DPLANE_CTX_VALID(ctx
);
1811 return ctx
->u
.intf
.protodown
;
1814 /* Is interface addr p2p? */
1815 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx
*ctx
)
1817 DPLANE_CTX_VALID(ctx
);
1819 return (ctx
->u
.intf
.flags
& DPLANE_INTF_CONNECTED
);
1822 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx
*ctx
)
1824 DPLANE_CTX_VALID(ctx
);
1826 return (ctx
->u
.intf
.flags
& DPLANE_INTF_SECONDARY
);
1829 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx
*ctx
)
1831 DPLANE_CTX_VALID(ctx
);
1833 return (ctx
->u
.intf
.flags
& DPLANE_INTF_BROADCAST
);
1836 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx
*ctx
)
1838 DPLANE_CTX_VALID(ctx
);
1840 ctx
->u
.intf
.flags
|= DPLANE_INTF_CONNECTED
;
1843 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx
*ctx
)
1845 DPLANE_CTX_VALID(ctx
);
1847 ctx
->u
.intf
.flags
|= DPLANE_INTF_SECONDARY
;
1850 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx
*ctx
)
1852 DPLANE_CTX_VALID(ctx
);
1854 ctx
->u
.intf
.flags
|= DPLANE_INTF_BROADCAST
;
1857 const struct prefix
*dplane_ctx_get_intf_addr(
1858 const struct zebra_dplane_ctx
*ctx
)
1860 DPLANE_CTX_VALID(ctx
);
1862 return &(ctx
->u
.intf
.prefix
);
1865 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx
*ctx
,
1866 const struct prefix
*p
)
1868 DPLANE_CTX_VALID(ctx
);
1870 prefix_copy(&(ctx
->u
.intf
.prefix
), p
);
1873 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx
*ctx
)
1875 DPLANE_CTX_VALID(ctx
);
1877 return (ctx
->u
.intf
.flags
& DPLANE_INTF_HAS_DEST
);
1880 const struct prefix
*dplane_ctx_get_intf_dest(
1881 const struct zebra_dplane_ctx
*ctx
)
1883 DPLANE_CTX_VALID(ctx
);
1885 return &(ctx
->u
.intf
.dest_prefix
);
1888 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx
*ctx
,
1889 const struct prefix
*p
)
1891 DPLANE_CTX_VALID(ctx
);
1893 prefix_copy(&(ctx
->u
.intf
.dest_prefix
), p
);
1896 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx
*ctx
)
1898 DPLANE_CTX_VALID(ctx
);
1900 return (ctx
->u
.intf
.flags
& DPLANE_INTF_HAS_LABEL
);
1903 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx
*ctx
)
1905 DPLANE_CTX_VALID(ctx
);
1907 return ctx
->u
.intf
.label
;
1910 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx
*ctx
, const char *label
)
1914 DPLANE_CTX_VALID(ctx
);
1916 if (ctx
->u
.intf
.label
&& ctx
->u
.intf
.label
!= ctx
->u
.intf
.label_buf
)
1917 XFREE(MTYPE_DP_CTX
, ctx
->u
.intf
.label
);
1919 ctx
->u
.intf
.label
= NULL
;
1922 ctx
->u
.intf
.flags
|= DPLANE_INTF_HAS_LABEL
;
1924 /* Use embedded buffer if it's adequate; else allocate. */
1925 len
= strlen(label
);
1927 if (len
< sizeof(ctx
->u
.intf
.label_buf
)) {
1928 strlcpy(ctx
->u
.intf
.label_buf
, label
,
1929 sizeof(ctx
->u
.intf
.label_buf
));
1930 ctx
->u
.intf
.label
= ctx
->u
.intf
.label_buf
;
1932 ctx
->u
.intf
.label
= XSTRDUP(MTYPE_DP_CTX
, label
);
1935 ctx
->u
.intf
.flags
&= ~DPLANE_INTF_HAS_LABEL
;
1939 /* Accessors for MAC information */
1940 vlanid_t
dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx
*ctx
)
1942 DPLANE_CTX_VALID(ctx
);
1943 return ctx
->u
.macinfo
.vid
;
1946 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx
*ctx
)
1948 DPLANE_CTX_VALID(ctx
);
1949 return ctx
->u
.macinfo
.is_sticky
;
1952 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx
*ctx
)
1954 DPLANE_CTX_VALID(ctx
);
1955 return ctx
->u
.macinfo
.nhg_id
;
1958 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx
*ctx
)
1960 DPLANE_CTX_VALID(ctx
);
1961 return ctx
->u
.macinfo
.update_flags
;
1964 const struct ethaddr
*dplane_ctx_mac_get_addr(
1965 const struct zebra_dplane_ctx
*ctx
)
1967 DPLANE_CTX_VALID(ctx
);
1968 return &(ctx
->u
.macinfo
.mac
);
1971 const struct in_addr
*dplane_ctx_mac_get_vtep_ip(
1972 const struct zebra_dplane_ctx
*ctx
)
1974 DPLANE_CTX_VALID(ctx
);
1975 return &(ctx
->u
.macinfo
.vtep_ip
);
1978 ifindex_t
dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx
*ctx
)
1980 DPLANE_CTX_VALID(ctx
);
1981 return ctx
->u
.macinfo
.br_ifindex
;
1984 /* Accessors for neighbor information */
1985 const struct ipaddr
*dplane_ctx_neigh_get_ipaddr(
1986 const struct zebra_dplane_ctx
*ctx
)
1988 DPLANE_CTX_VALID(ctx
);
1989 return &(ctx
->u
.neigh
.ip_addr
);
1992 const struct ipaddr
*
1993 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx
*ctx
)
1995 DPLANE_CTX_VALID(ctx
);
1996 return &(ctx
->u
.neigh
.link
.ip_addr
);
1999 const struct ethaddr
*dplane_ctx_neigh_get_mac(
2000 const struct zebra_dplane_ctx
*ctx
)
2002 DPLANE_CTX_VALID(ctx
);
2003 return &(ctx
->u
.neigh
.link
.mac
);
2006 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx
*ctx
)
2008 DPLANE_CTX_VALID(ctx
);
2009 return ctx
->u
.neigh
.flags
;
2012 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx
*ctx
)
2014 DPLANE_CTX_VALID(ctx
);
2015 return ctx
->u
.neigh
.state
;
2018 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx
*ctx
)
2020 DPLANE_CTX_VALID(ctx
);
2021 return ctx
->u
.neigh
.update_flags
;
2024 /* Accessor for GRE set */
2026 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx
*ctx
)
2028 DPLANE_CTX_VALID(ctx
);
2030 return ctx
->u
.gre
.link_ifindex
;
2034 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx
*ctx
)
2036 DPLANE_CTX_VALID(ctx
);
2038 return ctx
->u
.gre
.mtu
;
2041 const struct zebra_l2info_gre
*
2042 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx
*ctx
)
2044 DPLANE_CTX_VALID(ctx
);
2046 return &ctx
->u
.gre
.info
;
2049 /* Accessors for PBR rule information */
2050 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx
*ctx
)
2052 DPLANE_CTX_VALID(ctx
);
2054 return ctx
->u
.rule
.sock
;
2057 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx
*ctx
)
2059 DPLANE_CTX_VALID(ctx
);
2061 return ctx
->u
.rule
.new.ifname
;
2064 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx
*ctx
)
2066 DPLANE_CTX_VALID(ctx
);
2068 return ctx
->u
.rule
.unique
;
2071 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx
*ctx
)
2073 DPLANE_CTX_VALID(ctx
);
2075 return ctx
->u
.rule
.seq
;
2078 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx
*ctx
)
2080 DPLANE_CTX_VALID(ctx
);
2082 return ctx
->u
.rule
.new.priority
;
2085 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx
*ctx
)
2087 DPLANE_CTX_VALID(ctx
);
2089 return ctx
->u
.rule
.old
.priority
;
2092 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx
*ctx
)
2094 DPLANE_CTX_VALID(ctx
);
2096 return ctx
->u
.rule
.new.table
;
2099 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx
*ctx
)
2101 DPLANE_CTX_VALID(ctx
);
2103 return ctx
->u
.rule
.old
.table
;
2106 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx
*ctx
)
2108 DPLANE_CTX_VALID(ctx
);
2110 return ctx
->u
.rule
.new.filter_bm
;
2113 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx
*ctx
)
2115 DPLANE_CTX_VALID(ctx
);
2117 return ctx
->u
.rule
.old
.filter_bm
;
2120 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx
*ctx
)
2122 DPLANE_CTX_VALID(ctx
);
2124 return ctx
->u
.rule
.new.fwmark
;
2127 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx
*ctx
)
2129 DPLANE_CTX_VALID(ctx
);
2131 return ctx
->u
.rule
.old
.fwmark
;
2134 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx
*ctx
)
2136 DPLANE_CTX_VALID(ctx
);
2138 return ctx
->u
.rule
.new.ip_proto
;
2141 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx
*ctx
)
2143 DPLANE_CTX_VALID(ctx
);
2145 return ctx
->u
.rule
.old
.ip_proto
;
2148 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx
*ctx
)
2150 DPLANE_CTX_VALID(ctx
);
2152 return ctx
->u
.rule
.new.dsfield
;
2155 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx
*ctx
)
2157 DPLANE_CTX_VALID(ctx
);
2159 return ctx
->u
.rule
.old
.dsfield
;
2162 const struct prefix
*
2163 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx
*ctx
)
2165 DPLANE_CTX_VALID(ctx
);
2167 return &(ctx
->u
.rule
.new.src_ip
);
2170 const struct prefix
*
2171 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx
*ctx
)
2173 DPLANE_CTX_VALID(ctx
);
2175 return &(ctx
->u
.rule
.old
.src_ip
);
2178 const struct prefix
*
2179 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx
*ctx
)
2181 DPLANE_CTX_VALID(ctx
);
2183 return &(ctx
->u
.rule
.new.dst_ip
);
2186 const struct prefix
*
2187 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx
*ctx
)
2189 DPLANE_CTX_VALID(ctx
);
2191 return &(ctx
->u
.rule
.old
.dst_ip
);
2194 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx
*ctx
)
2196 DPLANE_CTX_VALID(ctx
);
2198 return ctx
->u
.br_port
.flags
;
2202 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx
*ctx
)
2204 DPLANE_CTX_VALID(ctx
);
2206 return ctx
->u
.br_port
.sph_filter_cnt
;
2209 const struct in_addr
*
2210 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx
*ctx
)
2212 DPLANE_CTX_VALID(ctx
);
2214 return ctx
->u
.br_port
.sph_filters
;
2218 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx
*ctx
)
2220 DPLANE_CTX_VALID(ctx
);
2222 return ctx
->u
.br_port
.backup_nhg_id
;
2225 /* Accessors for PBR iptable information */
2226 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx
*ctx
,
2227 struct zebra_pbr_iptable
*table
)
2229 DPLANE_CTX_VALID(ctx
);
2231 memcpy(table
, &ctx
->u
.iptable
, sizeof(struct zebra_pbr_iptable
));
2234 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx
*ctx
,
2235 struct zebra_pbr_ipset
*ipset
)
2237 DPLANE_CTX_VALID(ctx
);
2241 if (ctx
->zd_op
== DPLANE_OP_IPSET_ENTRY_ADD
||
2242 ctx
->zd_op
== DPLANE_OP_IPSET_ENTRY_DELETE
) {
2243 memset(ipset
, 0, sizeof(struct zebra_pbr_ipset
));
2244 ipset
->type
= ctx
->u
.ipset_entry
.info
.type
;
2245 ipset
->family
= ctx
->u
.ipset_entry
.info
.family
;
2246 memcpy(&ipset
->ipset_name
, &ctx
->u
.ipset_entry
.info
.ipset_name
,
2247 ZEBRA_IPSET_NAME_SIZE
);
2249 memcpy(ipset
, &ctx
->u
.ipset
, sizeof(struct zebra_pbr_ipset
));
2252 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx
*ctx
,
2253 struct zebra_pbr_ipset_entry
*entry
)
2255 DPLANE_CTX_VALID(ctx
);
2259 memcpy(entry
, &ctx
->u
.ipset_entry
.entry
, sizeof(struct zebra_pbr_ipset_entry
));
2263 * End of dplane context accessors
2266 /* Optional extra info about interfaces in nexthops - a plugin must enable
2269 const struct dplane_intf_extra
*
2270 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx
*ctx
)
2272 return TAILQ_FIRST(&ctx
->u
.rinfo
.intf_extra_q
);
2275 const struct dplane_intf_extra
*
2276 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx
*ctx
,
2277 const struct dplane_intf_extra
*ptr
)
2279 return TAILQ_NEXT(ptr
, link
);
2282 vrf_id_t
dplane_intf_extra_get_vrfid(const struct dplane_intf_extra
*ptr
)
2287 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra
*ptr
)
2289 return ptr
->ifindex
;
2292 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra
*ptr
)
2297 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra
*ptr
)
2303 * End of interface extra info accessors
2306 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx
*ctx
)
2308 DPLANE_CTX_VALID(ctx
);
2310 return ctx
->u
.neightable
.family
;
2314 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx
*ctx
)
2316 DPLANE_CTX_VALID(ctx
);
2318 return ctx
->u
.neightable
.app_probes
;
2322 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx
*ctx
)
2324 DPLANE_CTX_VALID(ctx
);
2326 return ctx
->u
.neightable
.ucast_probes
;
2330 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx
*ctx
)
2332 DPLANE_CTX_VALID(ctx
);
2334 return ctx
->u
.neightable
.mcast_probes
;
2337 ifindex_t
dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx
*ctx
)
2339 DPLANE_CTX_VALID(ctx
);
2341 return ctx
->u
.netconf
.ifindex
;
2344 ns_id_t
dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx
*ctx
)
2346 DPLANE_CTX_VALID(ctx
);
2348 return ctx
->u
.netconf
.ns_id
;
2351 void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx
*ctx
,
2354 DPLANE_CTX_VALID(ctx
);
2356 ctx
->u
.netconf
.ifindex
= ifindex
;
2359 void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx
*ctx
, ns_id_t ns_id
)
2361 DPLANE_CTX_VALID(ctx
);
2363 ctx
->u
.netconf
.ns_id
= ns_id
;
2366 enum dplane_netconf_status_e
2367 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx
*ctx
)
2369 DPLANE_CTX_VALID(ctx
);
2371 return ctx
->u
.netconf
.mpls_val
;
2374 enum dplane_netconf_status_e
2375 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx
*ctx
)
2377 DPLANE_CTX_VALID(ctx
);
2379 return ctx
->u
.netconf
.mcast_val
;
2382 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx
*ctx
,
2383 enum dplane_netconf_status_e val
)
2385 DPLANE_CTX_VALID(ctx
);
2387 ctx
->u
.netconf
.mpls_val
= val
;
2390 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx
*ctx
,
2391 enum dplane_netconf_status_e val
)
2393 DPLANE_CTX_VALID(ctx
);
2395 ctx
->u
.netconf
.mcast_val
= val
;
2399 * Retrieve the limit on the number of pending, unprocessed updates.
2401 uint32_t dplane_get_in_queue_limit(void)
2403 return atomic_load_explicit(&zdplane_info
.dg_max_queued_updates
,
2404 memory_order_relaxed
);
2408 * Configure limit on the number of pending, queued updates.
2410 void dplane_set_in_queue_limit(uint32_t limit
, bool set
)
2412 /* Reset to default on 'unset' */
2414 limit
= DPLANE_DEFAULT_MAX_QUEUED
;
2416 atomic_store_explicit(&zdplane_info
.dg_max_queued_updates
, limit
,
2417 memory_order_relaxed
);
2421 * Retrieve the current queue depth of incoming, unprocessed updates
2423 uint32_t dplane_get_in_queue_len(void)
2425 return atomic_load_explicit(&zdplane_info
.dg_routes_queued
,
2426 memory_order_seq_cst
);
2430 * Internal helper that copies information from a zebra ns object; this is
2431 * called in the zebra main pthread context as part of dplane ctx init.
2433 static void ctx_info_from_zns(struct zebra_dplane_info
*ns_info
,
2434 struct zebra_ns
*zns
)
2436 ns_info
->ns_id
= zns
->ns_id
;
2438 #if defined(HAVE_NETLINK)
2439 ns_info
->is_cmd
= true;
2440 ns_info
->sock
= zns
->netlink_dplane_out
.sock
;
2441 ns_info
->seq
= zns
->netlink_dplane_out
.seq
;
2442 #endif /* NETLINK */
2446 * Common dataplane context init with zebra namespace info.
2448 static int dplane_ctx_ns_init(struct zebra_dplane_ctx
*ctx
,
2449 struct zebra_ns
*zns
,
2452 ctx_info_from_zns(&(ctx
->zd_ns_info
), zns
); /* */
2454 ctx
->zd_is_update
= is_update
;
2456 #if defined(HAVE_NETLINK)
2457 /* Increment message counter after copying to context struct - may need
2458 * two messages in some 'update' cases.
2461 zns
->netlink_dplane_out
.seq
+= 2;
2463 zns
->netlink_dplane_out
.seq
++;
2464 #endif /* HAVE_NETLINK */
2470 * Initialize a context block for a route update from zebra data structs.
2472 int dplane_ctx_route_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2473 struct route_node
*rn
, struct route_entry
*re
)
2476 const struct route_table
*table
= NULL
;
2477 const struct rib_table_info
*info
;
2478 const struct prefix
*p
, *src_p
;
2479 struct zebra_ns
*zns
;
2480 struct zebra_vrf
*zvrf
;
2481 struct nexthop
*nexthop
;
2482 struct zebra_l3vni
*zl3vni
;
2483 const struct interface
*ifp
;
2484 struct dplane_intf_extra
*if_extra
;
2486 if (!ctx
|| !rn
|| !re
)
2489 TAILQ_INIT(&ctx
->u
.rinfo
.intf_extra_q
);
2492 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2494 ctx
->u
.rinfo
.zd_type
= re
->type
;
2495 ctx
->u
.rinfo
.zd_old_type
= re
->type
;
2497 /* Prefixes: dest, and optional source */
2498 srcdest_rnode_prefixes(rn
, &p
, &src_p
);
2500 prefix_copy(&(ctx
->u
.rinfo
.zd_dest
), p
);
2503 prefix_copy(&(ctx
->u
.rinfo
.zd_src
), src_p
);
2505 memset(&(ctx
->u
.rinfo
.zd_src
), 0, sizeof(ctx
->u
.rinfo
.zd_src
));
2507 ctx
->zd_table_id
= re
->table
;
2509 ctx
->u
.rinfo
.zd_metric
= re
->metric
;
2510 ctx
->u
.rinfo
.zd_old_metric
= re
->metric
;
2511 ctx
->zd_vrf_id
= re
->vrf_id
;
2512 ctx
->u
.rinfo
.zd_mtu
= re
->mtu
;
2513 ctx
->u
.rinfo
.zd_nexthop_mtu
= re
->nexthop_mtu
;
2514 ctx
->u
.rinfo
.zd_instance
= re
->instance
;
2515 ctx
->u
.rinfo
.zd_tag
= re
->tag
;
2516 ctx
->u
.rinfo
.zd_old_tag
= re
->tag
;
2517 ctx
->u
.rinfo
.zd_distance
= re
->distance
;
2519 table
= srcdest_rnode_table(rn
);
2522 ctx
->u
.rinfo
.zd_afi
= info
->afi
;
2523 ctx
->u
.rinfo
.zd_safi
= info
->safi
;
2525 /* Copy nexthops; recursive info is included too */
2526 copy_nexthops(&(ctx
->u
.rinfo
.zd_ng
.nexthop
),
2527 re
->nhe
->nhg
.nexthop
, NULL
);
2528 ctx
->u
.rinfo
.zd_nhg_id
= re
->nhe
->id
;
2530 /* Copy backup nexthop info, if present */
2531 if (re
->nhe
->backup_info
&& re
->nhe
->backup_info
->nhe
) {
2532 copy_nexthops(&(ctx
->u
.rinfo
.backup_ng
.nexthop
),
2533 re
->nhe
->backup_info
->nhe
->nhg
.nexthop
, NULL
);
2537 * Ensure that the dplane nexthops' flags are clear and copy
2538 * encapsulation information.
2540 for (ALL_NEXTHOPS(ctx
->u
.rinfo
.zd_ng
, nexthop
)) {
2541 UNSET_FLAG(nexthop
->flags
, NEXTHOP_FLAG_FIB
);
2543 /* Optionally capture extra interface info while we're in the
2544 * main zebra pthread - a plugin has to ask for this info.
2546 if (dplane_collect_extra_intf_info
) {
2547 ifp
= if_lookup_by_index(nexthop
->ifindex
,
2553 sizeof(struct dplane_intf_extra
));
2554 if_extra
->vrf_id
= nexthop
->vrf_id
;
2555 if_extra
->ifindex
= nexthop
->ifindex
;
2556 if_extra
->flags
= ifp
->flags
;
2557 if_extra
->status
= ifp
->status
;
2559 TAILQ_INSERT_TAIL(&ctx
->u
.rinfo
.intf_extra_q
,
2564 /* Check for available evpn encapsulations. */
2565 if (!CHECK_FLAG(re
->flags
, ZEBRA_FLAG_EVPN_ROUTE
))
2568 zl3vni
= zl3vni_from_vrf(nexthop
->vrf_id
);
2569 if (zl3vni
&& is_l3vni_oper_up(zl3vni
)) {
2570 nexthop
->nh_encap_type
= NET_VXLAN
;
2571 nexthop
->nh_encap
.vni
= zl3vni
->vni
;
2575 /* Don't need some info when capturing a system notification */
2576 if (op
== DPLANE_OP_SYS_ROUTE_ADD
||
2577 op
== DPLANE_OP_SYS_ROUTE_DELETE
) {
2582 /* Extract ns info - can't use pointers to 'core' structs */
2583 zvrf
= vrf_info_lookup(re
->vrf_id
);
2585 dplane_ctx_ns_init(ctx
, zns
, (op
== DPLANE_OP_ROUTE_UPDATE
));
2589 struct nhg_hash_entry
*nhe
= zebra_nhg_resolve(re
->nhe
);
2591 ctx
->u
.rinfo
.nhe
.id
= nhe
->id
;
2592 ctx
->u
.rinfo
.nhe
.old_id
= 0;
2594 * Check if the nhe is installed/queued before doing anything
2597 * If its a delete we only use the prefix anyway, so this only
2598 * matters for INSTALL/UPDATE.
2600 if (zebra_nhg_kernel_nexthops_enabled()
2601 && (((op
== DPLANE_OP_ROUTE_INSTALL
)
2602 || (op
== DPLANE_OP_ROUTE_UPDATE
))
2603 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_INSTALLED
)
2604 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_QUEUED
))) {
2609 re
->nhe_installed_id
= nhe
->id
;
2611 #endif /* HAVE_NETLINK */
2613 /* Trying out the sequence number idea, so we can try to detect
2614 * when a result is stale.
2616 re
->dplane_sequence
= zebra_router_get_next_sequence();
2617 ctx
->zd_seq
= re
->dplane_sequence
;
2626 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2628 * @ctx: Dataplane context to init
2629 * @op: Operation being performed
2630 * @nhe: Nexthop group hash entry
2632 * Return: Result status
2634 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2635 struct nhg_hash_entry
*nhe
)
2637 struct zebra_vrf
*zvrf
= NULL
;
2638 struct zebra_ns
*zns
= NULL
;
2645 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2647 /* Copy over nhe info */
2648 ctx
->u
.rinfo
.nhe
.id
= nhe
->id
;
2649 ctx
->u
.rinfo
.nhe
.afi
= nhe
->afi
;
2650 ctx
->u
.rinfo
.nhe
.vrf_id
= nhe
->vrf_id
;
2651 ctx
->u
.rinfo
.nhe
.type
= nhe
->type
;
2653 nexthop_group_copy(&(ctx
->u
.rinfo
.nhe
.ng
), &(nhe
->nhg
));
2655 /* If this is a group, convert it to a grp array of ids */
2656 if (!zebra_nhg_depends_is_empty(nhe
)
2657 && !CHECK_FLAG(nhe
->flags
, NEXTHOP_GROUP_RECURSIVE
))
2658 ctx
->u
.rinfo
.nhe
.nh_grp_count
= zebra_nhg_nhe2grp(
2659 ctx
->u
.rinfo
.nhe
.nh_grp
, nhe
, MULTIPATH_NUM
);
2661 zvrf
= vrf_info_lookup(nhe
->vrf_id
);
2664 * Fallback to default namespace if the vrf got ripped out from under
2667 zns
= zvrf
? zvrf
->zns
: zebra_ns_lookup(NS_DEFAULT
);
2670 * TODO: Might not need to mark this as an update, since
2671 * it probably won't require two messages
2673 dplane_ctx_ns_init(ctx
, zns
, (op
== DPLANE_OP_NH_UPDATE
));
2682 * dplane_ctx_intf_init() - Initialize a context block for a interface update
2684 * @ctx: Dataplane context to init
2685 * @op: Operation being performed
2688 * Return: Result status
2690 int dplane_ctx_intf_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2691 const struct interface
*ifp
)
2693 struct zebra_ns
*zns
;
2694 struct zebra_if
*zif
;
2696 bool set_pdown
, unset_pdown
;
2702 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2703 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
2705 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
2706 ctx
->zd_ifindex
= ifp
->ifindex
;
2708 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
2709 dplane_ctx_ns_init(ctx
, zns
, false);
2712 /* Copy over ifp info */
2713 ctx
->u
.intf
.metric
= ifp
->metric
;
2714 ctx
->u
.intf
.flags
= ifp
->flags
;
2716 /* Copy over extra zebra info, if available */
2717 zif
= (struct zebra_if
*)ifp
->info
;
2720 set_pdown
= !!(zif
->flags
& ZIF_FLAG_SET_PROTODOWN
);
2721 unset_pdown
= !!(zif
->flags
& ZIF_FLAG_UNSET_PROTODOWN
);
2723 if (zif
->protodown_rc
&&
2724 ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif
) == false)
2725 ctx
->u
.intf
.pd_reason_val
= true;
2728 * See if we have new protodown state to set, otherwise keep
2732 ctx
->u
.intf
.protodown
= true;
2733 else if (unset_pdown
)
2734 ctx
->u
.intf
.protodown
= false;
2736 ctx
->u
.intf
.protodown
= !!ZEBRA_IF_IS_PROTODOWN(zif
);
2739 dplane_ctx_ns_init(ctx
, zns
, (op
== DPLANE_OP_INTF_UPDATE
));
2740 ctx
->zd_is_update
= (op
== DPLANE_OP_INTF_UPDATE
);
2749 * Capture information for an LSP update in a dplane context.
2751 int dplane_ctx_lsp_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
2752 struct zebra_lsp
*lsp
)
2755 struct zebra_nhlfe
*nhlfe
, *new_nhlfe
;
2758 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2760 /* Capture namespace info */
2761 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
),
2762 (op
== DPLANE_OP_LSP_UPDATE
));
2764 memset(&ctx
->u
.lsp
, 0, sizeof(ctx
->u
.lsp
));
2766 nhlfe_list_init(&(ctx
->u
.lsp
.nhlfe_list
));
2767 nhlfe_list_init(&(ctx
->u
.lsp
.backup_nhlfe_list
));
2769 /* This may be called to create/init a dplane context, not necessarily
2770 * to copy an lsp object.
2777 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
2778 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2779 dplane_op2str(op
), lsp
->ile
.in_label
,
2782 ctx
->u
.lsp
.ile
= lsp
->ile
;
2783 ctx
->u
.lsp
.addr_family
= lsp
->addr_family
;
2784 ctx
->u
.lsp
.num_ecmp
= lsp
->num_ecmp
;
2785 ctx
->u
.lsp
.flags
= lsp
->flags
;
2787 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2788 frr_each(nhlfe_list
, &lsp
->nhlfe_list
, nhlfe
) {
2789 /* Not sure if this is meaningful... */
2790 if (nhlfe
->nexthop
== NULL
)
2793 new_nhlfe
= zebra_mpls_lsp_add_nh(&(ctx
->u
.lsp
), nhlfe
->type
,
2795 if (new_nhlfe
== NULL
|| new_nhlfe
->nexthop
== NULL
) {
2800 /* Need to copy flags and backup info too */
2801 new_nhlfe
->flags
= nhlfe
->flags
;
2802 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
2804 if (CHECK_FLAG(new_nhlfe
->nexthop
->flags
,
2805 NEXTHOP_FLAG_HAS_BACKUP
)) {
2806 new_nhlfe
->nexthop
->backup_num
=
2807 nhlfe
->nexthop
->backup_num
;
2808 memcpy(new_nhlfe
->nexthop
->backup_idx
,
2809 nhlfe
->nexthop
->backup_idx
,
2810 new_nhlfe
->nexthop
->backup_num
);
2813 if (nhlfe
== lsp
->best_nhlfe
)
2814 ctx
->u
.lsp
.best_nhlfe
= new_nhlfe
;
2820 /* Capture backup nhlfes/nexthops */
2821 frr_each(nhlfe_list
, &lsp
->backup_nhlfe_list
, nhlfe
) {
2822 /* Not sure if this is meaningful... */
2823 if (nhlfe
->nexthop
== NULL
)
2826 new_nhlfe
= zebra_mpls_lsp_add_backup_nh(&(ctx
->u
.lsp
),
2829 if (new_nhlfe
== NULL
|| new_nhlfe
->nexthop
== NULL
) {
2834 /* Need to copy flags too */
2835 new_nhlfe
->flags
= nhlfe
->flags
;
2836 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
2839 /* On error the ctx will be cleaned-up, so we don't need to
2840 * deal with any allocated nhlfe or nexthop structs here.
2848 * Capture information for an LSP update in a dplane context.
2850 static int dplane_ctx_pw_init(struct zebra_dplane_ctx
*ctx
,
2851 enum dplane_op_e op
,
2852 struct zebra_pw
*pw
)
2857 struct route_table
*table
;
2858 struct route_node
*rn
;
2859 struct route_entry
*re
;
2860 const struct nexthop_group
*nhg
;
2861 struct nexthop
*nh
, *newnh
, *last_nh
;
2863 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
2864 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2865 dplane_op2str(op
), pw
->ifname
, pw
->local_label
,
2869 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
2871 /* Capture namespace info: no netlink support as of 12/18,
2872 * but just in case...
2874 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
2876 memset(&ctx
->u
.pw
, 0, sizeof(ctx
->u
.pw
));
2878 /* This name appears to be c-string, so we use string copy. */
2879 strlcpy(ctx
->zd_ifname
, pw
->ifname
, sizeof(ctx
->zd_ifname
));
2881 ctx
->zd_vrf_id
= pw
->vrf_id
;
2882 ctx
->zd_ifindex
= pw
->ifindex
;
2883 ctx
->u
.pw
.type
= pw
->type
;
2884 ctx
->u
.pw
.af
= pw
->af
;
2885 ctx
->u
.pw
.local_label
= pw
->local_label
;
2886 ctx
->u
.pw
.remote_label
= pw
->remote_label
;
2887 ctx
->u
.pw
.flags
= pw
->flags
;
2889 ctx
->u
.pw
.dest
= pw
->nexthop
;
2891 ctx
->u
.pw
.fields
= pw
->data
;
2893 /* Capture nexthop info for the pw destination. We need to look
2894 * up and use zebra datastructs, but we're running in the zebra
2895 * pthread here so that should be ok.
2897 memcpy(&p
.u
, &pw
->nexthop
, sizeof(pw
->nexthop
));
2899 p
.prefixlen
= ((pw
->af
== AF_INET
) ? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN
);
2901 afi
= (pw
->af
== AF_INET
) ? AFI_IP
: AFI_IP6
;
2902 table
= zebra_vrf_table(afi
, SAFI_UNICAST
, pw
->vrf_id
);
2906 rn
= route_node_match(table
, &p
);
2911 RNODE_FOREACH_RE(rn
, re
) {
2912 if (CHECK_FLAG(re
->flags
, ZEBRA_FLAG_SELECTED
))
2917 /* We'll capture a 'fib' list of nexthops that meet our
2918 * criteria: installed, and labelled.
2920 nhg
= rib_get_fib_nhg(re
);
2923 if (nhg
&& nhg
->nexthop
) {
2924 for (ALL_NEXTHOPS_PTR(nhg
, nh
)) {
2925 if (!CHECK_FLAG(nh
->flags
, NEXTHOP_FLAG_ACTIVE
)
2926 || CHECK_FLAG(nh
->flags
,
2927 NEXTHOP_FLAG_RECURSIVE
)
2928 || nh
->nh_label
== NULL
)
2931 newnh
= nexthop_dup(nh
, NULL
);
2934 NEXTHOP_APPEND(last_nh
, newnh
);
2936 ctx
->u
.pw
.fib_nhg
.nexthop
= newnh
;
2941 /* Include any installed backup nexthops also. */
2942 nhg
= rib_get_fib_backup_nhg(re
);
2943 if (nhg
&& nhg
->nexthop
) {
2944 for (ALL_NEXTHOPS_PTR(nhg
, nh
)) {
2945 if (!CHECK_FLAG(nh
->flags
, NEXTHOP_FLAG_ACTIVE
)
2946 || CHECK_FLAG(nh
->flags
,
2947 NEXTHOP_FLAG_RECURSIVE
)
2948 || nh
->nh_label
== NULL
)
2951 newnh
= nexthop_dup(nh
, NULL
);
2954 NEXTHOP_APPEND(last_nh
, newnh
);
2956 ctx
->u
.pw
.fib_nhg
.nexthop
= newnh
;
2961 /* Copy primary nexthops; recursive info is included too */
2962 assert(re
->nhe
!= NULL
); /* SA warning */
2963 copy_nexthops(&(ctx
->u
.pw
.primary_nhg
.nexthop
),
2964 re
->nhe
->nhg
.nexthop
, NULL
);
2965 ctx
->u
.pw
.nhg_id
= re
->nhe
->id
;
2967 /* Copy backup nexthop info, if present */
2968 if (re
->nhe
->backup_info
&& re
->nhe
->backup_info
->nhe
) {
2969 copy_nexthops(&(ctx
->u
.pw
.backup_nhg
.nexthop
),
2970 re
->nhe
->backup_info
->nhe
->nhg
.nexthop
,
2974 route_unlock_node(rn
);
2983 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2986 * @dplane_rule: Dataplane internal representation of a rule
2989 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule
*dplane_rule
,
2990 struct zebra_pbr_rule
*rule
)
2992 dplane_rule
->priority
= rule
->rule
.priority
;
2993 dplane_rule
->table
= rule
->rule
.action
.table
;
2995 dplane_rule
->filter_bm
= rule
->rule
.filter
.filter_bm
;
2996 dplane_rule
->fwmark
= rule
->rule
.filter
.fwmark
;
2997 dplane_rule
->dsfield
= rule
->rule
.filter
.dsfield
;
2998 dplane_rule
->ip_proto
= rule
->rule
.filter
.ip_proto
;
2999 prefix_copy(&(dplane_rule
->dst_ip
), &rule
->rule
.filter
.dst_ip
);
3000 prefix_copy(&(dplane_rule
->src_ip
), &rule
->rule
.filter
.src_ip
);
3002 dplane_rule
->action_pcp
= rule
->rule
.action
.pcp
;
3003 dplane_rule
->action_vlan_flags
= rule
->rule
.action
.vlan_flags
;
3004 dplane_rule
->action_vlan_id
= rule
->rule
.action
.vlan_id
;
3005 dplane_rule
->action_queue_id
= rule
->rule
.action
.queue_id
;
3007 strlcpy(dplane_rule
->ifname
, rule
->ifname
, INTERFACE_NAMSIZ
);
3011 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
3013 * @ctx: Dataplane context to init
3014 * @op: Operation being performed
3015 * @new_rule: PBR rule
3017 * Return: Result status
3019 static int dplane_ctx_rule_init(struct zebra_dplane_ctx
*ctx
,
3020 enum dplane_op_e op
,
3021 struct zebra_pbr_rule
*new_rule
,
3022 struct zebra_pbr_rule
*old_rule
)
3024 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
3026 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
3027 dplane_op2str(op
), new_rule
->ifname
,
3028 new_rule
->rule
.priority
, new_rule
->rule
.filter
.fwmark
,
3029 &new_rule
->rule
.filter
.src_ip
,
3030 &new_rule
->rule
.filter
.dst_ip
,
3031 new_rule
->rule
.action
.table
);
3034 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3036 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
),
3037 op
== DPLANE_OP_RULE_UPDATE
);
3039 ctx
->zd_vrf_id
= new_rule
->vrf_id
;
3040 strlcpy(ctx
->zd_ifname
, new_rule
->ifname
, sizeof(ctx
->zd_ifname
));
3042 ctx
->u
.rule
.sock
= new_rule
->sock
;
3043 ctx
->u
.rule
.unique
= new_rule
->rule
.unique
;
3044 ctx
->u
.rule
.seq
= new_rule
->rule
.seq
;
3046 dplane_ctx_rule_init_single(&ctx
->u
.rule
.new, new_rule
);
3047 if (op
== DPLANE_OP_RULE_UPDATE
)
3048 dplane_ctx_rule_init_single(&ctx
->u
.rule
.old
, old_rule
);
3054 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
3057 * @ctx: Dataplane context to init
3058 * @op: Operation being performed
3059 * @new_rule: PBR iptable
3061 * Return: Result status
3063 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx
*ctx
,
3064 enum dplane_op_e op
,
3065 struct zebra_pbr_iptable
*iptable
)
3068 struct listnode
*node
;
3070 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
3072 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
3073 dplane_op2str(op
), iptable
->unique
, iptable
->fwmark
,
3074 family2str(iptable
->family
),
3075 iptable
->action
== ZEBRA_IPTABLES_DROP
? "Drop"
3080 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3082 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
3084 ctx
->zd_vrf_id
= iptable
->vrf_id
;
3085 memcpy(&ctx
->u
.iptable
, iptable
, sizeof(struct zebra_pbr_iptable
));
3086 ctx
->u
.iptable
.interface_name_list
= NULL
;
3087 if (iptable
->nb_interface
> 0) {
3088 ctx
->u
.iptable
.interface_name_list
= list_new();
3089 for (ALL_LIST_ELEMENTS_RO(iptable
->interface_name_list
, node
,
3091 listnode_add(ctx
->u
.iptable
.interface_name_list
,
3092 XSTRDUP(MTYPE_DP_NETFILTER
, ifname
));
3099 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
3101 * @ctx: Dataplane context to init
3102 * @op: Operation being performed
3103 * @new_rule: PBR ipset
3105 * Return: Result status
3107 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx
*ctx
,
3108 enum dplane_op_e op
,
3109 struct zebra_pbr_ipset
*ipset
)
3111 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
3112 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
3113 dplane_op2str(op
), ipset
->ipset_name
, ipset
->unique
,
3114 family2str(ipset
->family
),
3115 zebra_pbr_ipset_type2str(ipset
->type
));
3119 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3121 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
3123 ctx
->zd_vrf_id
= ipset
->vrf_id
;
3125 memcpy(&ctx
->u
.ipset
, ipset
, sizeof(struct zebra_pbr_ipset
));
3130 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3133 * @ctx: Dataplane context to init
3134 * @op: Operation being performed
3135 * @new_rule: PBR ipset
3137 * Return: Result status
3140 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx
*ctx
, enum dplane_op_e op
,
3141 struct zebra_pbr_ipset_entry
*ipset_entry
)
3143 struct zebra_pbr_ipset
*ipset
;
3145 ipset
= ipset_entry
->backpointer
;
3146 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
3147 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3148 dplane_op2str(op
), ipset
->ipset_name
,
3149 ipset_entry
->unique
, ipset_entry
->filter_bm
);
3153 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3155 dplane_ctx_ns_init(ctx
, zebra_ns_lookup(NS_DEFAULT
), false);
3157 ctx
->zd_vrf_id
= ipset
->vrf_id
;
3159 memcpy(&ctx
->u
.ipset_entry
.entry
, ipset_entry
,
3160 sizeof(struct zebra_pbr_ipset_entry
));
3161 ctx
->u
.ipset_entry
.entry
.backpointer
= NULL
;
3162 ctx
->u
.ipset_entry
.info
.type
= ipset
->type
;
3163 ctx
->u
.ipset_entry
.info
.family
= ipset
->family
;
3164 memcpy(&ctx
->u
.ipset_entry
.info
.ipset_name
, &ipset
->ipset_name
,
3165 ZEBRA_IPSET_NAME_SIZE
);
3172 * Enqueue a new update,
3173 * and ensure an event is active for the dataplane pthread.
3175 static int dplane_update_enqueue(struct zebra_dplane_ctx
*ctx
)
3178 uint32_t high
, curr
;
3180 /* Enqueue for processing by the dataplane pthread */
3183 TAILQ_INSERT_TAIL(&zdplane_info
.dg_update_ctx_q
, ctx
,
3188 curr
= atomic_fetch_add_explicit(
3189 &(zdplane_info
.dg_routes_queued
),
3190 1, memory_order_seq_cst
);
3192 curr
++; /* We got the pre-incremented value */
3194 /* Maybe update high-water counter also */
3195 high
= atomic_load_explicit(&zdplane_info
.dg_routes_queued_max
,
3196 memory_order_seq_cst
);
3197 while (high
< curr
) {
3198 if (atomic_compare_exchange_weak_explicit(
3199 &zdplane_info
.dg_routes_queued_max
,
3201 memory_order_seq_cst
,
3202 memory_order_seq_cst
))
3206 /* Ensure that an event for the dataplane thread is active */
3207 ret
= dplane_provider_work_ready();
3213 * Utility that prepares a route update and enqueues it for processing
3215 static enum zebra_dplane_result
3216 dplane_route_update_internal(struct route_node
*rn
,
3217 struct route_entry
*re
,
3218 struct route_entry
*old_re
,
3219 enum dplane_op_e op
)
3221 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3223 struct zebra_dplane_ctx
*ctx
= NULL
;
3225 /* Obtain context block */
3226 ctx
= dplane_ctx_alloc();
3228 /* Init context with info from zebra data structs */
3229 ret
= dplane_ctx_route_init(ctx
, op
, rn
, re
);
3231 /* Capture some extra info for update case
3232 * where there's a different 'old' route.
3234 if ((op
== DPLANE_OP_ROUTE_UPDATE
) &&
3235 old_re
&& (old_re
!= re
)) {
3237 old_re
->dplane_sequence
=
3238 zebra_router_get_next_sequence();
3239 ctx
->zd_old_seq
= old_re
->dplane_sequence
;
3241 ctx
->u
.rinfo
.zd_old_tag
= old_re
->tag
;
3242 ctx
->u
.rinfo
.zd_old_type
= old_re
->type
;
3243 ctx
->u
.rinfo
.zd_old_instance
= old_re
->instance
;
3244 ctx
->u
.rinfo
.zd_old_distance
= old_re
->distance
;
3245 ctx
->u
.rinfo
.zd_old_metric
= old_re
->metric
;
3246 ctx
->u
.rinfo
.nhe
.old_id
= old_re
->nhe
->id
;
3248 #ifndef HAVE_NETLINK
3249 /* For bsd, capture previous re's nexthops too, sigh.
3250 * We'll need these to do per-nexthop deletes.
3252 copy_nexthops(&(ctx
->u
.rinfo
.zd_old_ng
.nexthop
),
3253 old_re
->nhe
->nhg
.nexthop
, NULL
);
3255 if (zebra_nhg_get_backup_nhg(old_re
->nhe
) != NULL
) {
3256 struct nexthop_group
*nhg
;
3257 struct nexthop
**nh
;
3259 nhg
= zebra_nhg_get_backup_nhg(old_re
->nhe
);
3260 nh
= &(ctx
->u
.rinfo
.old_backup_ng
.nexthop
);
3263 copy_nexthops(nh
, nhg
->nexthop
, NULL
);
3265 #endif /* !HAVE_NETLINK */
3269 * If the old and new context type, and nexthop group id
3270 * are the same there is no need to send down a route replace
3271 * as that we know we have sent a nexthop group replace
3272 * or an upper level protocol has sent us the exact
3275 if ((dplane_ctx_get_type(ctx
) == dplane_ctx_get_old_type(ctx
))
3276 && (dplane_ctx_get_nhe_id(ctx
)
3277 == dplane_ctx_get_old_nhe_id(ctx
))
3278 && (dplane_ctx_get_nhe_id(ctx
) >= ZEBRA_NHG_PROTO_LOWER
)) {
3279 struct nexthop
*nexthop
;
3281 if (IS_ZEBRA_DEBUG_DPLANE
)
3283 "%s: Ignoring Route exactly the same",
3286 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx
),
3288 if (CHECK_FLAG(nexthop
->flags
,
3289 NEXTHOP_FLAG_RECURSIVE
))
3292 if (CHECK_FLAG(nexthop
->flags
,
3293 NEXTHOP_FLAG_ACTIVE
))
3294 SET_FLAG(nexthop
->flags
,
3298 dplane_ctx_free(&ctx
);
3299 return ZEBRA_DPLANE_REQUEST_SUCCESS
;
3302 /* Enqueue context for processing */
3303 ret
= dplane_update_enqueue(ctx
);
3306 /* Update counter */
3307 atomic_fetch_add_explicit(&zdplane_info
.dg_routes_in
, 1,
3308 memory_order_relaxed
);
3311 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3313 atomic_fetch_add_explicit(&zdplane_info
.dg_route_errors
, 1,
3314 memory_order_relaxed
);
3316 dplane_ctx_free(&ctx
);
3323 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3325 * @nhe: Nexthop group hash entry where the change occured
3326 * @op: The operation to be enqued
3328 * Return: Result of the change
3330 static enum zebra_dplane_result
3331 dplane_nexthop_update_internal(struct nhg_hash_entry
*nhe
, enum dplane_op_e op
)
3333 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3335 struct zebra_dplane_ctx
*ctx
= NULL
;
3337 /* Obtain context block */
3338 ctx
= dplane_ctx_alloc();
3344 ret
= dplane_ctx_nexthop_init(ctx
, op
, nhe
);
3346 ret
= dplane_update_enqueue(ctx
);
3349 /* Update counter */
3350 atomic_fetch_add_explicit(&zdplane_info
.dg_nexthops_in
, 1,
3351 memory_order_relaxed
);
3354 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3356 atomic_fetch_add_explicit(&zdplane_info
.dg_nexthop_errors
, 1,
3357 memory_order_relaxed
);
3359 dplane_ctx_free(&ctx
);
3366 * Enqueue a route 'add' for the dataplane.
3368 enum zebra_dplane_result
dplane_route_add(struct route_node
*rn
,
3369 struct route_entry
*re
)
3371 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3373 if (rn
== NULL
|| re
== NULL
)
3376 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3377 DPLANE_OP_ROUTE_INSTALL
);
3384 * Enqueue a route update for the dataplane.
3386 enum zebra_dplane_result
dplane_route_update(struct route_node
*rn
,
3387 struct route_entry
*re
,
3388 struct route_entry
*old_re
)
3390 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3392 if (rn
== NULL
|| re
== NULL
)
3395 ret
= dplane_route_update_internal(rn
, re
, old_re
,
3396 DPLANE_OP_ROUTE_UPDATE
);
3402 * Enqueue a route removal for the dataplane.
3404 enum zebra_dplane_result
dplane_route_delete(struct route_node
*rn
,
3405 struct route_entry
*re
)
3407 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3409 if (rn
== NULL
|| re
== NULL
)
3412 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3413 DPLANE_OP_ROUTE_DELETE
);
3420 * Notify the dplane when system/connected routes change.
3422 enum zebra_dplane_result
dplane_sys_route_add(struct route_node
*rn
,
3423 struct route_entry
*re
)
3425 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3427 /* Ignore this event unless a provider plugin has requested it. */
3428 if (!zdplane_info
.dg_sys_route_notifs
) {
3429 ret
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3433 if (rn
== NULL
|| re
== NULL
)
3436 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3437 DPLANE_OP_SYS_ROUTE_ADD
);
3444 * Notify the dplane when system/connected routes are deleted.
3446 enum zebra_dplane_result
dplane_sys_route_del(struct route_node
*rn
,
3447 struct route_entry
*re
)
3449 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3451 /* Ignore this event unless a provider plugin has requested it. */
3452 if (!zdplane_info
.dg_sys_route_notifs
) {
3453 ret
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3457 if (rn
== NULL
|| re
== NULL
)
3460 ret
= dplane_route_update_internal(rn
, re
, NULL
,
3461 DPLANE_OP_SYS_ROUTE_DELETE
);
3468 * Update from an async notification, to bring other fibs up-to-date.
3470 enum zebra_dplane_result
3471 dplane_route_notif_update(struct route_node
*rn
,
3472 struct route_entry
*re
,
3473 enum dplane_op_e op
,
3474 struct zebra_dplane_ctx
*ctx
)
3476 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3478 struct zebra_dplane_ctx
*new_ctx
= NULL
;
3479 struct nexthop
*nexthop
;
3480 struct nexthop_group
*nhg
;
3482 if (rn
== NULL
|| re
== NULL
)
3485 new_ctx
= dplane_ctx_alloc();
3486 if (new_ctx
== NULL
)
3489 /* Init context with info from zebra data structs */
3490 dplane_ctx_route_init(new_ctx
, op
, rn
, re
);
3492 /* For add/update, need to adjust the nexthops so that we match
3493 * the notification state, which may not be the route-entry/RIB
3496 if (op
== DPLANE_OP_ROUTE_UPDATE
||
3497 op
== DPLANE_OP_ROUTE_INSTALL
) {
3499 nexthops_free(new_ctx
->u
.rinfo
.zd_ng
.nexthop
);
3500 new_ctx
->u
.rinfo
.zd_ng
.nexthop
= NULL
;
3502 nhg
= rib_get_fib_nhg(re
);
3503 if (nhg
&& nhg
->nexthop
)
3504 copy_nexthops(&(new_ctx
->u
.rinfo
.zd_ng
.nexthop
),
3505 nhg
->nexthop
, NULL
);
3507 /* Check for installed backup nexthops also */
3508 nhg
= rib_get_fib_backup_nhg(re
);
3509 if (nhg
&& nhg
->nexthop
) {
3510 copy_nexthops(&(new_ctx
->u
.rinfo
.zd_ng
.nexthop
),
3511 nhg
->nexthop
, NULL
);
3514 for (ALL_NEXTHOPS(new_ctx
->u
.rinfo
.zd_ng
, nexthop
))
3515 UNSET_FLAG(nexthop
->flags
, NEXTHOP_FLAG_FIB
);
3519 /* Capture info about the source of the notification, in 'ctx' */
3520 dplane_ctx_set_notif_provider(new_ctx
,
3521 dplane_ctx_get_notif_provider(ctx
));
3523 ret
= dplane_update_enqueue(new_ctx
);
3527 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3529 dplane_ctx_free(&new_ctx
);
3535 * Enqueue a nexthop add for the dataplane.
3537 enum zebra_dplane_result
dplane_nexthop_add(struct nhg_hash_entry
*nhe
)
3539 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3542 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_INSTALL
);
3547 * Enqueue a nexthop update for the dataplane.
3549 * Might not need this func since zebra's nexthop objects should be immutable?
3551 enum zebra_dplane_result
dplane_nexthop_update(struct nhg_hash_entry
*nhe
)
3553 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3556 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_UPDATE
);
3561 * Enqueue a nexthop removal for the dataplane.
3563 enum zebra_dplane_result
dplane_nexthop_delete(struct nhg_hash_entry
*nhe
)
3565 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3568 ret
= dplane_nexthop_update_internal(nhe
, DPLANE_OP_NH_DELETE
);
3574 * Enqueue LSP add for the dataplane.
3576 enum zebra_dplane_result
dplane_lsp_add(struct zebra_lsp
*lsp
)
3578 enum zebra_dplane_result ret
=
3579 lsp_update_internal(lsp
, DPLANE_OP_LSP_INSTALL
);
3585 * Enqueue LSP update for the dataplane.
3587 enum zebra_dplane_result
dplane_lsp_update(struct zebra_lsp
*lsp
)
3589 enum zebra_dplane_result ret
=
3590 lsp_update_internal(lsp
, DPLANE_OP_LSP_UPDATE
);
3596 * Enqueue LSP delete for the dataplane.
3598 enum zebra_dplane_result
dplane_lsp_delete(struct zebra_lsp
*lsp
)
3600 enum zebra_dplane_result ret
=
3601 lsp_update_internal(lsp
, DPLANE_OP_LSP_DELETE
);
3606 /* Update or un-install resulting from an async notification */
3607 enum zebra_dplane_result
3608 dplane_lsp_notif_update(struct zebra_lsp
*lsp
, enum dplane_op_e op
,
3609 struct zebra_dplane_ctx
*notif_ctx
)
3611 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3613 struct zebra_dplane_ctx
*ctx
= NULL
;
3614 struct nhlfe_list_head
*head
;
3615 struct zebra_nhlfe
*nhlfe
, *new_nhlfe
;
3617 /* Obtain context block */
3618 ctx
= dplane_ctx_alloc();
3624 /* Copy info from zebra LSP */
3625 ret
= dplane_ctx_lsp_init(ctx
, op
, lsp
);
3629 /* Add any installed backup nhlfes */
3630 head
= &(ctx
->u
.lsp
.backup_nhlfe_list
);
3631 frr_each(nhlfe_list
, head
, nhlfe
) {
3633 if (CHECK_FLAG(nhlfe
->flags
, NHLFE_FLAG_INSTALLED
) &&
3634 CHECK_FLAG(nhlfe
->nexthop
->flags
, NEXTHOP_FLAG_FIB
)) {
3635 new_nhlfe
= zebra_mpls_lsp_add_nh(&(ctx
->u
.lsp
),
3639 /* Need to copy flags too */
3640 new_nhlfe
->flags
= nhlfe
->flags
;
3641 new_nhlfe
->nexthop
->flags
= nhlfe
->nexthop
->flags
;
3645 /* Capture info about the source of the notification */
3646 dplane_ctx_set_notif_provider(
3648 dplane_ctx_get_notif_provider(notif_ctx
));
3650 ret
= dplane_update_enqueue(ctx
);
3653 /* Update counter */
3654 atomic_fetch_add_explicit(&zdplane_info
.dg_lsps_in
, 1,
3655 memory_order_relaxed
);
3658 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3660 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
, 1,
3661 memory_order_relaxed
);
3663 dplane_ctx_free(&ctx
);
3669 * Enqueue pseudowire install for the dataplane.
3671 enum zebra_dplane_result
dplane_pw_install(struct zebra_pw
*pw
)
3673 return pw_update_internal(pw
, DPLANE_OP_PW_INSTALL
);
3677 * Enqueue pseudowire un-install for the dataplane.
3679 enum zebra_dplane_result
dplane_pw_uninstall(struct zebra_pw
*pw
)
3681 return pw_update_internal(pw
, DPLANE_OP_PW_UNINSTALL
);
3685 * Common internal LSP update utility
3687 static enum zebra_dplane_result
lsp_update_internal(struct zebra_lsp
*lsp
,
3688 enum dplane_op_e op
)
3690 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3692 struct zebra_dplane_ctx
*ctx
= NULL
;
3694 /* Obtain context block */
3695 ctx
= dplane_ctx_alloc();
3697 ret
= dplane_ctx_lsp_init(ctx
, op
, lsp
);
3701 ret
= dplane_update_enqueue(ctx
);
3704 /* Update counter */
3705 atomic_fetch_add_explicit(&zdplane_info
.dg_lsps_in
, 1,
3706 memory_order_relaxed
);
3709 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3711 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
, 1,
3712 memory_order_relaxed
);
3713 dplane_ctx_free(&ctx
);
3720 * Internal, common handler for pseudowire updates.
3722 static enum zebra_dplane_result
pw_update_internal(struct zebra_pw
*pw
,
3723 enum dplane_op_e op
)
3725 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3727 struct zebra_dplane_ctx
*ctx
= NULL
;
3729 ctx
= dplane_ctx_alloc();
3731 ret
= dplane_ctx_pw_init(ctx
, op
, pw
);
3735 ret
= dplane_update_enqueue(ctx
);
3738 /* Update counter */
3739 atomic_fetch_add_explicit(&zdplane_info
.dg_pws_in
, 1,
3740 memory_order_relaxed
);
3743 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3745 atomic_fetch_add_explicit(&zdplane_info
.dg_pw_errors
, 1,
3746 memory_order_relaxed
);
3747 dplane_ctx_free(&ctx
);
3754 * Enqueue access br_port update.
3756 enum zebra_dplane_result
3757 dplane_br_port_update(const struct interface
*ifp
, bool non_df
,
3758 uint32_t sph_filter_cnt
,
3759 const struct in_addr
*sph_filters
, uint32_t backup_nhg_id
)
3761 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3764 struct zebra_dplane_ctx
*ctx
= NULL
;
3765 struct zebra_ns
*zns
;
3766 enum dplane_op_e op
= DPLANE_OP_BR_PORT_UPDATE
;
3769 flags
|= DPLANE_BR_PORT_NON_DF
;
3771 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
|| IS_ZEBRA_DEBUG_EVPN_MH_ES
) {
3773 char vtep_str
[ES_VTEP_LIST_STR_SZ
];
3776 for (i
= 0; i
< sph_filter_cnt
; ++i
) {
3777 snprintfrr(vtep_str
+ strlen(vtep_str
),
3778 sizeof(vtep_str
) - strlen(vtep_str
), "%pI4 ",
3782 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3783 dplane_op2str(op
), ifp
->name
, flags
, backup_nhg_id
,
3787 ctx
= dplane_ctx_alloc();
3790 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3791 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
3793 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
3794 dplane_ctx_ns_init(ctx
, zns
, false);
3796 ctx
->zd_ifindex
= ifp
->ifindex
;
3797 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
3799 /* Init the br-port-specific data area */
3800 memset(&ctx
->u
.br_port
, 0, sizeof(ctx
->u
.br_port
));
3802 ctx
->u
.br_port
.flags
= flags
;
3803 ctx
->u
.br_port
.backup_nhg_id
= backup_nhg_id
;
3804 ctx
->u
.br_port
.sph_filter_cnt
= sph_filter_cnt
;
3805 memcpy(ctx
->u
.br_port
.sph_filters
, sph_filters
,
3806 sizeof(ctx
->u
.br_port
.sph_filters
[0]) * sph_filter_cnt
);
3808 /* Enqueue for processing on the dplane pthread */
3809 ret
= dplane_update_enqueue(ctx
);
3811 /* Increment counter */
3812 atomic_fetch_add_explicit(&zdplane_info
.dg_br_port_in
, 1,
3813 memory_order_relaxed
);
3816 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3819 atomic_fetch_add_explicit(&zdplane_info
.dg_br_port_errors
, 1,
3820 memory_order_relaxed
);
3821 dplane_ctx_free(&ctx
);
3828 * Enqueue interface address add for the dataplane.
3830 enum zebra_dplane_result
dplane_intf_addr_set(const struct interface
*ifp
,
3831 const struct connected
*ifc
)
3833 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3834 /* Extra checks for this OS path. */
3836 /* Don't configure PtP addresses on broadcast ifs or reverse */
3837 if (!(ifp
->flags
& IFF_POINTOPOINT
) != !CONNECTED_PEER(ifc
)) {
3838 if (IS_ZEBRA_DEBUG_KERNEL
|| IS_ZEBRA_DEBUG_DPLANE
)
3839 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3841 return ZEBRA_DPLANE_REQUEST_FAILURE
;
3845 return intf_addr_update_internal(ifp
, ifc
, DPLANE_OP_ADDR_INSTALL
);
3849 * Enqueue interface address remove/uninstall for the dataplane.
3851 enum zebra_dplane_result
dplane_intf_addr_unset(const struct interface
*ifp
,
3852 const struct connected
*ifc
)
3854 return intf_addr_update_internal(ifp
, ifc
, DPLANE_OP_ADDR_UNINSTALL
);
3857 static enum zebra_dplane_result
intf_addr_update_internal(
3858 const struct interface
*ifp
, const struct connected
*ifc
,
3859 enum dplane_op_e op
)
3861 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3863 struct zebra_dplane_ctx
*ctx
= NULL
;
3864 struct zebra_ns
*zns
;
3866 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
3867 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3868 dplane_op2str(op
), ifp
->ifindex
, ifp
->vrf
->vrf_id
,
3871 ctx
= dplane_ctx_alloc();
3874 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
3875 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
3877 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
3878 dplane_ctx_ns_init(ctx
, zns
, false);
3880 /* Init the interface-addr-specific area */
3881 memset(&ctx
->u
.intf
, 0, sizeof(ctx
->u
.intf
));
3883 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
3884 ctx
->zd_ifindex
= ifp
->ifindex
;
3885 ctx
->u
.intf
.prefix
= *(ifc
->address
);
3887 if (if_is_broadcast(ifp
))
3888 ctx
->u
.intf
.flags
|= DPLANE_INTF_BROADCAST
;
3890 if (CONNECTED_PEER(ifc
)) {
3891 ctx
->u
.intf
.dest_prefix
= *(ifc
->destination
);
3892 ctx
->u
.intf
.flags
|=
3893 (DPLANE_INTF_CONNECTED
| DPLANE_INTF_HAS_DEST
);
3896 if (CHECK_FLAG(ifc
->flags
, ZEBRA_IFA_SECONDARY
))
3897 ctx
->u
.intf
.flags
|= DPLANE_INTF_SECONDARY
;
3902 ctx
->u
.intf
.flags
|= DPLANE_INTF_HAS_LABEL
;
3904 /* Use embedded buffer if it's adequate; else allocate. */
3905 len
= strlen(ifc
->label
);
3907 if (len
< sizeof(ctx
->u
.intf
.label_buf
)) {
3908 strlcpy(ctx
->u
.intf
.label_buf
, ifc
->label
,
3909 sizeof(ctx
->u
.intf
.label_buf
));
3910 ctx
->u
.intf
.label
= ctx
->u
.intf
.label_buf
;
3912 ctx
->u
.intf
.label
= XSTRDUP(MTYPE_DP_CTX
, ifc
->label
);
3916 ret
= dplane_update_enqueue(ctx
);
3918 /* Increment counter */
3919 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_addrs_in
, 1,
3920 memory_order_relaxed
);
3923 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3926 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_addr_errors
,
3927 1, memory_order_relaxed
);
3928 dplane_ctx_free(&ctx
);
3935 * dplane_intf_update_internal() - Helper for enqueuing interface changes
3937 * @ifp: Interface where the change occured
3938 * @op: The operation to be enqued
3940 * Return: Result of the change
3942 static enum zebra_dplane_result
3943 dplane_intf_update_internal(const struct interface
*ifp
, enum dplane_op_e op
)
3945 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3947 struct zebra_dplane_ctx
*ctx
= NULL
;
3949 /* Obtain context block */
3950 ctx
= dplane_ctx_alloc();
3956 ret
= dplane_ctx_intf_init(ctx
, op
, ifp
);
3958 ret
= dplane_update_enqueue(ctx
);
3961 /* Update counter */
3962 atomic_fetch_add_explicit(&zdplane_info
.dg_intfs_in
, 1,
3963 memory_order_relaxed
);
3966 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
3968 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_errors
, 1,
3969 memory_order_relaxed
);
3971 dplane_ctx_free(&ctx
);
3978 * Enqueue a interface add for the dataplane.
3980 enum zebra_dplane_result
dplane_intf_add(const struct interface
*ifp
)
3982 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3985 ret
= dplane_intf_update_internal(ifp
, DPLANE_OP_INTF_INSTALL
);
3990 * Enqueue a interface update for the dataplane.
3992 enum zebra_dplane_result
dplane_intf_update(const struct interface
*ifp
)
3994 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
3997 ret
= dplane_intf_update_internal(ifp
, DPLANE_OP_INTF_UPDATE
);
4002 * Enqueue a interface delete for the dataplane.
4004 enum zebra_dplane_result
dplane_intf_delete(const struct interface
*ifp
)
4006 enum zebra_dplane_result ret
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4009 ret
= dplane_intf_update_internal(ifp
, DPLANE_OP_INTF_DELETE
);
4014 * Enqueue vxlan/evpn mac add (or update).
4016 enum zebra_dplane_result
dplane_rem_mac_add(const struct interface
*ifp
,
4017 const struct interface
*bridge_ifp
,
4019 const struct ethaddr
*mac
,
4020 struct in_addr vtep_ip
,
4025 enum zebra_dplane_result result
;
4026 uint32_t update_flags
= 0;
4028 update_flags
|= DPLANE_MAC_REMOTE
;
4030 update_flags
|= DPLANE_MAC_WAS_STATIC
;
4032 /* Use common helper api */
4033 result
= mac_update_common(DPLANE_OP_MAC_INSTALL
, ifp
, bridge_ifp
,
4034 vid
, mac
, vtep_ip
, sticky
, nhg_id
, update_flags
);
4039 * Enqueue vxlan/evpn mac delete.
4041 enum zebra_dplane_result
dplane_rem_mac_del(const struct interface
*ifp
,
4042 const struct interface
*bridge_ifp
,
4044 const struct ethaddr
*mac
,
4045 struct in_addr vtep_ip
)
4047 enum zebra_dplane_result result
;
4048 uint32_t update_flags
= 0;
4050 update_flags
|= DPLANE_MAC_REMOTE
;
4052 /* Use common helper api */
4053 result
= mac_update_common(DPLANE_OP_MAC_DELETE
, ifp
, bridge_ifp
,
4054 vid
, mac
, vtep_ip
, false, 0, update_flags
);
4059 * API to configure link local with either MAC address or IP information
4061 enum zebra_dplane_result
dplane_neigh_ip_update(enum dplane_op_e op
,
4062 const struct interface
*ifp
,
4063 struct ipaddr
*link_ip
,
4065 uint32_t ndm_state
, int protocol
)
4067 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4069 uint32_t update_flags
;
4071 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
4072 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
4073 __func__
, dplane_op2str(op
), ifp
->name
, link_ip
, ip
);
4075 if (ndm_state
== ZEBRA_NEIGH_STATE_REACHABLE
)
4076 state
= DPLANE_NUD_REACHABLE
;
4077 else if (ndm_state
== ZEBRA_NEIGH_STATE_FAILED
)
4078 state
= DPLANE_NUD_FAILED
;
4080 update_flags
= DPLANE_NEIGH_NO_EXTENSION
;
4082 result
= neigh_update_internal(op
, ifp
, (const void *)link_ip
,
4083 ipaddr_family(link_ip
), ip
, 0, state
,
4084 update_flags
, protocol
);
4090 * Enqueue local mac add (or update).
4092 enum zebra_dplane_result
dplane_local_mac_add(const struct interface
*ifp
,
4093 const struct interface
*bridge_ifp
,
4095 const struct ethaddr
*mac
,
4097 uint32_t set_static
,
4098 uint32_t set_inactive
)
4100 enum zebra_dplane_result result
;
4101 uint32_t update_flags
= 0;
4102 struct in_addr vtep_ip
;
4105 update_flags
|= DPLANE_MAC_SET_STATIC
;
4108 update_flags
|= DPLANE_MAC_SET_INACTIVE
;
4112 /* Use common helper api */
4113 result
= mac_update_common(DPLANE_OP_MAC_INSTALL
, ifp
, bridge_ifp
,
4114 vid
, mac
, vtep_ip
, sticky
, 0,
4120 * Enqueue local mac del
4122 enum zebra_dplane_result
4123 dplane_local_mac_del(const struct interface
*ifp
,
4124 const struct interface
*bridge_ifp
, vlanid_t vid
,
4125 const struct ethaddr
*mac
)
4127 enum zebra_dplane_result result
;
4128 struct in_addr vtep_ip
;
4132 /* Use common helper api */
4133 result
= mac_update_common(DPLANE_OP_MAC_DELETE
, ifp
, bridge_ifp
, vid
,
4134 mac
, vtep_ip
, false, 0, 0);
4138 * Public api to init an empty context - either newly-allocated or
4139 * reset/cleared - for a MAC update.
4141 void dplane_mac_init(struct zebra_dplane_ctx
*ctx
,
4142 const struct interface
*ifp
,
4143 const struct interface
*br_ifp
,
4145 const struct ethaddr
*mac
,
4146 struct in_addr vtep_ip
,
4149 uint32_t update_flags
)
4151 struct zebra_ns
*zns
;
4153 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4154 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4156 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4157 dplane_ctx_ns_init(ctx
, zns
, false);
4159 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
4160 ctx
->zd_ifindex
= ifp
->ifindex
;
4162 /* Init the mac-specific data area */
4163 memset(&ctx
->u
.macinfo
, 0, sizeof(ctx
->u
.macinfo
));
4165 ctx
->u
.macinfo
.br_ifindex
= br_ifp
->ifindex
;
4166 ctx
->u
.macinfo
.vtep_ip
= vtep_ip
;
4167 ctx
->u
.macinfo
.mac
= *mac
;
4168 ctx
->u
.macinfo
.vid
= vid
;
4169 ctx
->u
.macinfo
.is_sticky
= sticky
;
4170 ctx
->u
.macinfo
.nhg_id
= nhg_id
;
4171 ctx
->u
.macinfo
.update_flags
= update_flags
;
4175 * Common helper api for MAC address/vxlan updates
4177 static enum zebra_dplane_result
4178 mac_update_common(enum dplane_op_e op
,
4179 const struct interface
*ifp
,
4180 const struct interface
*br_ifp
,
4182 const struct ethaddr
*mac
,
4183 struct in_addr vtep_ip
,
4186 uint32_t update_flags
)
4188 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4190 struct zebra_dplane_ctx
*ctx
= NULL
;
4192 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
4193 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
4194 dplane_op2str(op
), mac
, ifp
->name
, &vtep_ip
);
4196 ctx
= dplane_ctx_alloc();
4199 /* Common init for the ctx */
4200 dplane_mac_init(ctx
, ifp
, br_ifp
, vid
, mac
, vtep_ip
, sticky
,
4201 nhg_id
, update_flags
);
4203 /* Enqueue for processing on the dplane pthread */
4204 ret
= dplane_update_enqueue(ctx
);
4206 /* Increment counter */
4207 atomic_fetch_add_explicit(&zdplane_info
.dg_macs_in
, 1,
4208 memory_order_relaxed
);
4211 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4214 atomic_fetch_add_explicit(&zdplane_info
.dg_mac_errors
, 1,
4215 memory_order_relaxed
);
4216 dplane_ctx_free(&ctx
);
4223 * Enqueue evpn neighbor add for the dataplane.
4225 enum zebra_dplane_result
dplane_rem_neigh_add(const struct interface
*ifp
,
4226 const struct ipaddr
*ip
,
4227 const struct ethaddr
*mac
,
4228 uint32_t flags
, bool was_static
)
4230 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4231 uint32_t update_flags
= 0;
4233 update_flags
|= DPLANE_NEIGH_REMOTE
;
4236 update_flags
|= DPLANE_NEIGH_WAS_STATIC
;
4238 result
= neigh_update_internal(
4239 DPLANE_OP_NEIGH_INSTALL
, ifp
, (const void *)mac
, AF_ETHERNET
,
4240 ip
, flags
, DPLANE_NUD_NOARP
, update_flags
, 0);
4246 * Enqueue local neighbor add for the dataplane.
4248 enum zebra_dplane_result
dplane_local_neigh_add(const struct interface
*ifp
,
4249 const struct ipaddr
*ip
,
4250 const struct ethaddr
*mac
,
4251 bool set_router
, bool set_static
,
4254 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4255 uint32_t update_flags
= 0;
4260 update_flags
|= DPLANE_NEIGH_SET_STATIC
;
4263 update_flags
|= DPLANE_NEIGH_SET_INACTIVE
;
4264 state
= DPLANE_NUD_STALE
;
4266 state
= DPLANE_NUD_REACHABLE
;
4270 ntf
|= DPLANE_NTF_ROUTER
;
4272 result
= neigh_update_internal(DPLANE_OP_NEIGH_INSTALL
, ifp
,
4273 (const void *)mac
, AF_ETHERNET
, ip
, ntf
,
4274 state
, update_flags
, 0);
4280 * Enqueue evpn neighbor delete for the dataplane.
4282 enum zebra_dplane_result
dplane_rem_neigh_delete(const struct interface
*ifp
,
4283 const struct ipaddr
*ip
)
4285 enum zebra_dplane_result result
;
4286 uint32_t update_flags
= 0;
4288 update_flags
|= DPLANE_NEIGH_REMOTE
;
4290 result
= neigh_update_internal(DPLANE_OP_NEIGH_DELETE
, ifp
, NULL
,
4291 AF_ETHERNET
, ip
, 0, 0, update_flags
, 0);
4297 * Enqueue evpn VTEP add for the dataplane.
4299 enum zebra_dplane_result
dplane_vtep_add(const struct interface
*ifp
,
4300 const struct in_addr
*ip
,
4303 enum zebra_dplane_result result
;
4304 struct ethaddr mac
= { {0, 0, 0, 0, 0, 0} };
4307 if (IS_ZEBRA_DEBUG_VXLAN
)
4308 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4309 ip
, vni
, ifp
->name
, ifp
->ifindex
);
4311 SET_IPADDR_V4(&addr
);
4312 addr
.ipaddr_v4
= *ip
;
4314 result
= neigh_update_internal(DPLANE_OP_VTEP_ADD
, ifp
, &mac
,
4315 AF_ETHERNET
, &addr
, 0, 0, 0, 0);
4321 * Enqueue evpn VTEP add for the dataplane.
4323 enum zebra_dplane_result
dplane_vtep_delete(const struct interface
*ifp
,
4324 const struct in_addr
*ip
,
4327 enum zebra_dplane_result result
;
4328 struct ethaddr mac
= { {0, 0, 0, 0, 0, 0} };
4331 if (IS_ZEBRA_DEBUG_VXLAN
)
4333 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4334 ip
, vni
, ifp
->name
, ifp
->ifindex
);
4336 SET_IPADDR_V4(&addr
);
4337 addr
.ipaddr_v4
= *ip
;
4339 result
= neigh_update_internal(DPLANE_OP_VTEP_DELETE
, ifp
,
4340 (const void *)&mac
, AF_ETHERNET
, &addr
,
4346 enum zebra_dplane_result
dplane_neigh_discover(const struct interface
*ifp
,
4347 const struct ipaddr
*ip
)
4349 enum zebra_dplane_result result
;
4351 result
= neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER
, ifp
, NULL
,
4352 AF_ETHERNET
, ip
, DPLANE_NTF_USE
,
4353 DPLANE_NUD_INCOMPLETE
, 0, 0);
4358 enum zebra_dplane_result
dplane_neigh_table_update(const struct interface
*ifp
,
4359 const uint8_t family
,
4360 const uint32_t app_probes
,
4361 const uint32_t ucast_probes
,
4362 const uint32_t mcast_probes
)
4364 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4366 struct zebra_dplane_ctx
*ctx
= NULL
;
4367 struct zebra_ns
*zns
;
4368 enum dplane_op_e op
= DPLANE_OP_NEIGH_TABLE_UPDATE
;
4370 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4371 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4372 dplane_op2str(op
), ifp
->name
, family2str(family
));
4375 ctx
= dplane_ctx_alloc();
4378 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4379 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4381 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4382 dplane_ctx_ns_init(ctx
, zns
, false);
4384 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
4385 ctx
->zd_ifindex
= ifp
->ifindex
;
4387 /* Init the neighbor-specific data area */
4388 memset(&ctx
->u
.neightable
, 0, sizeof(ctx
->u
.neightable
));
4390 ctx
->u
.neightable
.family
= family
;
4391 ctx
->u
.neightable
.app_probes
= app_probes
;
4392 ctx
->u
.neightable
.ucast_probes
= ucast_probes
;
4393 ctx
->u
.neightable
.mcast_probes
= mcast_probes
;
4395 /* Enqueue for processing on the dplane pthread */
4396 ret
= dplane_update_enqueue(ctx
);
4398 /* Increment counter */
4399 atomic_fetch_add_explicit(&zdplane_info
.dg_neightable_in
, 1,
4400 memory_order_relaxed
);
4403 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4406 atomic_fetch_add_explicit(&zdplane_info
.dg_neightable_errors
, 1,
4407 memory_order_relaxed
);
4408 dplane_ctx_free(&ctx
);
4415 * Common helper api for neighbor updates
4417 static enum zebra_dplane_result
4418 neigh_update_internal(enum dplane_op_e op
, const struct interface
*ifp
,
4419 const void *link
, const int link_family
,
4420 const struct ipaddr
*ip
, uint32_t flags
, uint16_t state
,
4421 uint32_t update_flags
, int protocol
)
4423 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4425 struct zebra_dplane_ctx
*ctx
= NULL
;
4426 struct zebra_ns
*zns
;
4427 const struct ethaddr
*mac
= NULL
;
4428 const struct ipaddr
*link_ip
= NULL
;
4430 if (link_family
== AF_ETHERNET
)
4431 mac
= (const struct ethaddr
*)link
;
4433 link_ip
= (const struct ipaddr
*)link
;
4435 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4436 char buf1
[PREFIX_STRLEN
];
4439 if (link_family
== AF_ETHERNET
)
4440 prefix_mac2str(mac
, buf1
, sizeof(buf1
));
4442 ipaddr2str(link_ip
, buf1
, sizeof(buf1
));
4443 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4444 dplane_op2str(op
), ifp
->name
,
4445 link_family
== AF_ETHERNET
? "mac " : "link ",
4449 ctx
= dplane_ctx_alloc();
4452 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4453 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4454 dplane_ctx_set_type(ctx
, protocol
);
4456 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4457 dplane_ctx_ns_init(ctx
, zns
, false);
4459 strlcpy(ctx
->zd_ifname
, ifp
->name
, sizeof(ctx
->zd_ifname
));
4460 ctx
->zd_ifindex
= ifp
->ifindex
;
4462 /* Init the neighbor-specific data area */
4463 memset(&ctx
->u
.neigh
, 0, sizeof(ctx
->u
.neigh
));
4465 ctx
->u
.neigh
.ip_addr
= *ip
;
4467 ctx
->u
.neigh
.link
.mac
= *mac
;
4469 ctx
->u
.neigh
.link
.ip_addr
= *link_ip
;
4471 ctx
->u
.neigh
.flags
= flags
;
4472 ctx
->u
.neigh
.state
= state
;
4473 ctx
->u
.neigh
.update_flags
= update_flags
;
4475 /* Enqueue for processing on the dplane pthread */
4476 ret
= dplane_update_enqueue(ctx
);
4478 /* Increment counter */
4479 atomic_fetch_add_explicit(&zdplane_info
.dg_neighs_in
, 1,
4480 memory_order_relaxed
);
4483 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4486 atomic_fetch_add_explicit(&zdplane_info
.dg_neigh_errors
, 1,
4487 memory_order_relaxed
);
4488 dplane_ctx_free(&ctx
);
4495 * Common helper api for PBR rule updates
4497 static enum zebra_dplane_result
4498 rule_update_internal(enum dplane_op_e op
, struct zebra_pbr_rule
*new_rule
,
4499 struct zebra_pbr_rule
*old_rule
)
4501 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4502 struct zebra_dplane_ctx
*ctx
;
4505 ctx
= dplane_ctx_alloc();
4507 ret
= dplane_ctx_rule_init(ctx
, op
, new_rule
, old_rule
);
4511 ret
= dplane_update_enqueue(ctx
);
4514 atomic_fetch_add_explicit(&zdplane_info
.dg_rules_in
, 1,
4515 memory_order_relaxed
);
4518 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4520 atomic_fetch_add_explicit(&zdplane_info
.dg_rule_errors
, 1,
4521 memory_order_relaxed
);
4522 dplane_ctx_free(&ctx
);
4528 enum zebra_dplane_result
dplane_pbr_rule_add(struct zebra_pbr_rule
*rule
)
4530 return rule_update_internal(DPLANE_OP_RULE_ADD
, rule
, NULL
);
4533 enum zebra_dplane_result
dplane_pbr_rule_delete(struct zebra_pbr_rule
*rule
)
4535 return rule_update_internal(DPLANE_OP_RULE_DELETE
, rule
, NULL
);
4538 enum zebra_dplane_result
dplane_pbr_rule_update(struct zebra_pbr_rule
*old_rule
,
4539 struct zebra_pbr_rule
*new_rule
)
4541 return rule_update_internal(DPLANE_OP_RULE_UPDATE
, new_rule
, old_rule
);
4544 * Common helper api for iptable updates
4546 static enum zebra_dplane_result
4547 iptable_update_internal(enum dplane_op_e op
, struct zebra_pbr_iptable
*iptable
)
4549 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4550 struct zebra_dplane_ctx
*ctx
;
4553 if ((op
== DPLANE_OP_IPTABLE_ADD
&&
4554 CHECK_FLAG(iptable
->internal_flags
, IPTABLE_INSTALL_QUEUED
)) ||
4555 (op
== DPLANE_OP_IPTABLE_DELETE
&&
4556 CHECK_FLAG(iptable
->internal_flags
, IPTABLE_UNINSTALL_QUEUED
))) {
4557 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
4559 "update dplane ctx %s: iptable %s already in progress",
4560 dplane_op2str(op
), iptable
->ipset_name
);
4564 ctx
= dplane_ctx_alloc();
4566 ret
= dplane_ctx_iptable_init(ctx
, op
, iptable
);
4570 ret
= dplane_update_enqueue(ctx
);
4573 atomic_fetch_add_explicit(&zdplane_info
.dg_iptable_in
, 1,
4574 memory_order_relaxed
);
4577 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4578 if (op
== DPLANE_OP_IPTABLE_ADD
)
4579 SET_FLAG(iptable
->internal_flags
,
4580 IPTABLE_INSTALL_QUEUED
);
4582 SET_FLAG(iptable
->internal_flags
,
4583 IPTABLE_UNINSTALL_QUEUED
);
4585 atomic_fetch_add_explicit(&zdplane_info
.dg_iptable_errors
, 1,
4586 memory_order_relaxed
);
4587 dplane_ctx_free(&ctx
);
4592 enum zebra_dplane_result
4593 dplane_pbr_iptable_add(struct zebra_pbr_iptable
*iptable
)
4595 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD
, iptable
);
4598 enum zebra_dplane_result
4599 dplane_pbr_iptable_delete(struct zebra_pbr_iptable
*iptable
)
4601 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE
, iptable
);
4605 * Common helper api for ipset updates
4607 static enum zebra_dplane_result
4608 ipset_update_internal(enum dplane_op_e op
, struct zebra_pbr_ipset
*ipset
)
4610 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4611 struct zebra_dplane_ctx
*ctx
;
4614 ctx
= dplane_ctx_alloc();
4616 ret
= dplane_ctx_ipset_init(ctx
, op
, ipset
);
4620 ret
= dplane_update_enqueue(ctx
);
4623 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_in
, 1,
4624 memory_order_relaxed
);
4627 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4629 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_errors
, 1,
4630 memory_order_relaxed
);
4631 dplane_ctx_free(&ctx
);
4637 enum zebra_dplane_result
dplane_pbr_ipset_add(struct zebra_pbr_ipset
*ipset
)
4639 return ipset_update_internal(DPLANE_OP_IPSET_ADD
, ipset
);
4642 enum zebra_dplane_result
dplane_pbr_ipset_delete(struct zebra_pbr_ipset
*ipset
)
4644 return ipset_update_internal(DPLANE_OP_IPSET_DELETE
, ipset
);
4648 * Common helper api for ipset updates
4650 static enum zebra_dplane_result
4651 ipset_entry_update_internal(enum dplane_op_e op
,
4652 struct zebra_pbr_ipset_entry
*ipset_entry
)
4654 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4655 struct zebra_dplane_ctx
*ctx
;
4658 ctx
= dplane_ctx_alloc();
4660 ret
= dplane_ctx_ipset_entry_init(ctx
, op
, ipset_entry
);
4664 ret
= dplane_update_enqueue(ctx
);
4667 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_entry_in
, 1,
4668 memory_order_relaxed
);
4671 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4673 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_entry_errors
,
4674 1, memory_order_relaxed
);
4675 dplane_ctx_free(&ctx
);
4681 enum zebra_dplane_result
4682 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry
*ipset
)
4684 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD
, ipset
);
4687 enum zebra_dplane_result
4688 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry
*ipset
)
4690 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE
, ipset
);
4694 * Common helper api for GRE set
4696 enum zebra_dplane_result
4697 dplane_gre_set(struct interface
*ifp
, struct interface
*ifp_link
,
4698 unsigned int mtu
, const struct zebra_l2info_gre
*gre_info
)
4700 enum zebra_dplane_result result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4701 struct zebra_dplane_ctx
*ctx
;
4702 enum dplane_op_e op
= DPLANE_OP_GRE_SET
;
4704 struct zebra_ns
*zns
;
4706 ctx
= dplane_ctx_alloc();
4711 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
) {
4712 zlog_debug("init dplane ctx %s: if %s link %s%s",
4713 dplane_op2str(op
), ifp
->name
,
4714 ifp_link
? "set" : "unset", ifp_link
?
4715 ifp_link
->name
: "");
4719 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4720 zns
= zebra_ns_lookup(ifp
->vrf
->vrf_id
);
4723 dplane_ctx_ns_init(ctx
, zns
, false);
4725 dplane_ctx_set_ifname(ctx
, ifp
->name
);
4726 ctx
->zd_vrf_id
= ifp
->vrf
->vrf_id
;
4727 ctx
->zd_ifindex
= ifp
->ifindex
;
4729 ctx
->u
.gre
.link_ifindex
= ifp_link
->ifindex
;
4731 ctx
->u
.gre
.link_ifindex
= 0;
4733 memcpy(&ctx
->u
.gre
.info
, gre_info
, sizeof(ctx
->u
.gre
.info
));
4734 ctx
->u
.gre
.mtu
= mtu
;
4736 ctx
->zd_status
= ZEBRA_DPLANE_REQUEST_SUCCESS
;
4738 /* Enqueue context for processing */
4739 ret
= dplane_update_enqueue(ctx
);
4741 /* Update counter */
4742 atomic_fetch_add_explicit(&zdplane_info
.dg_gre_set_in
, 1,
4743 memory_order_relaxed
);
4746 result
= ZEBRA_DPLANE_REQUEST_QUEUED
;
4748 atomic_fetch_add_explicit(
4749 &zdplane_info
.dg_gre_set_errors
, 1,
4750 memory_order_relaxed
);
4752 dplane_ctx_free(&ctx
);
4753 result
= ZEBRA_DPLANE_REQUEST_FAILURE
;
4759 * Handler for 'show dplane'
4761 int dplane_show_helper(struct vty
*vty
, bool detailed
)
4763 uint64_t queued
, queue_max
, limit
, errs
, incoming
, yields
,
4766 /* Using atomics because counters are being changed in different
4769 incoming
= atomic_load_explicit(&zdplane_info
.dg_routes_in
,
4770 memory_order_relaxed
);
4771 limit
= atomic_load_explicit(&zdplane_info
.dg_max_queued_updates
,
4772 memory_order_relaxed
);
4773 queued
= atomic_load_explicit(&zdplane_info
.dg_routes_queued
,
4774 memory_order_relaxed
);
4775 queue_max
= atomic_load_explicit(&zdplane_info
.dg_routes_queued_max
,
4776 memory_order_relaxed
);
4777 errs
= atomic_load_explicit(&zdplane_info
.dg_route_errors
,
4778 memory_order_relaxed
);
4779 yields
= atomic_load_explicit(&zdplane_info
.dg_update_yields
,
4780 memory_order_relaxed
);
4781 other_errs
= atomic_load_explicit(&zdplane_info
.dg_other_errors
,
4782 memory_order_relaxed
);
4784 vty_out(vty
, "Zebra dataplane:\nRoute updates: %"PRIu64
"\n",
4786 vty_out(vty
, "Route update errors: %"PRIu64
"\n", errs
);
4787 vty_out(vty
, "Other errors : %"PRIu64
"\n", other_errs
);
4788 vty_out(vty
, "Route update queue limit: %"PRIu64
"\n", limit
);
4789 vty_out(vty
, "Route update queue depth: %"PRIu64
"\n", queued
);
4790 vty_out(vty
, "Route update queue max: %"PRIu64
"\n", queue_max
);
4791 vty_out(vty
, "Dplane update yields: %"PRIu64
"\n", yields
);
4793 incoming
= atomic_load_explicit(&zdplane_info
.dg_lsps_in
,
4794 memory_order_relaxed
);
4795 errs
= atomic_load_explicit(&zdplane_info
.dg_lsp_errors
,
4796 memory_order_relaxed
);
4797 vty_out(vty
, "LSP updates: %"PRIu64
"\n", incoming
);
4798 vty_out(vty
, "LSP update errors: %"PRIu64
"\n", errs
);
4800 incoming
= atomic_load_explicit(&zdplane_info
.dg_pws_in
,
4801 memory_order_relaxed
);
4802 errs
= atomic_load_explicit(&zdplane_info
.dg_pw_errors
,
4803 memory_order_relaxed
);
4804 vty_out(vty
, "PW updates: %"PRIu64
"\n", incoming
);
4805 vty_out(vty
, "PW update errors: %"PRIu64
"\n", errs
);
4807 incoming
= atomic_load_explicit(&zdplane_info
.dg_intf_addrs_in
,
4808 memory_order_relaxed
);
4809 errs
= atomic_load_explicit(&zdplane_info
.dg_intf_addr_errors
,
4810 memory_order_relaxed
);
4811 vty_out(vty
, "Intf addr updates: %"PRIu64
"\n", incoming
);
4812 vty_out(vty
, "Intf addr errors: %"PRIu64
"\n", errs
);
4814 incoming
= atomic_load_explicit(&zdplane_info
.dg_macs_in
,
4815 memory_order_relaxed
);
4816 errs
= atomic_load_explicit(&zdplane_info
.dg_mac_errors
,
4817 memory_order_relaxed
);
4818 vty_out(vty
, "EVPN MAC updates: %"PRIu64
"\n", incoming
);
4819 vty_out(vty
, "EVPN MAC errors: %"PRIu64
"\n", errs
);
4821 incoming
= atomic_load_explicit(&zdplane_info
.dg_neighs_in
,
4822 memory_order_relaxed
);
4823 errs
= atomic_load_explicit(&zdplane_info
.dg_neigh_errors
,
4824 memory_order_relaxed
);
4825 vty_out(vty
, "EVPN neigh updates: %"PRIu64
"\n", incoming
);
4826 vty_out(vty
, "EVPN neigh errors: %"PRIu64
"\n", errs
);
4828 incoming
= atomic_load_explicit(&zdplane_info
.dg_rules_in
,
4829 memory_order_relaxed
);
4830 errs
= atomic_load_explicit(&zdplane_info
.dg_rule_errors
,
4831 memory_order_relaxed
);
4832 vty_out(vty
, "Rule updates: %" PRIu64
"\n", incoming
);
4833 vty_out(vty
, "Rule errors: %" PRIu64
"\n", errs
);
4835 incoming
= atomic_load_explicit(&zdplane_info
.dg_br_port_in
,
4836 memory_order_relaxed
);
4837 errs
= atomic_load_explicit(&zdplane_info
.dg_br_port_errors
,
4838 memory_order_relaxed
);
4839 vty_out(vty
, "Bridge port updates: %" PRIu64
"\n", incoming
);
4840 vty_out(vty
, "Bridge port errors: %" PRIu64
"\n", errs
);
4842 incoming
= atomic_load_explicit(&zdplane_info
.dg_iptable_in
,
4843 memory_order_relaxed
);
4844 errs
= atomic_load_explicit(&zdplane_info
.dg_iptable_errors
,
4845 memory_order_relaxed
);
4846 vty_out(vty
, "IPtable updates: %" PRIu64
"\n", incoming
);
4847 vty_out(vty
, "IPtable errors: %" PRIu64
"\n", errs
);
4848 incoming
= atomic_load_explicit(&zdplane_info
.dg_ipset_in
,
4849 memory_order_relaxed
);
4850 errs
= atomic_load_explicit(&zdplane_info
.dg_ipset_errors
,
4851 memory_order_relaxed
);
4852 vty_out(vty
, "IPset updates: %" PRIu64
"\n", incoming
);
4853 vty_out(vty
, "IPset errors: %" PRIu64
"\n", errs
);
4854 incoming
= atomic_load_explicit(&zdplane_info
.dg_ipset_entry_in
,
4855 memory_order_relaxed
);
4856 errs
= atomic_load_explicit(&zdplane_info
.dg_ipset_entry_errors
,
4857 memory_order_relaxed
);
4858 vty_out(vty
, "IPset entry updates: %" PRIu64
"\n", incoming
);
4859 vty_out(vty
, "IPset entry errors: %" PRIu64
"\n", errs
);
4861 incoming
= atomic_load_explicit(&zdplane_info
.dg_neightable_in
,
4862 memory_order_relaxed
);
4863 errs
= atomic_load_explicit(&zdplane_info
.dg_neightable_errors
,
4864 memory_order_relaxed
);
4865 vty_out(vty
, "Neighbor Table updates: %"PRIu64
"\n", incoming
);
4866 vty_out(vty
, "Neighbor Table errors: %"PRIu64
"\n", errs
);
4868 incoming
= atomic_load_explicit(&zdplane_info
.dg_gre_set_in
,
4869 memory_order_relaxed
);
4870 errs
= atomic_load_explicit(&zdplane_info
.dg_gre_set_errors
,
4871 memory_order_relaxed
);
4872 vty_out(vty
, "GRE set updates: %"PRIu64
"\n", incoming
);
4873 vty_out(vty
, "GRE set errors: %"PRIu64
"\n", errs
);
4878 * Handler for 'show dplane providers'
4880 int dplane_show_provs_helper(struct vty
*vty
, bool detailed
)
4882 struct zebra_dplane_provider
*prov
;
4883 uint64_t in
, in_q
, in_max
, out
, out_q
, out_max
;
4885 vty_out(vty
, "Zebra dataplane providers:\n");
4888 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
4891 /* Show counters, useful info from each registered provider */
4894 in
= atomic_load_explicit(&prov
->dp_in_counter
,
4895 memory_order_relaxed
);
4896 in_q
= atomic_load_explicit(&prov
->dp_in_queued
,
4897 memory_order_relaxed
);
4898 in_max
= atomic_load_explicit(&prov
->dp_in_max
,
4899 memory_order_relaxed
);
4900 out
= atomic_load_explicit(&prov
->dp_out_counter
,
4901 memory_order_relaxed
);
4902 out_q
= atomic_load_explicit(&prov
->dp_out_queued
,
4903 memory_order_relaxed
);
4904 out_max
= atomic_load_explicit(&prov
->dp_out_max
,
4905 memory_order_relaxed
);
4907 vty_out(vty
, "%s (%u): in: %"PRIu64
", q: %"PRIu64
", q_max: %"PRIu64
", out: %"PRIu64
", q: %"PRIu64
", q_max: %"PRIu64
"\n",
4908 prov
->dp_name
, prov
->dp_id
, in
, in_q
, in_max
,
4909 out
, out_q
, out_max
);
4912 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
4920 * Helper for 'show run' etc.
4922 int dplane_config_write_helper(struct vty
*vty
)
4924 if (zdplane_info
.dg_max_queued_updates
!= DPLANE_DEFAULT_MAX_QUEUED
)
4925 vty_out(vty
, "zebra dplane limit %u\n",
4926 zdplane_info
.dg_max_queued_updates
);
4932 * Provider registration
4934 int dplane_provider_register(const char *name
,
4935 enum dplane_provider_prio prio
,
4937 int (*start_fp
)(struct zebra_dplane_provider
*),
4938 int (*fp
)(struct zebra_dplane_provider
*),
4939 int (*fini_fp
)(struct zebra_dplane_provider
*,
4942 struct zebra_dplane_provider
**prov_p
)
4945 struct zebra_dplane_provider
*p
= NULL
, *last
;
4953 if (prio
<= DPLANE_PRIO_NONE
||
4954 prio
> DPLANE_PRIO_LAST
) {
4959 /* Allocate and init new provider struct */
4960 p
= XCALLOC(MTYPE_DP_PROV
, sizeof(struct zebra_dplane_provider
));
4962 pthread_mutex_init(&(p
->dp_mutex
), NULL
);
4963 TAILQ_INIT(&(p
->dp_ctx_in_q
));
4964 TAILQ_INIT(&(p
->dp_ctx_out_q
));
4966 p
->dp_flags
= flags
;
4967 p
->dp_priority
= prio
;
4969 p
->dp_start
= start_fp
;
4970 p
->dp_fini
= fini_fp
;
4973 /* Lock - the dplane pthread may be running */
4976 p
->dp_id
= ++zdplane_info
.dg_provider_id
;
4979 strlcpy(p
->dp_name
, name
, DPLANE_PROVIDER_NAMELEN
);
4981 snprintf(p
->dp_name
, DPLANE_PROVIDER_NAMELEN
,
4982 "provider-%u", p
->dp_id
);
4984 /* Insert into list ordered by priority */
4985 TAILQ_FOREACH(last
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
4986 if (last
->dp_priority
> p
->dp_priority
)
4991 TAILQ_INSERT_BEFORE(last
, p
, dp_prov_link
);
4993 TAILQ_INSERT_TAIL(&zdplane_info
.dg_providers_q
, p
,
4999 if (IS_ZEBRA_DEBUG_DPLANE
)
5000 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
5001 p
->dp_name
, p
->dp_id
, p
->dp_priority
);
5010 /* Accessors for provider attributes */
5011 const char *dplane_provider_get_name(const struct zebra_dplane_provider
*prov
)
5013 return prov
->dp_name
;
5016 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider
*prov
)
5021 void *dplane_provider_get_data(const struct zebra_dplane_provider
*prov
)
5023 return prov
->dp_data
;
5026 int dplane_provider_get_work_limit(const struct zebra_dplane_provider
*prov
)
5028 return zdplane_info
.dg_updates_per_cycle
;
/* Lock/unlock a provider's mutex - iff the provider was registered with
 * the THREADED flag.
 */
void dplane_provider_lock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_LOCK(prov);
}
/* Unlock a threaded provider's mutex; no-op for unthreaded providers. */
void dplane_provider_unlock(struct zebra_dplane_provider *prov)
{
	if (dplane_provider_is_threaded(prov))
		DPLANE_PROV_UNLOCK(prov);
}
5047 * Dequeue and maintain associated counter
5049 struct zebra_dplane_ctx
*dplane_provider_dequeue_in_ctx(
5050 struct zebra_dplane_provider
*prov
)
5052 struct zebra_dplane_ctx
*ctx
= NULL
;
5054 dplane_provider_lock(prov
);
5056 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
5058 TAILQ_REMOVE(&(prov
->dp_ctx_in_q
), ctx
, zd_q_entries
);
5060 atomic_fetch_sub_explicit(&prov
->dp_in_queued
, 1,
5061 memory_order_relaxed
);
5064 dplane_provider_unlock(prov
);
5070 * Dequeue work to a list, return count
5072 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider
*prov
,
5073 struct dplane_ctx_q
*listp
)
5076 struct zebra_dplane_ctx
*ctx
;
5078 limit
= zdplane_info
.dg_updates_per_cycle
;
5080 dplane_provider_lock(prov
);
5082 for (ret
= 0; ret
< limit
; ret
++) {
5083 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
5085 TAILQ_REMOVE(&(prov
->dp_ctx_in_q
), ctx
, zd_q_entries
);
5087 TAILQ_INSERT_TAIL(listp
, ctx
, zd_q_entries
);
5094 atomic_fetch_sub_explicit(&prov
->dp_in_queued
, ret
,
5095 memory_order_relaxed
);
5097 dplane_provider_unlock(prov
);
5102 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider
*prov
)
5104 return atomic_load_explicit(&(prov
->dp_out_counter
),
5105 memory_order_relaxed
);
5109 * Enqueue and maintain associated counter
5111 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider
*prov
,
5112 struct zebra_dplane_ctx
*ctx
)
5114 uint64_t curr
, high
;
5116 dplane_provider_lock(prov
);
5118 TAILQ_INSERT_TAIL(&(prov
->dp_ctx_out_q
), ctx
,
5121 /* Maintain out-queue counters */
5122 atomic_fetch_add_explicit(&(prov
->dp_out_queued
), 1,
5123 memory_order_relaxed
);
5124 curr
= atomic_load_explicit(&prov
->dp_out_queued
,
5125 memory_order_relaxed
);
5126 high
= atomic_load_explicit(&prov
->dp_out_max
,
5127 memory_order_relaxed
);
5129 atomic_store_explicit(&prov
->dp_out_max
, curr
,
5130 memory_order_relaxed
);
5132 dplane_provider_unlock(prov
);
5134 atomic_fetch_add_explicit(&(prov
->dp_out_counter
), 1,
5135 memory_order_relaxed
);
5139 * Accessor for provider object
5141 bool dplane_provider_is_threaded(const struct zebra_dplane_provider
*prov
)
5143 return (prov
->dp_flags
& DPLANE_PROV_FLAG_THREADED
);
5148 * Callback when an OS (netlink) incoming event read is ready. This runs
5149 * in the dplane pthread.
5151 static void dplane_incoming_read(struct thread
*event
)
5153 struct dplane_zns_info
*zi
= THREAD_ARG(event
);
5155 kernel_dplane_read(&zi
->info
);
5157 /* Re-start read task */
5158 thread_add_read(zdplane_info
.dg_master
, dplane_incoming_read
, zi
,
5159 zi
->info
.sock
, &zi
->t_read
);
5163 * Callback in the dataplane pthread that requests info from the OS and
5164 * initiates netlink reads.
5166 static void dplane_incoming_request(struct thread
*event
)
5168 struct dplane_zns_info
*zi
= THREAD_ARG(event
);
5170 /* Start read task */
5171 thread_add_read(zdplane_info
.dg_master
, dplane_incoming_read
, zi
,
5172 zi
->info
.sock
, &zi
->t_read
);
5175 netlink_request_netconf(zi
->info
.sock
);
5179 * Initiate requests for existing info from the OS. This is called by the
5180 * main pthread, but we want all activity on the dplane netlink socket to
5181 * take place on the dplane pthread, so we schedule an event to accomplish
5184 static void dplane_kernel_info_request(struct dplane_zns_info
*zi
)
5186 /* If we happen to encounter an enabled zns before the dplane
5187 * pthread is running, we'll initiate this later on.
5189 if (zdplane_info
.dg_master
)
5190 thread_add_event(zdplane_info
.dg_master
,
5191 dplane_incoming_request
, zi
, 0,
5195 #endif /* HAVE_NETLINK */
5198 * Notify dplane when namespaces are enabled and disabled. The dplane
5199 * needs to start and stop reading incoming events from the zns. In the
5200 * common case where vrfs are _not_ namespaces, there will only be one
5203 * This is called in the main pthread.
5205 void zebra_dplane_ns_enable(struct zebra_ns
*zns
, bool enabled
)
5207 struct dplane_zns_info
*zi
;
5209 if (IS_ZEBRA_DEBUG_DPLANE
)
5210 zlog_debug("%s: %s for nsid %u", __func__
,
5211 (enabled
? "ENABLED" : "DISABLED"), zns
->ns_id
);
5213 /* Search for an existing zns info entry */
5214 frr_each (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
5215 if (zi
->info
.ns_id
== zns
->ns_id
)
5220 /* Create a new entry if necessary; start reading. */
5222 zi
= XCALLOC(MTYPE_DP_NS
, sizeof(*zi
));
5224 zi
->info
.ns_id
= zns
->ns_id
;
5226 zns_info_list_add_tail(&zdplane_info
.dg_zns_list
, zi
);
5228 if (IS_ZEBRA_DEBUG_DPLANE
)
5229 zlog_debug("%s: nsid %u, new zi %p", __func__
,
5233 /* Make sure we're up-to-date with the zns object */
5234 #if defined(HAVE_NETLINK)
5235 zi
->info
.is_cmd
= false;
5236 zi
->info
.sock
= zns
->netlink_dplane_in
.sock
;
5238 /* Initiate requests for existing info from the OS, and
5239 * begin reading from the netlink socket.
5241 dplane_kernel_info_request(zi
);
5244 if (IS_ZEBRA_DEBUG_DPLANE
)
5245 zlog_debug("%s: nsid %u, deleting zi %p", __func__
,
5248 /* Stop reading, free memory */
5249 zns_info_list_del(&zdplane_info
.dg_zns_list
, zi
);
5251 /* Stop any outstanding tasks */
5252 if (zdplane_info
.dg_master
) {
5253 thread_cancel_async(zdplane_info
.dg_master
,
5254 &zi
->t_request
, NULL
);
5256 thread_cancel_async(zdplane_info
.dg_master
, &zi
->t_read
,
5260 XFREE(MTYPE_DP_NS
, zi
);
5265 * Provider api to signal that work/events are available
5266 * for the dataplane pthread.
5268 int dplane_provider_work_ready(void)
5270 /* Note that during zebra startup, we may be offered work before
5271 * the dataplane pthread (and thread-master) are ready. We want to
5272 * enqueue the work, but the event-scheduling machinery may not be
5275 if (zdplane_info
.dg_run
) {
5276 thread_add_event(zdplane_info
.dg_master
,
5277 dplane_thread_loop
, NULL
, 0,
5278 &zdplane_info
.dg_t_update
);
5285 * Enqueue a context directly to zebra main.
5287 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx
*ctx
)
5289 struct dplane_ctx_q temp_list
;
5291 /* Zebra's api takes a list, so we need to use a temporary list */
5292 TAILQ_INIT(&temp_list
);
5294 TAILQ_INSERT_TAIL(&temp_list
, ctx
, zd_q_entries
);
5295 (zdplane_info
.dg_results_cb
)(&temp_list
);
5299 * Kernel dataplane provider
5302 static void kernel_dplane_log_detail(struct zebra_dplane_ctx
*ctx
)
5304 char buf
[PREFIX_STRLEN
];
5306 switch (dplane_ctx_get_op(ctx
)) {
5308 case DPLANE_OP_ROUTE_INSTALL
:
5309 case DPLANE_OP_ROUTE_UPDATE
:
5310 case DPLANE_OP_ROUTE_DELETE
:
5311 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5312 dplane_ctx_get_vrf(ctx
), dplane_ctx_get_dest(ctx
),
5313 ctx
, dplane_op2str(dplane_ctx_get_op(ctx
)));
5316 case DPLANE_OP_NH_INSTALL
:
5317 case DPLANE_OP_NH_UPDATE
:
5318 case DPLANE_OP_NH_DELETE
:
5319 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5320 dplane_ctx_get_nhe_id(ctx
), ctx
,
5321 dplane_op2str(dplane_ctx_get_op(ctx
)));
5324 case DPLANE_OP_LSP_INSTALL
:
5325 case DPLANE_OP_LSP_UPDATE
:
5326 case DPLANE_OP_LSP_DELETE
:
5329 case DPLANE_OP_PW_INSTALL
:
5330 case DPLANE_OP_PW_UNINSTALL
:
5331 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5332 dplane_ctx_get_ifname(ctx
),
5333 dplane_op2str(ctx
->zd_op
), dplane_ctx_get_pw_af(ctx
),
5334 dplane_ctx_get_pw_local_label(ctx
),
5335 dplane_ctx_get_pw_remote_label(ctx
));
5338 case DPLANE_OP_ADDR_INSTALL
:
5339 case DPLANE_OP_ADDR_UNINSTALL
:
5340 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5341 dplane_op2str(dplane_ctx_get_op(ctx
)),
5342 dplane_ctx_get_ifindex(ctx
),
5343 dplane_ctx_get_intf_addr(ctx
));
5346 case DPLANE_OP_MAC_INSTALL
:
5347 case DPLANE_OP_MAC_DELETE
:
5348 prefix_mac2str(dplane_ctx_mac_get_addr(ctx
), buf
,
5351 zlog_debug("Dplane %s, mac %s, ifindex %u",
5352 dplane_op2str(dplane_ctx_get_op(ctx
)),
5353 buf
, dplane_ctx_get_ifindex(ctx
));
5356 case DPLANE_OP_NEIGH_INSTALL
:
5357 case DPLANE_OP_NEIGH_UPDATE
:
5358 case DPLANE_OP_NEIGH_DELETE
:
5359 case DPLANE_OP_VTEP_ADD
:
5360 case DPLANE_OP_VTEP_DELETE
:
5361 case DPLANE_OP_NEIGH_DISCOVER
:
5362 case DPLANE_OP_NEIGH_IP_INSTALL
:
5363 case DPLANE_OP_NEIGH_IP_DELETE
:
5364 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx
), buf
,
5367 zlog_debug("Dplane %s, ip %s, ifindex %u",
5368 dplane_op2str(dplane_ctx_get_op(ctx
)),
5369 buf
, dplane_ctx_get_ifindex(ctx
));
5372 case DPLANE_OP_RULE_ADD
:
5373 case DPLANE_OP_RULE_DELETE
:
5374 case DPLANE_OP_RULE_UPDATE
:
5375 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5376 dplane_op2str(dplane_ctx_get_op(ctx
)),
5377 dplane_ctx_get_ifname(ctx
),
5378 dplane_ctx_get_ifindex(ctx
), ctx
);
5381 case DPLANE_OP_SYS_ROUTE_ADD
:
5382 case DPLANE_OP_SYS_ROUTE_DELETE
:
5383 case DPLANE_OP_ROUTE_NOTIFY
:
5384 case DPLANE_OP_LSP_NOTIFY
:
5385 case DPLANE_OP_BR_PORT_UPDATE
:
5387 case DPLANE_OP_NONE
:
5390 case DPLANE_OP_IPTABLE_ADD
:
5391 case DPLANE_OP_IPTABLE_DELETE
: {
5392 struct zebra_pbr_iptable ipt
;
5394 dplane_ctx_get_pbr_iptable(ctx
, &ipt
);
5395 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5396 dplane_op2str(dplane_ctx_get_op(ctx
)), ipt
.unique
,
5399 case DPLANE_OP_IPSET_ADD
:
5400 case DPLANE_OP_IPSET_DELETE
: {
5401 struct zebra_pbr_ipset ipset
;
5403 dplane_ctx_get_pbr_ipset(ctx
, &ipset
);
5404 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5405 dplane_op2str(dplane_ctx_get_op(ctx
)), ipset
.unique
,
5408 case DPLANE_OP_IPSET_ENTRY_ADD
:
5409 case DPLANE_OP_IPSET_ENTRY_DELETE
: {
5410 struct zebra_pbr_ipset_entry ipent
;
5412 dplane_ctx_get_pbr_ipset_entry(ctx
, &ipent
);
5414 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5415 dplane_op2str(dplane_ctx_get_op(ctx
)), ipent
.unique
,
5418 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
5419 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5420 dplane_op2str(dplane_ctx_get_op(ctx
)),
5421 dplane_ctx_get_ifname(ctx
),
5422 family2str(dplane_ctx_neightable_get_family(ctx
)));
5424 case DPLANE_OP_GRE_SET
:
5425 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5426 dplane_op2str(dplane_ctx_get_op(ctx
)),
5427 dplane_ctx_get_ifname(ctx
),
5428 ctx
->u
.gre
.link_ifindex
);
5431 case DPLANE_OP_INTF_ADDR_ADD
:
5432 case DPLANE_OP_INTF_ADDR_DEL
:
5433 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5434 dplane_op2str(dplane_ctx_get_op(ctx
)),
5435 dplane_ctx_get_ifname(ctx
),
5436 dplane_ctx_get_intf_addr(ctx
));
5439 case DPLANE_OP_INTF_NETCONFIG
:
5440 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5441 dplane_op2str(dplane_ctx_get_op(ctx
)),
5442 dplane_ctx_get_netconf_ifindex(ctx
),
5443 dplane_ctx_get_netconf_mpls(ctx
),
5444 dplane_ctx_get_netconf_mcast(ctx
));
5447 case DPLANE_OP_INTF_INSTALL
:
5448 case DPLANE_OP_INTF_UPDATE
:
5449 case DPLANE_OP_INTF_DELETE
:
5450 zlog_debug("Dplane intf %s, idx %u, protodown %d",
5451 dplane_op2str(dplane_ctx_get_op(ctx
)),
5452 dplane_ctx_get_ifindex(ctx
),
5453 dplane_ctx_intf_is_protodown(ctx
));
5458 static void kernel_dplane_handle_result(struct zebra_dplane_ctx
*ctx
)
5460 enum zebra_dplane_result res
= dplane_ctx_get_status(ctx
);
5462 switch (dplane_ctx_get_op(ctx
)) {
5464 case DPLANE_OP_ROUTE_INSTALL
:
5465 case DPLANE_OP_ROUTE_UPDATE
:
5466 case DPLANE_OP_ROUTE_DELETE
:
5467 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5468 atomic_fetch_add_explicit(&zdplane_info
.dg_route_errors
,
5469 1, memory_order_relaxed
);
5471 if ((dplane_ctx_get_op(ctx
) != DPLANE_OP_ROUTE_DELETE
)
5472 && (res
== ZEBRA_DPLANE_REQUEST_SUCCESS
)) {
5473 struct nexthop
*nexthop
;
5475 /* Update installed nexthops to signal which have been
5478 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx
),
5480 if (CHECK_FLAG(nexthop
->flags
,
5481 NEXTHOP_FLAG_RECURSIVE
))
5484 if (CHECK_FLAG(nexthop
->flags
,
5485 NEXTHOP_FLAG_ACTIVE
)) {
5486 SET_FLAG(nexthop
->flags
,
5493 case DPLANE_OP_NH_INSTALL
:
5494 case DPLANE_OP_NH_UPDATE
:
5495 case DPLANE_OP_NH_DELETE
:
5496 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5497 atomic_fetch_add_explicit(
5498 &zdplane_info
.dg_nexthop_errors
, 1,
5499 memory_order_relaxed
);
5502 case DPLANE_OP_LSP_INSTALL
:
5503 case DPLANE_OP_LSP_UPDATE
:
5504 case DPLANE_OP_LSP_DELETE
:
5505 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5506 atomic_fetch_add_explicit(&zdplane_info
.dg_lsp_errors
,
5507 1, memory_order_relaxed
);
5510 case DPLANE_OP_PW_INSTALL
:
5511 case DPLANE_OP_PW_UNINSTALL
:
5512 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5513 atomic_fetch_add_explicit(&zdplane_info
.dg_pw_errors
, 1,
5514 memory_order_relaxed
);
5517 case DPLANE_OP_ADDR_INSTALL
:
5518 case DPLANE_OP_ADDR_UNINSTALL
:
5519 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5520 atomic_fetch_add_explicit(
5521 &zdplane_info
.dg_intf_addr_errors
, 1,
5522 memory_order_relaxed
);
5525 case DPLANE_OP_MAC_INSTALL
:
5526 case DPLANE_OP_MAC_DELETE
:
5527 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5528 atomic_fetch_add_explicit(&zdplane_info
.dg_mac_errors
,
5529 1, memory_order_relaxed
);
5532 case DPLANE_OP_NEIGH_INSTALL
:
5533 case DPLANE_OP_NEIGH_UPDATE
:
5534 case DPLANE_OP_NEIGH_DELETE
:
5535 case DPLANE_OP_VTEP_ADD
:
5536 case DPLANE_OP_VTEP_DELETE
:
5537 case DPLANE_OP_NEIGH_DISCOVER
:
5538 case DPLANE_OP_NEIGH_IP_INSTALL
:
5539 case DPLANE_OP_NEIGH_IP_DELETE
:
5540 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5541 atomic_fetch_add_explicit(&zdplane_info
.dg_neigh_errors
,
5542 1, memory_order_relaxed
);
5545 case DPLANE_OP_RULE_ADD
:
5546 case DPLANE_OP_RULE_DELETE
:
5547 case DPLANE_OP_RULE_UPDATE
:
5548 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5549 atomic_fetch_add_explicit(&zdplane_info
.dg_rule_errors
,
5550 1, memory_order_relaxed
);
5553 case DPLANE_OP_IPTABLE_ADD
:
5554 case DPLANE_OP_IPTABLE_DELETE
:
5555 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5556 atomic_fetch_add_explicit(
5557 &zdplane_info
.dg_iptable_errors
, 1,
5558 memory_order_relaxed
);
5561 case DPLANE_OP_IPSET_ADD
:
5562 case DPLANE_OP_IPSET_DELETE
:
5563 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5564 atomic_fetch_add_explicit(&zdplane_info
.dg_ipset_errors
,
5565 1, memory_order_relaxed
);
5568 case DPLANE_OP_IPSET_ENTRY_ADD
:
5569 case DPLANE_OP_IPSET_ENTRY_DELETE
:
5570 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5571 atomic_fetch_add_explicit(
5572 &zdplane_info
.dg_ipset_entry_errors
, 1,
5573 memory_order_relaxed
);
5576 case DPLANE_OP_NEIGH_TABLE_UPDATE
:
5577 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5578 atomic_fetch_add_explicit(
5579 &zdplane_info
.dg_neightable_errors
, 1,
5580 memory_order_relaxed
);
5583 case DPLANE_OP_GRE_SET
:
5584 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5585 atomic_fetch_add_explicit(
5586 &zdplane_info
.dg_gre_set_errors
, 1,
5587 memory_order_relaxed
);
5590 case DPLANE_OP_INTF_INSTALL
:
5591 case DPLANE_OP_INTF_UPDATE
:
5592 case DPLANE_OP_INTF_DELETE
:
5593 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5594 atomic_fetch_add_explicit(&zdplane_info
.dg_intf_errors
,
5595 1, memory_order_relaxed
);
5598 /* Ignore 'notifications' - no-op */
5599 case DPLANE_OP_SYS_ROUTE_ADD
:
5600 case DPLANE_OP_SYS_ROUTE_DELETE
:
5601 case DPLANE_OP_ROUTE_NOTIFY
:
5602 case DPLANE_OP_LSP_NOTIFY
:
5603 case DPLANE_OP_BR_PORT_UPDATE
:
5606 /* TODO -- error counters for incoming events? */
5607 case DPLANE_OP_INTF_ADDR_ADD
:
5608 case DPLANE_OP_INTF_ADDR_DEL
:
5609 case DPLANE_OP_INTF_NETCONFIG
:
5612 case DPLANE_OP_NONE
:
5613 if (res
!= ZEBRA_DPLANE_REQUEST_SUCCESS
)
5614 atomic_fetch_add_explicit(&zdplane_info
.dg_other_errors
,
5615 1, memory_order_relaxed
);
/* Process an iptable update in-line (not batched through the kernel
 * multi-update path) and move the context to the provider's out queue.
 */
static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
					  struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_iptable(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
/* Process an ipset update in-line and move the context to the provider's
 * out queue.
 */
static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
					struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_ipset(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
/* Process an ipset-entry update in-line and move the context to the
 * provider's out queue.
 */
static void
kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
				  struct zebra_dplane_ctx *ctx)
{
	zebra_pbr_process_ipset_entry(ctx);
	dplane_provider_enqueue_out_ctx(prov, ctx);
}
5643 * Kernel provider callback
5645 static int kernel_dplane_process_func(struct zebra_dplane_provider
*prov
)
5647 struct zebra_dplane_ctx
*ctx
, *tctx
;
5648 struct dplane_ctx_q work_list
;
5651 TAILQ_INIT(&work_list
);
5653 limit
= dplane_provider_get_work_limit(prov
);
5655 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5656 zlog_debug("dplane provider '%s': processing",
5657 dplane_provider_get_name(prov
));
5659 for (counter
= 0; counter
< limit
; counter
++) {
5660 ctx
= dplane_provider_dequeue_in_ctx(prov
);
5663 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5664 kernel_dplane_log_detail(ctx
);
5666 if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPTABLE_ADD
5667 || dplane_ctx_get_op(ctx
) == DPLANE_OP_IPTABLE_DELETE
))
5668 kernel_dplane_process_iptable(prov
, ctx
);
5669 else if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_ADD
5670 || dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_DELETE
))
5671 kernel_dplane_process_ipset(prov
, ctx
);
5672 else if ((dplane_ctx_get_op(ctx
) == DPLANE_OP_IPSET_ENTRY_ADD
5673 || dplane_ctx_get_op(ctx
)
5674 == DPLANE_OP_IPSET_ENTRY_DELETE
))
5675 kernel_dplane_process_ipset_entry(prov
, ctx
);
5677 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
5680 kernel_update_multi(&work_list
);
5682 TAILQ_FOREACH_SAFE (ctx
, &work_list
, zd_q_entries
, tctx
) {
5683 kernel_dplane_handle_result(ctx
);
5685 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
5686 dplane_provider_enqueue_out_ctx(prov
, ctx
);
5689 /* Ensure that we'll run the work loop again if there's still
5692 if (counter
>= limit
) {
5693 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
5694 zlog_debug("dplane provider '%s' reached max updates %d",
5695 dplane_provider_get_name(prov
), counter
);
5697 atomic_fetch_add_explicit(&zdplane_info
.dg_update_yields
,
5698 1, memory_order_relaxed
);
5700 dplane_provider_work_ready();
#ifdef DPLANE_TEST_PROVIDER

/*
 * Test dataplane provider plugin
 */

/*
 * Test provider process callback: marks each dequeued context successful
 * and moves it from the 'in' queue to the 'out' queue.
 */
static int test_dplane_process_func(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	/* Just moving from 'in' queue to 'out' queue */

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processing",
			   dplane_provider_get_name(prov));

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("dplane provider '%s': op %s",
				   dplane_provider_get_name(prov),
				   dplane_op2str(dplane_ctx_get_op(ctx)));

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
		zlog_debug("dplane provider '%s': processed %d",
			   dplane_provider_get_name(prov), counter);

	/* Ensure that we'll run the work loop again if there's still
	 * more work to do.
	 */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}

/*
 * Test provider shutdown/fini callback: just logs the (possibly early)
 * shutdown notification.
 */
static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
				     bool early)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("dplane provider '%s': %sshutdown",
			   dplane_provider_get_name(prov),
			   early ? "early " : "");

	return 0;
}
#endif	/* DPLANE_TEST_PROVIDER */
5772 * Register default kernel provider
5774 static void dplane_provider_init(void)
5778 ret
= dplane_provider_register("Kernel",
5780 DPLANE_PROV_FLAGS_DEFAULT
, NULL
,
5781 kernel_dplane_process_func
,
5786 zlog_err("Unable to register kernel dplane provider: %d",
5789 #ifdef DPLANE_TEST_PROVIDER
5790 /* Optional test provider ... */
5791 ret
= dplane_provider_register("Test",
5792 DPLANE_PRIO_PRE_KERNEL
,
5793 DPLANE_PROV_FLAGS_DEFAULT
, NULL
,
5794 test_dplane_process_func
,
5795 test_dplane_shutdown_func
,
5796 NULL
/* data */, NULL
);
5799 zlog_err("Unable to register test dplane provider: %d",
5801 #endif /* DPLANE_TEST_PROVIDER */
5805 * Allow zebra code to walk the queue of pending contexts, evaluate each one
5806 * using a callback function. If the function returns 'true', the context
5807 * will be dequeued and freed without being processed.
5809 int dplane_clean_ctx_queue(bool (*context_cb
)(struct zebra_dplane_ctx
*ctx
,
5810 void *arg
), void *val
)
5812 struct zebra_dplane_ctx
*ctx
, *temp
;
5813 struct dplane_ctx_q work_list
;
5815 TAILQ_INIT(&work_list
);
5817 if (context_cb
== NULL
)
5820 /* Walk the pending context queue under the dplane lock. */
5823 TAILQ_FOREACH_SAFE(ctx
, &zdplane_info
.dg_update_ctx_q
, zd_q_entries
,
5825 if (context_cb(ctx
, val
)) {
5826 TAILQ_REMOVE(&zdplane_info
.dg_update_ctx_q
, ctx
,
5828 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
5834 /* Now free any contexts selected by the caller, without holding
5837 TAILQ_FOREACH_SAFE(ctx
, &work_list
, zd_q_entries
, temp
) {
5838 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
5839 dplane_ctx_fini(&ctx
);
5847 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5848 * simplified or skipped during shutdown processing.
5850 bool dplane_is_in_shutdown(void)
5852 return zdplane_info
.dg_is_shutdown
;
5856 * Enable collection of extra info about interfaces in route updates.
5858 void dplane_enable_intf_extra_info(void)
5860 dplane_collect_extra_intf_info
= true;
5864 * Early or pre-shutdown, de-init notification api. This runs pretty
5865 * early during zebra shutdown, as a signal to stop new work and prepare
5866 * for updates generated by shutdown/cleanup activity, as zebra tries to
5867 * remove everything it's responsible for.
5868 * NB: This runs in the main zebra pthread context.
5870 void zebra_dplane_pre_finish(void)
5872 struct zebra_dplane_provider
*prov
;
5874 if (IS_ZEBRA_DEBUG_DPLANE
)
5875 zlog_debug("Zebra dataplane pre-finish called");
5877 zdplane_info
.dg_is_shutdown
= true;
5879 /* Notify provider(s) of pending shutdown. */
5880 TAILQ_FOREACH(prov
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
5881 if (prov
->dp_fini
== NULL
)
5884 prov
->dp_fini(prov
, true /* early */);
5889 * Utility to determine whether work remains enqueued within the dplane;
5890 * used during system shutdown processing.
5892 static bool dplane_work_pending(void)
5895 struct zebra_dplane_ctx
*ctx
;
5896 struct zebra_dplane_provider
*prov
;
5898 /* TODO -- just checking incoming/pending work for now, must check
5903 ctx
= TAILQ_FIRST(&zdplane_info
.dg_update_ctx_q
);
5904 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
5915 dplane_provider_lock(prov
);
5917 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_in_q
));
5919 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_out_q
));
5921 dplane_provider_unlock(prov
);
5927 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
5939 * Shutdown-time intermediate callback, used to determine when all pending
5940 * in-flight updates are done. If there's still work to do, reschedules itself.
5941 * If all work is done, schedules an event to the main zebra thread for
5942 * final zebra shutdown.
5943 * This runs in the dplane pthread context.
5945 static void dplane_check_shutdown_status(struct thread
*event
)
5947 struct dplane_zns_info
*zi
;
5949 if (IS_ZEBRA_DEBUG_DPLANE
)
5950 zlog_debug("Zebra dataplane shutdown status check called");
5952 /* Remove any zns info entries as we stop the dplane pthread. */
5953 frr_each_safe (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
5954 zns_info_list_del(&zdplane_info
.dg_zns_list
, zi
);
5956 if (zdplane_info
.dg_master
) {
5957 thread_cancel(&zi
->t_read
);
5958 thread_cancel(&zi
->t_request
);
5961 XFREE(MTYPE_DP_NS
, zi
);
5964 if (dplane_work_pending()) {
5965 /* Reschedule dplane check on a short timer */
5966 thread_add_timer_msec(zdplane_info
.dg_master
,
5967 dplane_check_shutdown_status
,
5969 &zdplane_info
.dg_t_shutdown_check
);
5971 /* TODO - give up and stop waiting after a short time? */
5974 /* We appear to be done - schedule a final callback event
5975 * for the zebra main pthread.
5977 thread_add_event(zrouter
.master
, zebra_finalize
, NULL
, 0, NULL
);
5982 * Shutdown, de-init api. This runs pretty late during shutdown,
5983 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5984 * At this point, dplane work may still remain to be done, so we can't just
5985 * blindly terminate. If there's still work to do, we'll periodically check
5986 * and when done, we'll enqueue a task to the zebra main thread for final
5987 * termination processing.
5989 * NB: This runs in the main zebra thread context.
5991 void zebra_dplane_finish(void)
5993 if (IS_ZEBRA_DEBUG_DPLANE
)
5994 zlog_debug("Zebra dataplane fini called");
5996 thread_add_event(zdplane_info
.dg_master
,
5997 dplane_check_shutdown_status
, NULL
, 0,
5998 &zdplane_info
.dg_t_shutdown_check
);
6002 * Main dataplane pthread event loop. The thread takes new incoming work
6003 * and offers it to the first provider. It then iterates through the
6004 * providers, taking complete work from each one and offering it
6005 * to the next in order. At each step, a limited number of updates are
6006 * processed during a cycle in order to provide some fairness.
6008 * This loop through the providers is only run once, so that the dataplane
6009 * pthread can look for other pending work - such as i/o work on behalf of
6012 static void dplane_thread_loop(struct thread
*event
)
6014 struct dplane_ctx_q work_list
;
6015 struct dplane_ctx_q error_list
;
6016 struct zebra_dplane_provider
*prov
;
6017 struct zebra_dplane_ctx
*ctx
, *tctx
;
6018 int limit
, counter
, error_counter
;
6019 uint64_t curr
, high
;
6020 bool reschedule
= false;
6022 /* Capture work limit per cycle */
6023 limit
= zdplane_info
.dg_updates_per_cycle
;
6025 /* Init temporary lists used to move contexts among providers */
6026 TAILQ_INIT(&work_list
);
6027 TAILQ_INIT(&error_list
);
6030 /* Check for zebra shutdown */
6031 if (!zdplane_info
.dg_run
)
6034 /* Dequeue some incoming work from zebra (if any) onto the temporary
6039 /* Locate initial registered provider */
6040 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
6042 /* Move new work from incoming list to temp list */
6043 for (counter
= 0; counter
< limit
; counter
++) {
6044 ctx
= TAILQ_FIRST(&zdplane_info
.dg_update_ctx_q
);
6046 TAILQ_REMOVE(&zdplane_info
.dg_update_ctx_q
, ctx
,
6049 ctx
->zd_provider
= prov
->dp_id
;
6051 TAILQ_INSERT_TAIL(&work_list
, ctx
, zd_q_entries
);
6059 atomic_fetch_sub_explicit(&zdplane_info
.dg_routes_queued
, counter
,
6060 memory_order_relaxed
);
6062 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
6063 zlog_debug("dplane: incoming new work counter: %d", counter
);
6065 /* Iterate through the registered providers, offering new incoming
6066 * work. If the provider has outgoing work in its queue, take that
6067 * work for the next provider
6071 /* At each iteration, the temporary work list has 'counter'
6074 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
6075 zlog_debug("dplane enqueues %d new work to provider '%s'",
6076 counter
, dplane_provider_get_name(prov
));
6078 /* Capture current provider id in each context; check for
6081 TAILQ_FOREACH_SAFE(ctx
, &work_list
, zd_q_entries
, tctx
) {
6082 if (dplane_ctx_get_status(ctx
) ==
6083 ZEBRA_DPLANE_REQUEST_SUCCESS
) {
6084 ctx
->zd_provider
= prov
->dp_id
;
6087 * TODO -- improve error-handling: recirc
6088 * errors backwards so that providers can
6089 * 'undo' their work (if they want to)
6092 /* Move to error list; will be returned
6095 TAILQ_REMOVE(&work_list
, ctx
, zd_q_entries
);
6096 TAILQ_INSERT_TAIL(&error_list
,
6102 /* Enqueue new work to the provider */
6103 dplane_provider_lock(prov
);
6105 if (TAILQ_FIRST(&work_list
))
6106 TAILQ_CONCAT(&(prov
->dp_ctx_in_q
), &work_list
,
6109 atomic_fetch_add_explicit(&prov
->dp_in_counter
, counter
,
6110 memory_order_relaxed
);
6111 atomic_fetch_add_explicit(&prov
->dp_in_queued
, counter
,
6112 memory_order_relaxed
);
6113 curr
= atomic_load_explicit(&prov
->dp_in_queued
,
6114 memory_order_relaxed
);
6115 high
= atomic_load_explicit(&prov
->dp_in_max
,
6116 memory_order_relaxed
);
6118 atomic_store_explicit(&prov
->dp_in_max
, curr
,
6119 memory_order_relaxed
);
6121 dplane_provider_unlock(prov
);
6123 /* Reset the temp list (though the 'concat' may have done this
6124 * already), and the counter
6126 TAILQ_INIT(&work_list
);
6129 /* Call into the provider code. Note that this is
6130 * unconditional: we offer to do work even if we don't enqueue
6133 (*prov
->dp_fp
)(prov
);
6135 /* Check for zebra shutdown */
6136 if (!zdplane_info
.dg_run
)
6139 /* Dequeue completed work from the provider */
6140 dplane_provider_lock(prov
);
6142 while (counter
< limit
) {
6143 ctx
= TAILQ_FIRST(&(prov
->dp_ctx_out_q
));
6145 TAILQ_REMOVE(&(prov
->dp_ctx_out_q
), ctx
,
6148 TAILQ_INSERT_TAIL(&work_list
,
6155 dplane_provider_unlock(prov
);
6157 if (counter
>= limit
)
6160 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
6161 zlog_debug("dplane dequeues %d completed work from provider %s",
6162 counter
, dplane_provider_get_name(prov
));
6164 /* Locate next provider */
6166 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
6171 * We hit the work limit while processing at least one provider's
6172 * output queue - ensure we come back and finish it.
6175 dplane_provider_work_ready();
6177 /* After all providers have been serviced, enqueue any completed
6178 * work and any errors back to zebra so it can process the results.
6180 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL
)
6181 zlog_debug("dplane has %d completed, %d errors, for zebra main",
6182 counter
, error_counter
);
6185 * Hand lists through the api to zebra main,
6186 * to reduce the number of lock/unlock cycles
6189 /* Call through to zebra main */
6190 (zdplane_info
.dg_results_cb
)(&error_list
);
6192 TAILQ_INIT(&error_list
);
6194 /* Call through to zebra main */
6195 (zdplane_info
.dg_results_cb
)(&work_list
);
6197 TAILQ_INIT(&work_list
);
6201 * Final phase of shutdown, after all work enqueued to dplane has been
6202 * processed. This is called from the zebra main pthread context.
6204 void zebra_dplane_shutdown(void)
6206 struct zebra_dplane_provider
*dp
;
6208 if (IS_ZEBRA_DEBUG_DPLANE
)
6209 zlog_debug("Zebra dataplane shutdown called");
6211 /* Stop dplane thread, if it's running */
6213 zdplane_info
.dg_run
= false;
6215 if (zdplane_info
.dg_t_update
)
6216 thread_cancel_async(zdplane_info
.dg_t_update
->master
,
6217 &zdplane_info
.dg_t_update
, NULL
);
6219 frr_pthread_stop(zdplane_info
.dg_pthread
, NULL
);
6221 /* Destroy pthread */
6222 frr_pthread_destroy(zdplane_info
.dg_pthread
);
6223 zdplane_info
.dg_pthread
= NULL
;
6224 zdplane_info
.dg_master
= NULL
;
6226 /* Notify provider(s) of final shutdown.
6227 * Note that this call is in the main pthread, so providers must
6228 * be prepared for that.
6230 TAILQ_FOREACH(dp
, &zdplane_info
.dg_providers_q
, dp_prov_link
) {
6231 if (dp
->dp_fini
== NULL
)
6234 dp
->dp_fini(dp
, false);
6237 /* TODO -- Clean-up provider objects */
6239 /* TODO -- Clean queue(s), free memory */
6243 * Initialize the dataplane module during startup, internal/private version
6245 static void zebra_dplane_init_internal(void)
6247 memset(&zdplane_info
, 0, sizeof(zdplane_info
));
6249 pthread_mutex_init(&zdplane_info
.dg_mutex
, NULL
);
6251 TAILQ_INIT(&zdplane_info
.dg_update_ctx_q
);
6252 TAILQ_INIT(&zdplane_info
.dg_providers_q
);
6253 zns_info_list_init(&zdplane_info
.dg_zns_list
);
6255 zdplane_info
.dg_updates_per_cycle
= DPLANE_DEFAULT_NEW_WORK
;
6257 zdplane_info
.dg_max_queued_updates
= DPLANE_DEFAULT_MAX_QUEUED
;
6259 /* Register default kernel 'provider' during init */
6260 dplane_provider_init();
6264 * Start the dataplane pthread. This step needs to be run later than the
6265 * 'init' step, in case zebra has fork-ed.
6267 void zebra_dplane_start(void)
6269 struct dplane_zns_info
*zi
;
6270 struct zebra_dplane_provider
*prov
;
6271 struct frr_pthread_attr pattr
= {
6272 .start
= frr_pthread_attr_default
.start
,
6273 .stop
= frr_pthread_attr_default
.stop
6276 /* Start dataplane pthread */
6278 zdplane_info
.dg_pthread
= frr_pthread_new(&pattr
, "Zebra dplane thread",
6281 zdplane_info
.dg_master
= zdplane_info
.dg_pthread
->master
;
6283 zdplane_info
.dg_run
= true;
6285 /* Enqueue an initial event for the dataplane pthread */
6286 thread_add_event(zdplane_info
.dg_master
, dplane_thread_loop
, NULL
, 0,
6287 &zdplane_info
.dg_t_update
);
6289 /* Enqueue requests and reads if necessary */
6290 frr_each (zns_info_list
, &zdplane_info
.dg_zns_list
, zi
) {
6291 #if defined(HAVE_NETLINK)
6292 thread_add_read(zdplane_info
.dg_master
, dplane_incoming_read
,
6293 zi
, zi
->info
.sock
, &zi
->t_read
);
6294 dplane_kernel_info_request(zi
);
6298 /* Call start callbacks for registered providers */
6301 prov
= TAILQ_FIRST(&zdplane_info
.dg_providers_q
);
6307 (prov
->dp_start
)(prov
);
6309 /* Locate next provider */
6311 prov
= TAILQ_NEXT(prov
, dp_prov_link
);
6315 frr_pthread_run(zdplane_info
.dg_pthread
, NULL
);
6319 * Initialize the dataplane module at startup; called by zebra rib_init()
6321 void zebra_dplane_init(int (*results_fp
)(struct dplane_ctx_q
*))
6323 zebra_dplane_init_internal();
6324 zdplane_info
.dg_results_cb
= results_fp
;