1 /*
2 * Zebra dataplane layer.
3 * Copyright (c) 2018 Volta Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "lib/libfrr.h"
25 #include "lib/debug.h"
26 #include "lib/frratomic.h"
27 #include "lib/frr_pthread.h"
28 #include "lib/memory.h"
29 #include "lib/queue.h"
30 #include "lib/zebra.h"
31 #include "zebra/netconf_netlink.h"
32 #include "zebra/zebra_router.h"
33 #include "zebra/zebra_dplane.h"
34 #include "zebra/zebra_vxlan_private.h"
35 #include "zebra/zebra_mpls.h"
36 #include "zebra/rt.h"
37 #include "zebra/debug.h"
38 #include "zebra/zebra_pbr.h"
39 #include "printfrr.h"
40
41 /* Memory types */
42 DEFINE_MTYPE_STATIC(ZEBRA, DP_CTX, "Zebra DPlane Ctx");
43 DEFINE_MTYPE_STATIC(ZEBRA, DP_INTF, "Zebra DPlane Intf");
44 DEFINE_MTYPE_STATIC(ZEBRA, DP_PROV, "Zebra DPlane Provider");
45 DEFINE_MTYPE_STATIC(ZEBRA, DP_NETFILTER, "Zebra Netfilter Internal Object");
46 DEFINE_MTYPE_STATIC(ZEBRA, DP_NS, "DPlane NSes");
47
48 #ifndef AOK
49 # define AOK 0
50 #endif
51
52 /* Control for collection of extra interface info with route updates; a plugin
53  * can enable the extra info via a dplane API.
54 */
55 static bool dplane_collect_extra_intf_info;
56
57 /* Enable test dataplane provider */
58 /* #define DPLANE_TEST_PROVIDER 1 */
59
60 /* Default value for max queued incoming updates */
61 const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
62
63 /* Default value for new work per cycle */
64 const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
65
66 /* Validation check macro for context blocks */
67 /* #define DPLANE_DEBUG 1 */
68
69 #ifdef DPLANE_DEBUG
70
71 # define DPLANE_CTX_VALID(p) \
72 assert((p) != NULL)
73
74 #else
75
76 # define DPLANE_CTX_VALID(p)
77
78 #endif /* DPLANE_DEBUG */
79
80 /*
81 * Nexthop information captured for nexthop/nexthop group updates
82 */
83 struct dplane_nexthop_info {
84 uint32_t id;
85 uint32_t old_id;
86 afi_t afi;
87 vrf_id_t vrf_id;
88 int type;
89
90 struct nexthop_group ng;
91 struct nh_grp nh_grp[MULTIPATH_NUM];
92 uint8_t nh_grp_count;
93 };
94
95 /*
96 * Optional extra info about interfaces used in route updates' nexthops.
97 */
98 struct dplane_intf_extra {
99 vrf_id_t vrf_id;
100 uint32_t ifindex;
101 uint32_t flags;
102 uint32_t status;
103
104 TAILQ_ENTRY(dplane_intf_extra) link;
105 };
106
107 /*
108 * Route information captured for route updates.
109 */
110 struct dplane_route_info {
111
112 /* Dest and (optional) source prefixes */
113 struct prefix zd_dest;
114 struct prefix zd_src;
115
116 afi_t zd_afi;
117 safi_t zd_safi;
118
119 int zd_type;
120 int zd_old_type;
121
122 route_tag_t zd_tag;
123 route_tag_t zd_old_tag;
124 uint32_t zd_metric;
125 uint32_t zd_old_metric;
126
127 uint16_t zd_instance;
128 uint16_t zd_old_instance;
129
130 uint8_t zd_distance;
131 uint8_t zd_old_distance;
132
133 uint32_t zd_mtu;
134 uint32_t zd_nexthop_mtu;
135
136 /* Nexthop hash entry info */
137 struct dplane_nexthop_info nhe;
138
139 /* Nexthops */
140 uint32_t zd_nhg_id;
141 struct nexthop_group zd_ng;
142
143 /* Backup nexthops (if present) */
144 struct nexthop_group backup_ng;
145
146 /* "Previous" nexthops, used only in route updates without netlink */
147 struct nexthop_group zd_old_ng;
148 struct nexthop_group old_backup_ng;
149
150 /* Optional list of extra interface info */
151 TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
152 };
153
154 /*
155 * Pseudowire info for the dataplane
156 */
157 struct dplane_pw_info {
158 int type;
159 int af;
160 int status;
161 uint32_t flags;
162 uint32_t nhg_id;
163 union g_addr dest;
164 mpls_label_t local_label;
165 mpls_label_t remote_label;
166
167 /* Nexthops that are valid and installed */
168 struct nexthop_group fib_nhg;
169
170 /* Primary and backup nexthop sets, copied from the resolving route. */
171 struct nexthop_group primary_nhg;
172 struct nexthop_group backup_nhg;
173
174 union pw_protocol_fields fields;
175 };
176
177 /*
178 * Bridge port info for the dataplane
179 */
180 struct dplane_br_port_info {
181 uint32_t sph_filter_cnt;
182 struct in_addr sph_filters[ES_VTEP_MAX_CNT];
183 	/* DPLANE_BR_PORT_XXX - see zebra_dplane.h */
184 uint32_t flags;
185 uint32_t backup_nhg_id;
186 };
187
188 /*
189 * Interface/prefix info for the dataplane
190 */
191 struct dplane_intf_info {
192
193 uint32_t metric;
194 uint32_t flags;
195
196 bool protodown;
197 bool pd_reason_val;
198
199 #define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
200 #define DPLANE_INTF_SECONDARY (1 << 1)
201 #define DPLANE_INTF_BROADCAST (1 << 2)
202 #define DPLANE_INTF_HAS_DEST DPLANE_INTF_CONNECTED
203 #define DPLANE_INTF_HAS_LABEL (1 << 4)
204
205 /* Interface address/prefix */
206 struct prefix prefix;
207
208 /* Dest address, for p2p, or broadcast prefix */
209 struct prefix dest_prefix;
210
211 char *label;
212 char label_buf[32];
213 };
214
215 /*
216 * EVPN MAC address info for the dataplane.
217 */
218 struct dplane_mac_info {
219 vlanid_t vid;
220 ifindex_t br_ifindex;
221 struct ethaddr mac;
222 struct in_addr vtep_ip;
223 bool is_sticky;
224 uint32_t nhg_id;
225 uint32_t update_flags;
226 };
227
228 /*
229 * Neighbor info for the dataplane
230 */
231 struct dplane_neigh_info {
232 struct ipaddr ip_addr;
233 union {
234 struct ethaddr mac;
235 struct ipaddr ip_addr;
236 } link;
237 uint32_t flags;
238 uint16_t state;
239 uint32_t update_flags;
240 };
241
242 /*
243 * Neighbor Table
244 */
245 struct dplane_neigh_table {
246 uint8_t family;
247 uint32_t app_probes;
248 uint32_t ucast_probes;
249 uint32_t mcast_probes;
250 };
251
252 /*
253 * Policy based routing rule info for the dataplane
254 */
255 struct dplane_ctx_rule {
256 uint32_t priority;
257
258 /* The route table pointed by this rule */
259 uint32_t table;
260
261 /* Filter criteria */
262 uint32_t filter_bm;
263 uint32_t fwmark;
264 uint8_t dsfield;
265 struct prefix src_ip;
266 struct prefix dst_ip;
267 uint8_t ip_proto;
268
269 uint8_t action_pcp;
270 uint16_t action_vlan_id;
271 uint16_t action_vlan_flags;
272
273 uint32_t action_queue_id;
274
275 char ifname[INTERFACE_NAMSIZ + 1];
276 };
277
278 struct dplane_rule_info {
279 /*
280 * Originating zclient sock fd, so we can know who to send
281 * back to.
282 */
283 int sock;
284
285 int unique;
286 int seq;
287
288 struct dplane_ctx_rule new;
289 struct dplane_ctx_rule old;
290 };
291
292 struct dplane_gre_ctx {
293 uint32_t link_ifindex;
294 unsigned int mtu;
295 struct zebra_l2info_gre info;
296 };
297
298
299 /*
300 * Network interface configuration info - aligned with netlink's NETCONF
301 * info. The flags values are public, in the dplane.h file...
302 */
303 struct dplane_netconf_info {
304 ns_id_t ns_id;
305 ifindex_t ifindex;
306 enum dplane_netconf_status_e mpls_val;
307 enum dplane_netconf_status_e mcast_val;
308 };
309
310 /*
311 * The context block used to exchange info about route updates across
312 * the boundary between the zebra main context (and pthread) and the
313 * dataplane layer (and pthread).
314 */
315 struct zebra_dplane_ctx {
316
317 /* Operation code */
318 enum dplane_op_e zd_op;
319
320 /* Status on return */
321 enum zebra_dplane_result zd_status;
322
323 /* Dplane provider id */
324 uint32_t zd_provider;
325
326 	/* Flags - used by providers, e.g. the 'skip kernel' flag below */
327 int zd_flags;
328
329 bool zd_is_update;
330
331 uint32_t zd_seq;
332 uint32_t zd_old_seq;
333
334 /* Some updates may be generated by notifications: allow the
335 * plugin to notice and ignore results from its own notifications.
336 */
337 uint32_t zd_notif_provider;
338
339 /* TODO -- internal/sub-operation status? */
340 enum zebra_dplane_result zd_remote_status;
341 enum zebra_dplane_result zd_kernel_status;
342
343 vrf_id_t zd_vrf_id;
344 uint32_t zd_table_id;
345
346 char zd_ifname[INTERFACE_NAMSIZ];
347 ifindex_t zd_ifindex;
348
349 /* Support info for different kinds of updates */
350 union {
351 struct dplane_route_info rinfo;
352 struct zebra_lsp lsp;
353 struct dplane_pw_info pw;
354 struct dplane_br_port_info br_port;
355 struct dplane_intf_info intf;
356 struct dplane_mac_info macinfo;
357 struct dplane_neigh_info neigh;
358 struct dplane_rule_info rule;
359 struct zebra_pbr_iptable iptable;
360 struct zebra_pbr_ipset ipset;
361 struct {
362 struct zebra_pbr_ipset_entry entry;
363 struct zebra_pbr_ipset_info info;
364 } ipset_entry;
365 struct dplane_neigh_table neightable;
366 struct dplane_gre_ctx gre;
367 struct dplane_netconf_info netconf;
368 } u;
369
370 /* Namespace info, used especially for netlink kernel communication */
371 struct zebra_dplane_info zd_ns_info;
372
373 /* Embedded list linkage */
374 TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
375 };
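
/*
 * Illustrative sketch (not compiled): the union 'u' above is interpreted
 * according to zd_op, so code consuming a context normally switches on the
 * operation and touches only the matching accessors. A minimal results
 * handler might look roughly like this, assuming the public accessors
 * declared in zebra_dplane.h:
 *
 *     static void handle_result(struct zebra_dplane_ctx *ctx)
 *     {
 *             switch (dplane_ctx_get_op(ctx)) {
 *             case DPLANE_OP_ROUTE_INSTALL:
 *             case DPLANE_OP_ROUTE_UPDATE:
 *             case DPLANE_OP_ROUTE_DELETE:
 *                     zlog_debug("route %pFX: %s",
 *                                dplane_ctx_get_dest(ctx),
 *                                dplane_res2str(dplane_ctx_get_status(ctx)));
 *                     break;
 *             default:
 *                     break;
 *             }
 *     }
 */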
376
377 /* Flag that can be set by a pre-kernel provider as a signal that an update
378 * should bypass the kernel.
379 */
380 #define DPLANE_CTX_FLAG_NO_KERNEL 0x01
381
382
383 /*
384 * Registration block for one dataplane provider.
385 */
386 struct zebra_dplane_provider {
387 /* Name */
388 char dp_name[DPLANE_PROVIDER_NAMELEN + 1];
389
390 /* Priority, for ordering among providers */
391 uint8_t dp_priority;
392
393 /* Id value */
394 uint32_t dp_id;
395
396 /* Mutex */
397 pthread_mutex_t dp_mutex;
398
399 /* Plugin-provided extra data */
400 void *dp_data;
401
402 /* Flags */
403 int dp_flags;
404
405 int (*dp_start)(struct zebra_dplane_provider *prov);
406
407 int (*dp_fp)(struct zebra_dplane_provider *prov);
408
409 int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
410
411 _Atomic uint32_t dp_in_counter;
412 _Atomic uint32_t dp_in_queued;
413 _Atomic uint32_t dp_in_max;
414 _Atomic uint32_t dp_out_counter;
415 _Atomic uint32_t dp_out_queued;
416 _Atomic uint32_t dp_out_max;
417 _Atomic uint32_t dp_error_counter;
418
419 /* Queue of contexts inbound to the provider */
420 struct dplane_ctx_q dp_ctx_in_q;
421
422 /* Queue of completed contexts outbound from the provider back
423 * towards the dataplane module.
424 */
425 struct dplane_ctx_q dp_ctx_out_q;
426
427 /* Embedded list linkage for provider objects */
428 TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
429 };
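
/*
 * Illustrative sketch (not compiled): plugins do not fill this struct in
 * directly; they call dplane_provider_register(), which allocates and links
 * a provider block for them. Roughly, with hypothetical callback names
 * (see zebra_dplane.h for the authoritative prototype):
 *
 *     static int my_start(struct zebra_dplane_provider *prov);
 *     static int my_process(struct zebra_dplane_provider *prov);
 *     static int my_fini(struct zebra_dplane_provider *prov, bool early);
 *
 *     static struct zebra_dplane_provider *my_prov;
 *
 *     dplane_provider_register("sample", DPLANE_PRIO_POSTPROCESS,
 *                              DPLANE_PROV_FLAGS_DEFAULT,
 *                              my_start, my_process, my_fini,
 *                              NULL, &my_prov);
 */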
430
431 /* Declare types for list of zns info objects */
432 PREDECL_DLIST(zns_info_list);
433
434 struct dplane_zns_info {
435 struct zebra_dplane_info info;
436
437 /* Request data from the OS */
438 struct thread *t_request;
439
440 /* Read event */
441 struct thread *t_read;
442
443 /* List linkage */
444 struct zns_info_list_item link;
445 };
446
447 /*
448 * Globals
449 */
450 static struct zebra_dplane_globals {
451 /* Mutex to control access to dataplane components */
452 pthread_mutex_t dg_mutex;
453
454 /* Results callback registered by zebra 'core' */
455 int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
456
457 /* Sentinel for beginning of shutdown */
458 volatile bool dg_is_shutdown;
459
460 /* Sentinel for end of shutdown */
461 volatile bool dg_run;
462
463 /* Update context queue inbound to the dataplane */
464 TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
465
466 /* Ordered list of providers */
467 TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
468
469 /* List of info about each zns */
470 struct zns_info_list_head dg_zns_list;
471
472 /* Counter used to assign internal ids to providers */
473 uint32_t dg_provider_id;
474
475 /* Limit number of pending, unprocessed updates */
476 _Atomic uint32_t dg_max_queued_updates;
477
478 /* Control whether system route notifications should be produced. */
479 bool dg_sys_route_notifs;
480
481 /* Limit number of new updates dequeued at once, to pace an
482 * incoming burst.
483 */
484 uint32_t dg_updates_per_cycle;
485
486 _Atomic uint32_t dg_routes_in;
487 _Atomic uint32_t dg_routes_queued;
488 _Atomic uint32_t dg_routes_queued_max;
489 _Atomic uint32_t dg_route_errors;
490 _Atomic uint32_t dg_other_errors;
491
492 _Atomic uint32_t dg_nexthops_in;
493 _Atomic uint32_t dg_nexthop_errors;
494
495 _Atomic uint32_t dg_lsps_in;
496 _Atomic uint32_t dg_lsp_errors;
497
498 _Atomic uint32_t dg_pws_in;
499 _Atomic uint32_t dg_pw_errors;
500
501 _Atomic uint32_t dg_br_port_in;
502 _Atomic uint32_t dg_br_port_errors;
503
504 _Atomic uint32_t dg_intf_addrs_in;
505 _Atomic uint32_t dg_intf_addr_errors;
506
507 _Atomic uint32_t dg_macs_in;
508 _Atomic uint32_t dg_mac_errors;
509
510 _Atomic uint32_t dg_neighs_in;
511 _Atomic uint32_t dg_neigh_errors;
512
513 _Atomic uint32_t dg_rules_in;
514 _Atomic uint32_t dg_rule_errors;
515
516 _Atomic uint32_t dg_update_yields;
517
518 _Atomic uint32_t dg_iptable_in;
519 _Atomic uint32_t dg_iptable_errors;
520
521 _Atomic uint32_t dg_ipset_in;
522 _Atomic uint32_t dg_ipset_errors;
523 _Atomic uint32_t dg_ipset_entry_in;
524 _Atomic uint32_t dg_ipset_entry_errors;
525
526 _Atomic uint32_t dg_neightable_in;
527 _Atomic uint32_t dg_neightable_errors;
528
529 _Atomic uint32_t dg_gre_set_in;
530 _Atomic uint32_t dg_gre_set_errors;
531
532 _Atomic uint32_t dg_intfs_in;
533 _Atomic uint32_t dg_intf_errors;
534
535 /* Dataplane pthread */
536 struct frr_pthread *dg_pthread;
537
538 /* Event-delivery context 'master' for the dplane */
539 struct thread_master *dg_master;
540
541 /* Event/'thread' pointer for queued updates */
542 struct thread *dg_t_update;
543
544 /* Event pointer for pending shutdown check loop */
545 struct thread *dg_t_shutdown_check;
546
547 } zdplane_info;
548
549 /* Instantiate zns list type */
550 DECLARE_DLIST(zns_info_list, struct dplane_zns_info, link);
551
552 /*
553 * Lock and unlock for interactions with the zebra 'core' pthread
554 */
555 #define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
556 #define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
557
558
559 /*
560 * Lock and unlock for individual providers
561 */
562 #define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
563 #define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
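
/*
 * Usage sketch (not compiled): the provider in/out queues are touched from
 * both the zebra main pthread and the dplane pthread, so queue operations
 * are bracketed by short critical sections, e.g.:
 *
 *     DPLANE_PROV_LOCK(prov);
 *     ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
 *     if (ctx)
 *             TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
 *     DPLANE_PROV_UNLOCK(prov);
 */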
564
565 /* Prototypes */
566 static void dplane_thread_loop(struct thread *event);
567 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
568 enum dplane_op_e op);
569 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
570 enum dplane_op_e op);
571 static enum zebra_dplane_result intf_addr_update_internal(
572 const struct interface *ifp, const struct connected *ifc,
573 enum dplane_op_e op);
574 static enum zebra_dplane_result mac_update_common(
575 enum dplane_op_e op, const struct interface *ifp,
576 const struct interface *br_ifp,
577 vlanid_t vid, const struct ethaddr *mac,
578 struct in_addr vtep_ip, bool sticky, uint32_t nhg_id,
579 uint32_t update_flags);
580 static enum zebra_dplane_result
581 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
582 const void *link, int link_family,
583 const struct ipaddr *ip, uint32_t flags, uint16_t state,
584 uint32_t update_flags, int protocol);
585
586 /*
587 * Public APIs
588 */
589
590 /* Obtain thread_master for dataplane thread */
591 struct thread_master *dplane_get_thread_master(void)
592 {
593 return zdplane_info.dg_master;
594 }
595
596 /*
597 * Allocate a dataplane update context
598 */
599 struct zebra_dplane_ctx *dplane_ctx_alloc(void)
600 {
601 struct zebra_dplane_ctx *p;
602
603 /* TODO -- just alloc'ing memory, but would like to maintain
604 * a pool
605 */
606 p = XCALLOC(MTYPE_DP_CTX, sizeof(struct zebra_dplane_ctx));
607
608 return p;
609 }
610
611 /* Enable system route notifications */
612 void dplane_enable_sys_route_notifs(void)
613 {
614 zdplane_info.dg_sys_route_notifs = true;
615 }
616
617 /*
618 * Clean up dependent/internal allocations inside a context object
619 */
620 static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
621 {
622 struct dplane_intf_extra *if_extra, *if_tmp;
623
624 /*
625 * Some internal allocations may need to be freed, depending on
626 * the type of info captured in the ctx.
627 */
628 switch (ctx->zd_op) {
629 case DPLANE_OP_ROUTE_INSTALL:
630 case DPLANE_OP_ROUTE_UPDATE:
631 case DPLANE_OP_ROUTE_DELETE:
632 case DPLANE_OP_SYS_ROUTE_ADD:
633 case DPLANE_OP_SYS_ROUTE_DELETE:
634 case DPLANE_OP_ROUTE_NOTIFY:
635
636 /* Free allocated nexthops */
637 if (ctx->u.rinfo.zd_ng.nexthop) {
638 /* This deals with recursive nexthops too */
639 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
640
641 ctx->u.rinfo.zd_ng.nexthop = NULL;
642 }
643
644 /* Free backup info also (if present) */
645 if (ctx->u.rinfo.backup_ng.nexthop) {
646 /* This deals with recursive nexthops too */
647 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
648
649 ctx->u.rinfo.backup_ng.nexthop = NULL;
650 }
651
652 if (ctx->u.rinfo.zd_old_ng.nexthop) {
653 /* This deals with recursive nexthops too */
654 nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop);
655
656 ctx->u.rinfo.zd_old_ng.nexthop = NULL;
657 }
658
659 if (ctx->u.rinfo.old_backup_ng.nexthop) {
660 /* This deals with recursive nexthops too */
661 nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop);
662
663 ctx->u.rinfo.old_backup_ng.nexthop = NULL;
664 }
665
666 /* Optional extra interface info */
667 TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
668 link, if_tmp) {
669 TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
670 link);
671 XFREE(MTYPE_DP_INTF, if_extra);
672 }
673
674 break;
675
676 case DPLANE_OP_NH_INSTALL:
677 case DPLANE_OP_NH_UPDATE:
678 case DPLANE_OP_NH_DELETE: {
679 if (ctx->u.rinfo.nhe.ng.nexthop) {
680 /* This deals with recursive nexthops too */
681 nexthops_free(ctx->u.rinfo.nhe.ng.nexthop);
682
683 ctx->u.rinfo.nhe.ng.nexthop = NULL;
684 }
685 break;
686 }
687
688 case DPLANE_OP_LSP_INSTALL:
689 case DPLANE_OP_LSP_UPDATE:
690 case DPLANE_OP_LSP_DELETE:
691 case DPLANE_OP_LSP_NOTIFY:
692 {
693 struct zebra_nhlfe *nhlfe;
694
695 /* Unlink and free allocated NHLFEs */
696 frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
697 nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
698 zebra_mpls_nhlfe_free(nhlfe);
699 }
700
701 /* Unlink and free allocated backup NHLFEs, if present */
702 frr_each_safe(nhlfe_list,
703 &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
704 nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
705 nhlfe);
706 zebra_mpls_nhlfe_free(nhlfe);
707 }
708
709 /* Clear pointers in lsp struct, in case we're caching
710 * free context structs.
711 */
712 nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
713 ctx->u.lsp.best_nhlfe = NULL;
714 nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
715
716 break;
717 }
718
719 case DPLANE_OP_PW_INSTALL:
720 case DPLANE_OP_PW_UNINSTALL:
721 /* Free allocated nexthops */
722 if (ctx->u.pw.fib_nhg.nexthop) {
723 /* This deals with recursive nexthops too */
724 nexthops_free(ctx->u.pw.fib_nhg.nexthop);
725
726 ctx->u.pw.fib_nhg.nexthop = NULL;
727 }
728 if (ctx->u.pw.primary_nhg.nexthop) {
729 nexthops_free(ctx->u.pw.primary_nhg.nexthop);
730
731 ctx->u.pw.primary_nhg.nexthop = NULL;
732 }
733 if (ctx->u.pw.backup_nhg.nexthop) {
734 nexthops_free(ctx->u.pw.backup_nhg.nexthop);
735
736 ctx->u.pw.backup_nhg.nexthop = NULL;
737 }
738 break;
739
740 case DPLANE_OP_ADDR_INSTALL:
741 case DPLANE_OP_ADDR_UNINSTALL:
742 case DPLANE_OP_INTF_ADDR_ADD:
743 case DPLANE_OP_INTF_ADDR_DEL:
744 /* Maybe free label string, if allocated */
745 if (ctx->u.intf.label != NULL &&
746 ctx->u.intf.label != ctx->u.intf.label_buf) {
747 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
748 ctx->u.intf.label = NULL;
749 }
750 break;
751
752 case DPLANE_OP_MAC_INSTALL:
753 case DPLANE_OP_MAC_DELETE:
754 case DPLANE_OP_NEIGH_INSTALL:
755 case DPLANE_OP_NEIGH_UPDATE:
756 case DPLANE_OP_NEIGH_DELETE:
757 case DPLANE_OP_VTEP_ADD:
758 case DPLANE_OP_VTEP_DELETE:
759 case DPLANE_OP_RULE_ADD:
760 case DPLANE_OP_RULE_DELETE:
761 case DPLANE_OP_RULE_UPDATE:
762 case DPLANE_OP_NEIGH_DISCOVER:
763 case DPLANE_OP_BR_PORT_UPDATE:
764 case DPLANE_OP_NEIGH_IP_INSTALL:
765 case DPLANE_OP_NEIGH_IP_DELETE:
766 case DPLANE_OP_NONE:
767 case DPLANE_OP_IPSET_ADD:
768 case DPLANE_OP_IPSET_DELETE:
769 case DPLANE_OP_INTF_INSTALL:
770 case DPLANE_OP_INTF_UPDATE:
771 case DPLANE_OP_INTF_DELETE:
772 break;
773
774 case DPLANE_OP_IPSET_ENTRY_ADD:
775 case DPLANE_OP_IPSET_ENTRY_DELETE:
776 break;
777 case DPLANE_OP_NEIGH_TABLE_UPDATE:
778 break;
779 case DPLANE_OP_IPTABLE_ADD:
780 case DPLANE_OP_IPTABLE_DELETE:
781 if (ctx->u.iptable.interface_name_list) {
782 struct listnode *node, *nnode;
783 char *ifname;
784
785 for (ALL_LIST_ELEMENTS(
786 ctx->u.iptable.interface_name_list, node,
787 nnode, ifname)) {
788 LISTNODE_DETACH(
789 ctx->u.iptable.interface_name_list,
790 node);
791 XFREE(MTYPE_DP_NETFILTER, ifname);
792 }
793 list_delete(&ctx->u.iptable.interface_name_list);
794 }
795 break;
796 case DPLANE_OP_GRE_SET:
797 case DPLANE_OP_INTF_NETCONFIG:
798 break;
799 }
800 }
801
802 /*
803 * Free a dataplane results context.
804 */
805 static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
806 {
807 if (pctx == NULL)
808 return;
809
810 DPLANE_CTX_VALID(*pctx);
811
812 /* TODO -- just freeing memory, but would like to maintain
813 * a pool
814 */
815
816 /* Some internal allocations may need to be freed, depending on
817 * the type of info captured in the ctx.
818 */
819 dplane_ctx_free_internal(*pctx);
820
821 XFREE(MTYPE_DP_CTX, *pctx);
822 }
823
824 /*
825 * Reset an allocated context object for re-use. All internal allocations are
826 * freed and the context is memset.
827 */
828 void dplane_ctx_reset(struct zebra_dplane_ctx *ctx)
829 {
830 dplane_ctx_free_internal(ctx);
831 memset(ctx, 0, sizeof(*ctx));
832 }
833
834 /*
835 * Return a context block to the dplane module after processing
836 */
837 void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
838 {
839 /* TODO -- maintain pool; for now, just free */
840 dplane_ctx_free(pctx);
841 }
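
/*
 * Lifecycle sketch (not compiled): callers generally allocate a context,
 * populate it (directly or via one of the dplane_*() enqueue apis), and
 * hand it back here once the result has been processed:
 *
 *     struct zebra_dplane_ctx *ctx = dplane_ctx_alloc();
 *
 *     dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_INSTALL);
 *     ... fill in the rest via the setters below ...
 *
 *     dplane_ctx_fini(&ctx);
 *
 * XFREE() clears the caller's pointer, so 'ctx' is NULL afterwards.
 */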
842
843 /* Enqueue a context block */
844 void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
845 const struct zebra_dplane_ctx *ctx)
846 {
847 TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
848 }
849
850 /* Append a list of context blocks to another list */
851 void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
852 struct dplane_ctx_q *from_list)
853 {
854 if (TAILQ_FIRST(from_list)) {
855 TAILQ_CONCAT(to_list, from_list, zd_q_entries);
856
857 /* And clear 'from' list */
858 TAILQ_INIT(from_list);
859 }
860 }
861
862 struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
863 {
864 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
865
866 return ctx;
867 }
868
869 /* Dequeue a context block from the head of a list */
870 struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
871 {
872 struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
873
874 if (ctx)
875 TAILQ_REMOVE(q, ctx, zd_q_entries);
876
877 return ctx;
878 }
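
/*
 * Illustrative sketch (not compiled): a results callback registered with
 * the dplane typically drains its list of completed contexts with these
 * helpers:
 *
 *     static int results_cb(struct dplane_ctx_q *ctxlist)
 *     {
 *             struct zebra_dplane_ctx *ctx;
 *
 *             while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
 *                     ... examine dplane_ctx_get_op() and
 *                         dplane_ctx_get_status(), then ...
 *                     dplane_ctx_fini(&ctx);
 *             }
 *
 *             return 0;
 *     }
 */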
879
880 /*
881 * Accessors for information from the context object
882 */
883 enum zebra_dplane_result dplane_ctx_get_status(
884 const struct zebra_dplane_ctx *ctx)
885 {
886 DPLANE_CTX_VALID(ctx);
887
888 return ctx->zd_status;
889 }
890
891 void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
892 enum zebra_dplane_result status)
893 {
894 DPLANE_CTX_VALID(ctx);
895
896 ctx->zd_status = status;
897 }
898
899 /* Retrieve last/current provider id */
900 uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
901 {
902 DPLANE_CTX_VALID(ctx);
903 return ctx->zd_provider;
904 }
905
906 /* Providers that run before the kernel can control whether a kernel
907  * update should be performed.
908  */
909 void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
910 {
911 DPLANE_CTX_VALID(ctx);
912
913 SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
914 }
915
916 bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
917 {
918 DPLANE_CTX_VALID(ctx);
919
920 return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
921 }
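
/*
 * Illustrative sketch (not compiled): a provider running at a pre-kernel
 * priority that programs an update itself might mark the context so the
 * kernel stage skips it, and supply the result status directly
 * (provider_handled_update() is a hypothetical helper):
 *
 *     if (provider_handled_update(ctx)) {
 *             dplane_ctx_set_skip_kernel(ctx);
 *             dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
 *     }
 */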
922
923 void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
924 {
925 DPLANE_CTX_VALID(ctx);
926 ctx->zd_op = op;
927 }
928
929 enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
930 {
931 DPLANE_CTX_VALID(ctx);
932
933 return ctx->zd_op;
934 }
935
936 const char *dplane_op2str(enum dplane_op_e op)
937 {
938 const char *ret = "UNKNOWN";
939
940 switch (op) {
941 case DPLANE_OP_NONE:
942 ret = "NONE";
943 break;
944
945 /* Route update */
946 case DPLANE_OP_ROUTE_INSTALL:
947 ret = "ROUTE_INSTALL";
948 break;
949 case DPLANE_OP_ROUTE_UPDATE:
950 ret = "ROUTE_UPDATE";
951 break;
952 case DPLANE_OP_ROUTE_DELETE:
953 ret = "ROUTE_DELETE";
954 break;
955 case DPLANE_OP_ROUTE_NOTIFY:
956 ret = "ROUTE_NOTIFY";
957 break;
958
959 /* Nexthop update */
960 case DPLANE_OP_NH_INSTALL:
961 ret = "NH_INSTALL";
962 break;
963 case DPLANE_OP_NH_UPDATE:
964 ret = "NH_UPDATE";
965 break;
966 case DPLANE_OP_NH_DELETE:
967 ret = "NH_DELETE";
968 break;
969
970 case DPLANE_OP_LSP_INSTALL:
971 ret = "LSP_INSTALL";
972 break;
973 case DPLANE_OP_LSP_UPDATE:
974 ret = "LSP_UPDATE";
975 break;
976 case DPLANE_OP_LSP_DELETE:
977 ret = "LSP_DELETE";
978 break;
979 case DPLANE_OP_LSP_NOTIFY:
980 ret = "LSP_NOTIFY";
981 break;
982
983 case DPLANE_OP_PW_INSTALL:
984 ret = "PW_INSTALL";
985 break;
986 case DPLANE_OP_PW_UNINSTALL:
987 ret = "PW_UNINSTALL";
988 break;
989
990 case DPLANE_OP_SYS_ROUTE_ADD:
991 ret = "SYS_ROUTE_ADD";
992 break;
993 case DPLANE_OP_SYS_ROUTE_DELETE:
994 ret = "SYS_ROUTE_DEL";
995 break;
996
997 case DPLANE_OP_BR_PORT_UPDATE:
998 ret = "BR_PORT_UPDATE";
999 break;
1000
1001 case DPLANE_OP_ADDR_INSTALL:
1002 ret = "ADDR_INSTALL";
1003 break;
1004 case DPLANE_OP_ADDR_UNINSTALL:
1005 ret = "ADDR_UNINSTALL";
1006 break;
1007
1008 case DPLANE_OP_MAC_INSTALL:
1009 ret = "MAC_INSTALL";
1010 break;
1011 case DPLANE_OP_MAC_DELETE:
1012 ret = "MAC_DELETE";
1013 break;
1014
1015 case DPLANE_OP_NEIGH_INSTALL:
1016 ret = "NEIGH_INSTALL";
1017 break;
1018 case DPLANE_OP_NEIGH_UPDATE:
1019 ret = "NEIGH_UPDATE";
1020 break;
1021 case DPLANE_OP_NEIGH_DELETE:
1022 ret = "NEIGH_DELETE";
1023 break;
1024 case DPLANE_OP_VTEP_ADD:
1025 ret = "VTEP_ADD";
1026 break;
1027 case DPLANE_OP_VTEP_DELETE:
1028 ret = "VTEP_DELETE";
1029 break;
1030
1031 case DPLANE_OP_RULE_ADD:
1032 ret = "RULE_ADD";
1033 break;
1034 case DPLANE_OP_RULE_DELETE:
1035 ret = "RULE_DELETE";
1036 break;
1037 case DPLANE_OP_RULE_UPDATE:
1038 ret = "RULE_UPDATE";
1039 break;
1040
1041 case DPLANE_OP_NEIGH_DISCOVER:
1042 ret = "NEIGH_DISCOVER";
1043 break;
1044
1045 case DPLANE_OP_IPTABLE_ADD:
1046 ret = "IPTABLE_ADD";
1047 break;
1048 case DPLANE_OP_IPTABLE_DELETE:
1049 ret = "IPTABLE_DELETE";
1050 break;
1051 case DPLANE_OP_IPSET_ADD:
1052 ret = "IPSET_ADD";
1053 break;
1054 case DPLANE_OP_IPSET_DELETE:
1055 ret = "IPSET_DELETE";
1056 break;
1057 case DPLANE_OP_IPSET_ENTRY_ADD:
1058 ret = "IPSET_ENTRY_ADD";
1059 break;
1060 case DPLANE_OP_IPSET_ENTRY_DELETE:
1061 ret = "IPSET_ENTRY_DELETE";
1062 break;
1063 case DPLANE_OP_NEIGH_IP_INSTALL:
1064 ret = "NEIGH_IP_INSTALL";
1065 break;
1066 case DPLANE_OP_NEIGH_IP_DELETE:
1067 ret = "NEIGH_IP_DELETE";
1068 break;
1069 case DPLANE_OP_NEIGH_TABLE_UPDATE:
1070 ret = "NEIGH_TABLE_UPDATE";
1071 break;
1072
1073 case DPLANE_OP_GRE_SET:
1074 ret = "GRE_SET";
1075 break;
1076
1077 case DPLANE_OP_INTF_ADDR_ADD:
1078 return "INTF_ADDR_ADD";
1079
1080 case DPLANE_OP_INTF_ADDR_DEL:
1081 return "INTF_ADDR_DEL";
1082
1083 case DPLANE_OP_INTF_NETCONFIG:
1084 return "INTF_NETCONFIG";
1085
1086 case DPLANE_OP_INTF_INSTALL:
1087 ret = "INTF_INSTALL";
1088 break;
1089 case DPLANE_OP_INTF_UPDATE:
1090 ret = "INTF_UPDATE";
1091 break;
1092 case DPLANE_OP_INTF_DELETE:
1093 ret = "INTF_DELETE";
1094 break;
1095 }
1096
1097 return ret;
1098 }
1099
1100 const char *dplane_res2str(enum zebra_dplane_result res)
1101 {
1102 const char *ret = "<Unknown>";
1103
1104 switch (res) {
1105 case ZEBRA_DPLANE_REQUEST_FAILURE:
1106 ret = "FAILURE";
1107 break;
1108 case ZEBRA_DPLANE_REQUEST_QUEUED:
1109 ret = "QUEUED";
1110 break;
1111 case ZEBRA_DPLANE_REQUEST_SUCCESS:
1112 ret = "SUCCESS";
1113 break;
1114 }
1115
1116 return ret;
1117 }
1118
1119 void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
1120 const struct prefix *dest)
1121 {
1122 DPLANE_CTX_VALID(ctx);
1123
1124 prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
1125 }
1126
1127 const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
1128 {
1129 DPLANE_CTX_VALID(ctx);
1130
1131 return &(ctx->u.rinfo.zd_dest);
1132 }
1133
1134 void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
1135 {
1136 DPLANE_CTX_VALID(ctx);
1137
1138 if (src)
1139 prefix_copy(&(ctx->u.rinfo.zd_src), src);
1140 else
1141 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
1142 }
1143
1144 /* Source prefix is a little special - return NULL for "no src prefix" */
1145 const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
1146 {
1147 DPLANE_CTX_VALID(ctx);
1148
1149 if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
1150 IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
1151 return NULL;
1152 } else {
1153 return &(ctx->u.rinfo.zd_src);
1154 }
1155 }
1156
1157 bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx)
1158 {
1159 DPLANE_CTX_VALID(ctx);
1160
1161 return ctx->zd_is_update;
1162 }
1163
1164 uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx)
1165 {
1166 DPLANE_CTX_VALID(ctx);
1167
1168 return ctx->zd_seq;
1169 }
1170
1171 uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
1172 {
1173 DPLANE_CTX_VALID(ctx);
1174
1175 return ctx->zd_old_seq;
1176 }
1177
1178 void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
1179 {
1180 DPLANE_CTX_VALID(ctx);
1181
1182 ctx->zd_vrf_id = vrf;
1183 }
1184
1185 vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
1186 {
1187 DPLANE_CTX_VALID(ctx);
1188
1189 return ctx->zd_vrf_id;
1190 }
1191
1192 /* In some paths we have only a namespace id */
1193 void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid)
1194 {
1195 DPLANE_CTX_VALID(ctx);
1196
1197 ctx->zd_ns_info.ns_id = nsid;
1198 }
1199
1200 ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx)
1201 {
1202 DPLANE_CTX_VALID(ctx);
1203
1204 return ctx->zd_ns_info.ns_id;
1205 }
1206
1207 bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
1208 {
1209 DPLANE_CTX_VALID(ctx);
1210
1211 return (ctx->zd_notif_provider != 0);
1212 }
1213
1214 uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
1215 {
1216 DPLANE_CTX_VALID(ctx);
1217
1218 return ctx->zd_notif_provider;
1219 }
1220
1221 void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
1222 uint32_t id)
1223 {
1224 DPLANE_CTX_VALID(ctx);
1225
1226 ctx->zd_notif_provider = id;
1227 }
1228
1229 const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
1230 {
1231 DPLANE_CTX_VALID(ctx);
1232
1233 return ctx->zd_ifname;
1234 }
1235
1236 void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname)
1237 {
1238 DPLANE_CTX_VALID(ctx);
1239
1240 if (!ifname)
1241 return;
1242
1243 strlcpy(ctx->zd_ifname, ifname, sizeof(ctx->zd_ifname));
1244 }
1245
1246 ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
1247 {
1248 DPLANE_CTX_VALID(ctx);
1249
1250 return ctx->zd_ifindex;
1251 }
1252
1253 void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex)
1254 {
1255 DPLANE_CTX_VALID(ctx);
1256
1257 ctx->zd_ifindex = ifindex;
1258 }
1259
1260 void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
1261 {
1262 DPLANE_CTX_VALID(ctx);
1263
1264 ctx->u.rinfo.zd_type = type;
1265 }
1266
1267 int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
1268 {
1269 DPLANE_CTX_VALID(ctx);
1270
1271 return ctx->u.rinfo.zd_type;
1272 }
1273
1274 int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
1275 {
1276 DPLANE_CTX_VALID(ctx);
1277
1278 return ctx->u.rinfo.zd_old_type;
1279 }
1280
1281 void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
1282 {
1283 DPLANE_CTX_VALID(ctx);
1284
1285 ctx->u.rinfo.zd_afi = afi;
1286 }
1287
1288 afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
1289 {
1290 DPLANE_CTX_VALID(ctx);
1291
1292 return ctx->u.rinfo.zd_afi;
1293 }
1294
1295 void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
1296 {
1297 DPLANE_CTX_VALID(ctx);
1298
1299 ctx->u.rinfo.zd_safi = safi;
1300 }
1301
1302 safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
1303 {
1304 DPLANE_CTX_VALID(ctx);
1305
1306 return ctx->u.rinfo.zd_safi;
1307 }
1308
1309 void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
1310 {
1311 DPLANE_CTX_VALID(ctx);
1312
1313 ctx->zd_table_id = table;
1314 }
1315
1316 uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
1317 {
1318 DPLANE_CTX_VALID(ctx);
1319
1320 return ctx->zd_table_id;
1321 }
1322
1323 route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
1324 {
1325 DPLANE_CTX_VALID(ctx);
1326
1327 return ctx->u.rinfo.zd_tag;
1328 }
1329
1330 void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
1331 {
1332 DPLANE_CTX_VALID(ctx);
1333
1334 ctx->u.rinfo.zd_tag = tag;
1335 }
1336
1337 route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
1338 {
1339 DPLANE_CTX_VALID(ctx);
1340
1341 return ctx->u.rinfo.zd_old_tag;
1342 }
1343
1344 uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
1345 {
1346 DPLANE_CTX_VALID(ctx);
1347
1348 return ctx->u.rinfo.zd_instance;
1349 }
1350
1351 void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
1352 {
1353 DPLANE_CTX_VALID(ctx);
1354
1355 ctx->u.rinfo.zd_instance = instance;
1356 }
1357
1358 uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
1359 {
1360 DPLANE_CTX_VALID(ctx);
1361
1362 return ctx->u.rinfo.zd_old_instance;
1363 }
1364
1365 uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
1366 {
1367 DPLANE_CTX_VALID(ctx);
1368
1369 return ctx->u.rinfo.zd_metric;
1370 }
1371
1372 uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
1373 {
1374 DPLANE_CTX_VALID(ctx);
1375
1376 return ctx->u.rinfo.zd_old_metric;
1377 }
1378
1379 uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
1380 {
1381 DPLANE_CTX_VALID(ctx);
1382
1383 return ctx->u.rinfo.zd_mtu;
1384 }
1385
1386 uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
1387 {
1388 DPLANE_CTX_VALID(ctx);
1389
1390 return ctx->u.rinfo.zd_nexthop_mtu;
1391 }
1392
1393 uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
1394 {
1395 DPLANE_CTX_VALID(ctx);
1396
1397 return ctx->u.rinfo.zd_distance;
1398 }
1399
1400 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
1401 {
1402 DPLANE_CTX_VALID(ctx);
1403
1404 ctx->u.rinfo.zd_distance = distance;
1405 }
1406
1407 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
1408 {
1409 DPLANE_CTX_VALID(ctx);
1410
1411 return ctx->u.rinfo.zd_old_distance;
1412 }
1413
1414 /*
1415 * Set the nexthops associated with a context: note that processing code
1416 * may well expect that nexthops are in canonical (sorted) order, so we
1417 * will enforce that here.
1418 */
1419 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
1420 {
1421 DPLANE_CTX_VALID(ctx);
1422
1423 if (ctx->u.rinfo.zd_ng.nexthop) {
1424 nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
1425 ctx->u.rinfo.zd_ng.nexthop = NULL;
1426 }
1427 nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
1428 }
1429
1430 /*
1431  * Set the list of backup nexthops; their ordering is preserved (they're not
1432  * re-sorted).
1433 */
1434 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
1435 const struct nexthop_group *nhg)
1436 {
1437 struct nexthop *nh, *last_nh, *nexthop;
1438
1439 DPLANE_CTX_VALID(ctx);
1440
1441 if (ctx->u.rinfo.backup_ng.nexthop) {
1442 nexthops_free(ctx->u.rinfo.backup_ng.nexthop);
1443 ctx->u.rinfo.backup_ng.nexthop = NULL;
1444 }
1445
1446 last_nh = NULL;
1447
1448 /* Be careful to preserve the order of the backup list */
1449 for (nh = nhg->nexthop; nh; nh = nh->next) {
1450 nexthop = nexthop_dup(nh, NULL);
1451
1452 if (last_nh)
1453 NEXTHOP_APPEND(last_nh, nexthop);
1454 else
1455 ctx->u.rinfo.backup_ng.nexthop = nexthop;
1456
1457 last_nh = nexthop;
1458 }
1459 }
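
/*
 * Illustrative sketch (not compiled): primary nexthops are copied in sorted
 * order, while backups are copied verbatim, since primary nexthops refer to
 * their backups by list position ('nhg' and 'backup_nhg' are hypothetical
 * caller-side groups):
 *
 *     dplane_ctx_set_nexthops(ctx, nhg->nexthop);
 *     dplane_ctx_set_backup_nhg(ctx, backup_nhg);
 */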
1460
1461 uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1462 {
1463 DPLANE_CTX_VALID(ctx);
1464 return ctx->u.rinfo.zd_nhg_id;
1465 }
1466
1467 const struct nexthop_group *dplane_ctx_get_ng(
1468 const struct zebra_dplane_ctx *ctx)
1469 {
1470 DPLANE_CTX_VALID(ctx);
1471
1472 return &(ctx->u.rinfo.zd_ng);
1473 }
1474
1475 const struct nexthop_group *
1476 dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
1477 {
1478 DPLANE_CTX_VALID(ctx);
1479
1480 return &(ctx->u.rinfo.backup_ng);
1481 }
1482
1483 const struct nexthop_group *
1484 dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
1485 {
1486 DPLANE_CTX_VALID(ctx);
1487
1488 return &(ctx->u.rinfo.zd_old_ng);
1489 }
1490
1491 const struct nexthop_group *
1492 dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
1493 {
1494 DPLANE_CTX_VALID(ctx);
1495
1496 return &(ctx->u.rinfo.old_backup_ng);
1497 }
1498
1499 const struct zebra_dplane_info *dplane_ctx_get_ns(
1500 const struct zebra_dplane_ctx *ctx)
1501 {
1502 DPLANE_CTX_VALID(ctx);
1503
1504 return &(ctx->zd_ns_info);
1505 }
1506
1507 int dplane_ctx_get_ns_sock(const struct zebra_dplane_ctx *ctx)
1508 {
1509 DPLANE_CTX_VALID(ctx);
1510
1511 #ifdef HAVE_NETLINK
1512 return ctx->zd_ns_info.sock;
1513 #else
1514 return -1;
1515 #endif
1516 }
1517
1518 /* Accessors for nexthop information */
1519 uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx)
1520 {
1521 DPLANE_CTX_VALID(ctx);
1522 return ctx->u.rinfo.nhe.id;
1523 }
1524
1525 uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx)
1526 {
1527 DPLANE_CTX_VALID(ctx);
1528 return ctx->u.rinfo.nhe.old_id;
1529 }
1530
1531 afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx)
1532 {
1533 DPLANE_CTX_VALID(ctx);
1534 return ctx->u.rinfo.nhe.afi;
1535 }
1536
1537 vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx)
1538 {
1539 DPLANE_CTX_VALID(ctx);
1540 return ctx->u.rinfo.nhe.vrf_id;
1541 }
1542
1543 int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx)
1544 {
1545 DPLANE_CTX_VALID(ctx);
1546 return ctx->u.rinfo.nhe.type;
1547 }
1548
1549 const struct nexthop_group *
1550 dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx)
1551 {
1552 DPLANE_CTX_VALID(ctx);
1553 return &(ctx->u.rinfo.nhe.ng);
1554 }
1555
1556 const struct nh_grp *
1557 dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
1558 {
1559 DPLANE_CTX_VALID(ctx);
1560 return ctx->u.rinfo.nhe.nh_grp;
1561 }
1562
1563 uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
1564 {
1565 DPLANE_CTX_VALID(ctx);
1566 return ctx->u.rinfo.nhe.nh_grp_count;
1567 }
1568
1569 /* Accessors for LSP information */
1570
1571 mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
1572 {
1573 DPLANE_CTX_VALID(ctx);
1574
1575 return ctx->u.lsp.ile.in_label;
1576 }
1577
1578 void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
1579 {
1580 DPLANE_CTX_VALID(ctx);
1581
1582 ctx->u.lsp.ile.in_label = label;
1583 }
1584
1585 uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
1586 {
1587 DPLANE_CTX_VALID(ctx);
1588
1589 return ctx->u.lsp.addr_family;
1590 }
1591
1592 void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
1593 uint8_t family)
1594 {
1595 DPLANE_CTX_VALID(ctx);
1596
1597 ctx->u.lsp.addr_family = family;
1598 }
1599
1600 uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
1601 {
1602 DPLANE_CTX_VALID(ctx);
1603
1604 return ctx->u.lsp.flags;
1605 }
1606
1607 void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
1608 uint32_t flags)
1609 {
1610 DPLANE_CTX_VALID(ctx);
1611
1612 ctx->u.lsp.flags = flags;
1613 }
1614
1615 const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
1616 const struct zebra_dplane_ctx *ctx)
1617 {
1618 DPLANE_CTX_VALID(ctx);
1619 return &(ctx->u.lsp.nhlfe_list);
1620 }
1621
1622 const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
1623 const struct zebra_dplane_ctx *ctx)
1624 {
1625 DPLANE_CTX_VALID(ctx);
1626 return &(ctx->u.lsp.backup_nhlfe_list);
1627 }
1628
1629 struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
1630 enum lsp_types_t lsp_type,
1631 enum nexthop_types_t nh_type,
1632 const union g_addr *gate,
1633 ifindex_t ifindex, uint8_t num_labels,
1634 mpls_label_t *out_labels)
1635 {
1636 struct zebra_nhlfe *nhlfe;
1637
1638 DPLANE_CTX_VALID(ctx);
1639
1640 nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
1641 lsp_type, nh_type, gate,
1642 ifindex, num_labels, out_labels);
1643
1644 return nhlfe;
1645 }
1646
1647 struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
1648 struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
1649 enum nexthop_types_t nh_type, const union g_addr *gate,
1650 ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
1651 {
1652 struct zebra_nhlfe *nhlfe;
1653
1654 DPLANE_CTX_VALID(ctx);
1655
1656 nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
1657 lsp_type, nh_type, gate,
1658 ifindex, num_labels,
1659 out_labels);
1660
1661 return nhlfe;
1662 }
1663
1664 const struct zebra_nhlfe *
1665 dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
1666 {
1667 DPLANE_CTX_VALID(ctx);
1668
1669 return ctx->u.lsp.best_nhlfe;
1670 }
1671
1672 const struct zebra_nhlfe *
1673 dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
1674 struct zebra_nhlfe *nhlfe)
1675 {
1676 DPLANE_CTX_VALID(ctx);
1677
1678 ctx->u.lsp.best_nhlfe = nhlfe;
1679 return ctx->u.lsp.best_nhlfe;
1680 }
1681
1682 uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
1683 {
1684 DPLANE_CTX_VALID(ctx);
1685
1686 return ctx->u.lsp.num_ecmp;
1687 }
1688
1689 mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
1690 {
1691 DPLANE_CTX_VALID(ctx);
1692
1693 return ctx->u.pw.local_label;
1694 }
1695
1696 mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
1697 {
1698 DPLANE_CTX_VALID(ctx);
1699
1700 return ctx->u.pw.remote_label;
1701 }
1702
1703 int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
1704 {
1705 DPLANE_CTX_VALID(ctx);
1706
1707 return ctx->u.pw.type;
1708 }
1709
1710 int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
1711 {
1712 DPLANE_CTX_VALID(ctx);
1713
1714 return ctx->u.pw.af;
1715 }
1716
1717 uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
1718 {
1719 DPLANE_CTX_VALID(ctx);
1720
1721 return ctx->u.pw.flags;
1722 }
1723
1724 int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
1725 {
1726 DPLANE_CTX_VALID(ctx);
1727
1728 return ctx->u.pw.status;
1729 }
1730
1731 void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status)
1732 {
1733 DPLANE_CTX_VALID(ctx);
1734
1735 ctx->u.pw.status = status;
1736 }
1737
1738 const union g_addr *dplane_ctx_get_pw_dest(
1739 const struct zebra_dplane_ctx *ctx)
1740 {
1741 DPLANE_CTX_VALID(ctx);
1742
1743 return &(ctx->u.pw.dest);
1744 }
1745
1746 const union pw_protocol_fields *dplane_ctx_get_pw_proto(
1747 const struct zebra_dplane_ctx *ctx)
1748 {
1749 DPLANE_CTX_VALID(ctx);
1750
1751 return &(ctx->u.pw.fields);
1752 }
1753
1754 const struct nexthop_group *
1755 dplane_ctx_get_pw_nhg(const struct zebra_dplane_ctx *ctx)
1756 {
1757 DPLANE_CTX_VALID(ctx);
1758
1759 return &(ctx->u.pw.fib_nhg);
1760 }
1761
1762 const struct nexthop_group *
1763 dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
1764 {
1765 DPLANE_CTX_VALID(ctx);
1766
1767 return &(ctx->u.pw.primary_nhg);
1768 }
1769
1770 const struct nexthop_group *
1771 dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
1772 {
1773 DPLANE_CTX_VALID(ctx);
1774
1775 return &(ctx->u.pw.backup_nhg);
1776 }
1777
1778 /* Accessors for interface information */
1779 uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
1780 {
1781 DPLANE_CTX_VALID(ctx);
1782
1783 return ctx->u.intf.metric;
1784 }
1785
1786 void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
1787 {
1788 DPLANE_CTX_VALID(ctx);
1789
1790 ctx->u.intf.metric = metric;
1791 }
1792
1793 uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx *ctx)
1794 {
1795 DPLANE_CTX_VALID(ctx);
1796
1797 return ctx->u.intf.pd_reason_val;
1798 }
1799
1800 void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx *ctx, bool val)
1801 {
1802 DPLANE_CTX_VALID(ctx);
1803
1804 ctx->u.intf.pd_reason_val = val;
1805 }
1806
1807 bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx *ctx)
1808 {
1809 DPLANE_CTX_VALID(ctx);
1810
1811 return ctx->u.intf.protodown;
1812 }
1813
1814 /* Is interface addr p2p? */
1815 bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
1816 {
1817 DPLANE_CTX_VALID(ctx);
1818
1819 return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
1820 }
1821
1822 bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
1823 {
1824 DPLANE_CTX_VALID(ctx);
1825
1826 return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
1827 }
1828
1829 bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
1830 {
1831 DPLANE_CTX_VALID(ctx);
1832
1833 return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
1834 }
1835
1836 void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx)
1837 {
1838 DPLANE_CTX_VALID(ctx);
1839
1840 ctx->u.intf.flags |= DPLANE_INTF_CONNECTED;
1841 }
1842
1843 void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx)
1844 {
1845 DPLANE_CTX_VALID(ctx);
1846
1847 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
1848 }
1849
1850 void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx)
1851 {
1852 DPLANE_CTX_VALID(ctx);
1853
1854 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
1855 }
1856
1857 const struct prefix *dplane_ctx_get_intf_addr(
1858 const struct zebra_dplane_ctx *ctx)
1859 {
1860 DPLANE_CTX_VALID(ctx);
1861
1862 return &(ctx->u.intf.prefix);
1863 }
1864
1865 void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
1866 const struct prefix *p)
1867 {
1868 DPLANE_CTX_VALID(ctx);
1869
1870 prefix_copy(&(ctx->u.intf.prefix), p);
1871 }
1872
1873 bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
1874 {
1875 DPLANE_CTX_VALID(ctx);
1876
1877 return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
1878 }
1879
1880 const struct prefix *dplane_ctx_get_intf_dest(
1881 const struct zebra_dplane_ctx *ctx)
1882 {
1883 DPLANE_CTX_VALID(ctx);
1884
1885 return &(ctx->u.intf.dest_prefix);
1886 }
1887
1888 void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
1889 const struct prefix *p)
1890 {
1891 DPLANE_CTX_VALID(ctx);
1892
1893 prefix_copy(&(ctx->u.intf.dest_prefix), p);
1894 }
1895
1896 bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
1897 {
1898 DPLANE_CTX_VALID(ctx);
1899
1900 return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
1901 }
1902
1903 const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
1904 {
1905 DPLANE_CTX_VALID(ctx);
1906
1907 return ctx->u.intf.label;
1908 }
1909
1910 void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label)
1911 {
1912 size_t len;
1913
1914 DPLANE_CTX_VALID(ctx);
1915
1916 if (ctx->u.intf.label && ctx->u.intf.label != ctx->u.intf.label_buf)
1917 XFREE(MTYPE_DP_CTX, ctx->u.intf.label);
1918
1919 ctx->u.intf.label = NULL;
1920
1921 if (label) {
1922 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
1923
1924 /* Use embedded buffer if it's adequate; else allocate. */
1925 len = strlen(label);
1926
1927 if (len < sizeof(ctx->u.intf.label_buf)) {
1928 strlcpy(ctx->u.intf.label_buf, label,
1929 sizeof(ctx->u.intf.label_buf));
1930 ctx->u.intf.label = ctx->u.intf.label_buf;
1931 } else {
1932 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, label);
1933 }
1934 } else {
1935 ctx->u.intf.flags &= ~DPLANE_INTF_HAS_LABEL;
1936 }
1937 }
1938
1939 /* Accessors for MAC information */
1940 vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx)
1941 {
1942 DPLANE_CTX_VALID(ctx);
1943 return ctx->u.macinfo.vid;
1944 }
1945
1946 bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx)
1947 {
1948 DPLANE_CTX_VALID(ctx);
1949 return ctx->u.macinfo.is_sticky;
1950 }
1951
1952 uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx)
1953 {
1954 DPLANE_CTX_VALID(ctx);
1955 return ctx->u.macinfo.nhg_id;
1956 }
1957
1958 uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx)
1959 {
1960 DPLANE_CTX_VALID(ctx);
1961 return ctx->u.macinfo.update_flags;
1962 }
1963
1964 const struct ethaddr *dplane_ctx_mac_get_addr(
1965 const struct zebra_dplane_ctx *ctx)
1966 {
1967 DPLANE_CTX_VALID(ctx);
1968 return &(ctx->u.macinfo.mac);
1969 }
1970
1971 const struct in_addr *dplane_ctx_mac_get_vtep_ip(
1972 const struct zebra_dplane_ctx *ctx)
1973 {
1974 DPLANE_CTX_VALID(ctx);
1975 return &(ctx->u.macinfo.vtep_ip);
1976 }
1977
1978 ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
1979 {
1980 DPLANE_CTX_VALID(ctx);
1981 return ctx->u.macinfo.br_ifindex;
1982 }
1983
1984 /* Accessors for neighbor information */
1985 const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
1986 const struct zebra_dplane_ctx *ctx)
1987 {
1988 DPLANE_CTX_VALID(ctx);
1989 return &(ctx->u.neigh.ip_addr);
1990 }
1991
1992 const struct ipaddr *
1993 dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx)
1994 {
1995 DPLANE_CTX_VALID(ctx);
1996 return &(ctx->u.neigh.link.ip_addr);
1997 }
1998
1999 const struct ethaddr *dplane_ctx_neigh_get_mac(
2000 const struct zebra_dplane_ctx *ctx)
2001 {
2002 DPLANE_CTX_VALID(ctx);
2003 return &(ctx->u.neigh.link.mac);
2004 }
2005
2006 uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
2007 {
2008 DPLANE_CTX_VALID(ctx);
2009 return ctx->u.neigh.flags;
2010 }
2011
2012 uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
2013 {
2014 DPLANE_CTX_VALID(ctx);
2015 return ctx->u.neigh.state;
2016 }
2017
2018 uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx)
2019 {
2020 DPLANE_CTX_VALID(ctx);
2021 return ctx->u.neigh.update_flags;
2022 }
2023
2024 /* Accessors for GRE set info */
2025 uint32_t
2026 dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx)
2027 {
2028 DPLANE_CTX_VALID(ctx);
2029
2030 return ctx->u.gre.link_ifindex;
2031 }
2032
2033 unsigned int
2034 dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx)
2035 {
2036 DPLANE_CTX_VALID(ctx);
2037
2038 return ctx->u.gre.mtu;
2039 }
2040
2041 const struct zebra_l2info_gre *
2042 dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx)
2043 {
2044 DPLANE_CTX_VALID(ctx);
2045
2046 return &ctx->u.gre.info;
2047 }
2048
2049 /* Accessors for PBR rule information */
2050 int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx)
2051 {
2052 DPLANE_CTX_VALID(ctx);
2053
2054 return ctx->u.rule.sock;
2055 }
2056
2057 const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx)
2058 {
2059 DPLANE_CTX_VALID(ctx);
2060
2061 return ctx->u.rule.new.ifname;
2062 }
2063
2064 int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx)
2065 {
2066 DPLANE_CTX_VALID(ctx);
2067
2068 return ctx->u.rule.unique;
2069 }
2070
2071 int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx)
2072 {
2073 DPLANE_CTX_VALID(ctx);
2074
2075 return ctx->u.rule.seq;
2076 }
2077
2078 uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx)
2079 {
2080 DPLANE_CTX_VALID(ctx);
2081
2082 return ctx->u.rule.new.priority;
2083 }
2084
2085 uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx)
2086 {
2087 DPLANE_CTX_VALID(ctx);
2088
2089 return ctx->u.rule.old.priority;
2090 }
2091
2092 uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx)
2093 {
2094 DPLANE_CTX_VALID(ctx);
2095
2096 return ctx->u.rule.new.table;
2097 }
2098
2099 uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx)
2100 {
2101 DPLANE_CTX_VALID(ctx);
2102
2103 return ctx->u.rule.old.table;
2104 }
2105
2106 uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx)
2107 {
2108 DPLANE_CTX_VALID(ctx);
2109
2110 return ctx->u.rule.new.filter_bm;
2111 }
2112
2113 uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx)
2114 {
2115 DPLANE_CTX_VALID(ctx);
2116
2117 return ctx->u.rule.old.filter_bm;
2118 }
2119
2120 uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx)
2121 {
2122 DPLANE_CTX_VALID(ctx);
2123
2124 return ctx->u.rule.new.fwmark;
2125 }
2126
2127 uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
2128 {
2129 DPLANE_CTX_VALID(ctx);
2130
2131 return ctx->u.rule.old.fwmark;
2132 }
2133
2134 uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
2135 {
2136 DPLANE_CTX_VALID(ctx);
2137
2138 return ctx->u.rule.new.ip_proto;
2139 }
2140
2141 uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
2142 {
2143 DPLANE_CTX_VALID(ctx);
2144
2145 return ctx->u.rule.old.ip_proto;
2146 }
2147
2148 uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
2149 {
2150 DPLANE_CTX_VALID(ctx);
2151
2152 return ctx->u.rule.new.dsfield;
2153 }
2154
2155 uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx)
2156 {
2157 DPLANE_CTX_VALID(ctx);
2158
2159 return ctx->u.rule.old.dsfield;
2160 }
2161
2162 const struct prefix *
2163 dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx)
2164 {
2165 DPLANE_CTX_VALID(ctx);
2166
2167 return &(ctx->u.rule.new.src_ip);
2168 }
2169
2170 const struct prefix *
2171 dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx)
2172 {
2173 DPLANE_CTX_VALID(ctx);
2174
2175 return &(ctx->u.rule.old.src_ip);
2176 }
2177
2178 const struct prefix *
2179 dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx)
2180 {
2181 DPLANE_CTX_VALID(ctx);
2182
2183 return &(ctx->u.rule.new.dst_ip);
2184 }
2185
2186 const struct prefix *
2187 dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx)
2188 {
2189 DPLANE_CTX_VALID(ctx);
2190
2191 return &(ctx->u.rule.old.dst_ip);
2192 }
2193
2194 uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx)
2195 {
2196 DPLANE_CTX_VALID(ctx);
2197
2198 return ctx->u.br_port.flags;
2199 }
2200
2201 uint32_t
2202 dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx)
2203 {
2204 DPLANE_CTX_VALID(ctx);
2205
2206 return ctx->u.br_port.sph_filter_cnt;
2207 }
2208
2209 const struct in_addr *
2210 dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx)
2211 {
2212 DPLANE_CTX_VALID(ctx);
2213
2214 return ctx->u.br_port.sph_filters;
2215 }
2216
2217 uint32_t
2218 dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx)
2219 {
2220 DPLANE_CTX_VALID(ctx);
2221
2222 return ctx->u.br_port.backup_nhg_id;
2223 }
2224
2225 /* Accessors for PBR iptable information */
2226 void dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
2227 struct zebra_pbr_iptable *table)
2228 {
2229 DPLANE_CTX_VALID(ctx);
2230
2231 memcpy(table, &ctx->u.iptable, sizeof(struct zebra_pbr_iptable));
2232 }
2233
2234 void dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
2235 struct zebra_pbr_ipset *ipset)
2236 {
2237 DPLANE_CTX_VALID(ctx);
2238
2239 assert(ipset);
2240
2241 if (ctx->zd_op == DPLANE_OP_IPSET_ENTRY_ADD ||
2242 ctx->zd_op == DPLANE_OP_IPSET_ENTRY_DELETE) {
2243 memset(ipset, 0, sizeof(struct zebra_pbr_ipset));
2244 ipset->type = ctx->u.ipset_entry.info.type;
2245 ipset->family = ctx->u.ipset_entry.info.family;
2246 memcpy(&ipset->ipset_name, &ctx->u.ipset_entry.info.ipset_name,
2247 ZEBRA_IPSET_NAME_SIZE);
2248 } else
2249 memcpy(ipset, &ctx->u.ipset, sizeof(struct zebra_pbr_ipset));
2250 }
2251
2252 void dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
2253 struct zebra_pbr_ipset_entry *entry)
2254 {
2255 DPLANE_CTX_VALID(ctx);
2256
2257 assert(entry);
2258
2259 memcpy(entry, &ctx->u.ipset_entry.entry, sizeof(struct zebra_pbr_ipset_entry));
2260 }
2261
2262 /*
2263 * End of dplane context accessors
2264 */
2265
2266 /* Optional extra info about interfaces in nexthops - a plugin must enable
2267 * this extra info.
2268 */
2269 const struct dplane_intf_extra *
2270 dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
2271 {
2272 return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
2273 }
2274
2275 const struct dplane_intf_extra *
2276 dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
2277 const struct dplane_intf_extra *ptr)
2278 {
2279 return TAILQ_NEXT(ptr, link);
2280 }
2281
2282 vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
2283 {
2284 return ptr->vrf_id;
2285 }
2286
2287 uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr)
2288 {
2289 return ptr->ifindex;
2290 }
2291
2292 uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr)
2293 {
2294 return ptr->flags;
2295 }
2296
2297 uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr)
2298 {
2299 return ptr->status;
2300 }
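
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * the helper name is hypothetical. Assuming a plugin has enabled
 * collection of the extra interface info (via the dplane api mentioned
 * near the top of this file), it can walk the per-route list with the
 * accessors above.
 */
static void dplane_intf_extra_example(const struct zebra_dplane_ctx *ctx)
{
	const struct dplane_intf_extra *extra;

	/* Walk the optional per-route interface info queue */
	for (extra = dplane_ctx_get_intf_extra(ctx); extra != NULL;
	     extra = dplane_ctx_intf_extra_next(ctx, extra))
		zlog_debug("nexthop intf %u: flags 0x%x, status 0x%x",
			   dplane_intf_extra_get_ifindex(extra),
			   dplane_intf_extra_get_flags(extra),
			   dplane_intf_extra_get_status(extra));
}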
2301
2302 /*
2303 * End of interface extra info accessors
2304 */
2305
2306 uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx)
2307 {
2308 DPLANE_CTX_VALID(ctx);
2309
2310 return ctx->u.neightable.family;
2311 }
2312
2313 uint32_t
2314 dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx)
2315 {
2316 DPLANE_CTX_VALID(ctx);
2317
2318 return ctx->u.neightable.app_probes;
2319 }
2320
2321 uint32_t
2322 dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx)
2323 {
2324 DPLANE_CTX_VALID(ctx);
2325
2326 return ctx->u.neightable.ucast_probes;
2327 }
2328
2329 uint32_t
2330 dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
2331 {
2332 DPLANE_CTX_VALID(ctx);
2333
2334 return ctx->u.neightable.mcast_probes;
2335 }
2336
2337 ifindex_t dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx *ctx)
2338 {
2339 DPLANE_CTX_VALID(ctx);
2340
2341 return ctx->u.netconf.ifindex;
2342 }
2343
2344 ns_id_t dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx *ctx)
2345 {
2346 DPLANE_CTX_VALID(ctx);
2347
2348 return ctx->u.netconf.ns_id;
2349 }
2350
2351 void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx *ctx,
2352 ifindex_t ifindex)
2353 {
2354 DPLANE_CTX_VALID(ctx);
2355
2356 ctx->u.netconf.ifindex = ifindex;
2357 }
2358
2359 void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t ns_id)
2360 {
2361 DPLANE_CTX_VALID(ctx);
2362
2363 ctx->u.netconf.ns_id = ns_id;
2364 }
2365
2366 enum dplane_netconf_status_e
2367 dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
2368 {
2369 DPLANE_CTX_VALID(ctx);
2370
2371 return ctx->u.netconf.mpls_val;
2372 }
2373
2374 enum dplane_netconf_status_e
2375 dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
2376 {
2377 DPLANE_CTX_VALID(ctx);
2378
2379 return ctx->u.netconf.mcast_val;
2380 }
2381
2382 void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
2383 enum dplane_netconf_status_e val)
2384 {
2385 DPLANE_CTX_VALID(ctx);
2386
2387 ctx->u.netconf.mpls_val = val;
2388 }
2389
2390 void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
2391 enum dplane_netconf_status_e val)
2392 {
2393 DPLANE_CTX_VALID(ctx);
2394
2395 ctx->u.netconf.mcast_val = val;
2396 }
2397
2398 /*
2399 * Retrieve the limit on the number of pending, unprocessed updates.
2400 */
2401 uint32_t dplane_get_in_queue_limit(void)
2402 {
2403 return atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
2404 memory_order_relaxed);
2405 }
2406
2407 /*
2408 * Configure limit on the number of pending, queued updates.
2409 */
2410 void dplane_set_in_queue_limit(uint32_t limit, bool set)
2411 {
2412 /* Reset to default on 'unset' */
2413 if (!set)
2414 limit = DPLANE_DEFAULT_MAX_QUEUED;
2415
2416 atomic_store_explicit(&zdplane_info.dg_max_queued_updates, limit,
2417 memory_order_relaxed);
2418 }
2419
2420 /*
2421 * Retrieve the current queue depth of incoming, unprocessed updates
2422 */
2423 uint32_t dplane_get_in_queue_len(void)
2424 {
2425 return atomic_load_explicit(&zdplane_info.dg_routes_queued,
2426 memory_order_seq_cst);
2427 }
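
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * the helper name is hypothetical. Bursty callers could use the two
 * functions above to apply simple backpressure before generating more
 * updates.
 */
static bool dplane_queue_example_is_full(void)
{
	/* Compare current depth against the configured limit */
	return dplane_get_in_queue_len() >= dplane_get_in_queue_limit();
}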
2428
2429 /*
2430 * Internal helper that copies information from a zebra ns object; this is
2431 * called in the zebra main pthread context as part of dplane ctx init.
2432 */
2433 static void ctx_info_from_zns(struct zebra_dplane_info *ns_info,
2434 struct zebra_ns *zns)
2435 {
2436 ns_info->ns_id = zns->ns_id;
2437
2438 #if defined(HAVE_NETLINK)
2439 ns_info->is_cmd = true;
2440 ns_info->sock = zns->netlink_dplane_out.sock;
2441 ns_info->seq = zns->netlink_dplane_out.seq;
2442 #endif /* HAVE_NETLINK */
2443 }
2444
2445 /*
2446 * Common dataplane context init with zebra namespace info.
2447 */
2448 static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
2449 struct zebra_ns *zns,
2450 bool is_update)
2451 {
2452 ctx_info_from_zns(&(ctx->zd_ns_info), zns);
2453
2454 ctx->zd_is_update = is_update;
2455
2456 #if defined(HAVE_NETLINK)
2457 /* Increment message counter after copying to context struct - may need
2458 * two messages in some 'update' cases.
2459 */
2460 if (is_update)
2461 zns->netlink_dplane_out.seq += 2;
2462 else
2463 zns->netlink_dplane_out.seq++;
2464 #endif /* HAVE_NETLINK */
2465
2466 return AOK;
2467 }
2468
2469 /*
2470 * Initialize a context block for a route update from zebra data structs.
2471 */
2472 int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2473 struct route_node *rn, struct route_entry *re)
2474 {
2475 int ret = EINVAL;
2476 const struct route_table *table = NULL;
2477 const struct rib_table_info *info;
2478 const struct prefix *p, *src_p;
2479 struct zebra_ns *zns;
2480 struct zebra_vrf *zvrf;
2481 struct nexthop *nexthop;
2482 struct zebra_l3vni *zl3vni;
2483 const struct interface *ifp;
2484 struct dplane_intf_extra *if_extra;
2485
2486 if (!ctx || !rn || !re)
2487 goto done;
2488
2489 TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
2490
2491 ctx->zd_op = op;
2492 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2493
2494 ctx->u.rinfo.zd_type = re->type;
2495 ctx->u.rinfo.zd_old_type = re->type;
2496
2497 /* Prefixes: dest, and optional source */
2498 srcdest_rnode_prefixes(rn, &p, &src_p);
2499
2500 prefix_copy(&(ctx->u.rinfo.zd_dest), p);
2501
2502 if (src_p)
2503 prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
2504 else
2505 memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
2506
2507 ctx->zd_table_id = re->table;
2508
2509 ctx->u.rinfo.zd_metric = re->metric;
2510 ctx->u.rinfo.zd_old_metric = re->metric;
2511 ctx->zd_vrf_id = re->vrf_id;
2512 ctx->u.rinfo.zd_mtu = re->mtu;
2513 ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
2514 ctx->u.rinfo.zd_instance = re->instance;
2515 ctx->u.rinfo.zd_tag = re->tag;
2516 ctx->u.rinfo.zd_old_tag = re->tag;
2517 ctx->u.rinfo.zd_distance = re->distance;
2518
2519 table = srcdest_rnode_table(rn);
2520 info = table->info;
2521
2522 ctx->u.rinfo.zd_afi = info->afi;
2523 ctx->u.rinfo.zd_safi = info->safi;
2524
2525 /* Copy nexthops; recursive info is included too */
2526 copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
2527 re->nhe->nhg.nexthop, NULL);
2528 ctx->u.rinfo.zd_nhg_id = re->nhe->id;
2529
2530 /* Copy backup nexthop info, if present */
2531 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2532 copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
2533 re->nhe->backup_info->nhe->nhg.nexthop, NULL);
2534 }
2535
2536 /*
2537 * Ensure that the dplane nexthops' flags are clear and copy
2538 * encapsulation information.
2539 */
2540 for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) {
2541 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
2542
2543 /* Optionally capture extra interface info while we're in the
2544 * main zebra pthread - a plugin has to ask for this info.
2545 */
2546 if (dplane_collect_extra_intf_info) {
2547 ifp = if_lookup_by_index(nexthop->ifindex,
2548 nexthop->vrf_id);
2549
2550 if (ifp) {
2551 if_extra = XCALLOC(
2552 MTYPE_DP_INTF,
2553 sizeof(struct dplane_intf_extra));
2554 if_extra->vrf_id = nexthop->vrf_id;
2555 if_extra->ifindex = nexthop->ifindex;
2556 if_extra->flags = ifp->flags;
2557 if_extra->status = ifp->status;
2558
2559 TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
2560 if_extra, link);
2561 }
2562 }
2563
2564 /* Check for available evpn encapsulations. */
2565 if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE))
2566 continue;
2567
2568 zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
2569 if (zl3vni && is_l3vni_oper_up(zl3vni)) {
2570 nexthop->nh_encap_type = NET_VXLAN;
2571 nexthop->nh_encap.vni = zl3vni->vni;
2572 }
2573 }
2574
2575 /* Don't need some info when capturing a system notification */
2576 if (op == DPLANE_OP_SYS_ROUTE_ADD ||
2577 op == DPLANE_OP_SYS_ROUTE_DELETE) {
2578 ret = AOK;
2579 goto done;
2580 }
2581
2582 /* Extract ns info - can't use pointers to 'core' structs */
2583 zvrf = vrf_info_lookup(re->vrf_id);
2584 zns = zvrf->zns;
2585 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
2586
2587 #ifdef HAVE_NETLINK
2588 {
2589 struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
2590
2591 ctx->u.rinfo.nhe.id = nhe->id;
2592 ctx->u.rinfo.nhe.old_id = 0;
2593 /*
2594 * Check if the nhe is installed/queued before doing anything
2595 * with this route.
2596 *
2597 * If it's a delete, we only use the prefix anyway, so this only
2598 * matters for INSTALL/UPDATE.
2599 */
2600 if (zebra_nhg_kernel_nexthops_enabled()
2601 && (((op == DPLANE_OP_ROUTE_INSTALL)
2602 || (op == DPLANE_OP_ROUTE_UPDATE))
2603 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
2604 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
2605 ret = ENOENT;
2606 goto done;
2607 }
2608
2609 re->nhe_installed_id = nhe->id;
2610 }
2611 #endif /* HAVE_NETLINK */
2612
2613 /* Trying out the sequence number idea, so we can try to detect
2614 * when a result is stale.
2615 */
2616 re->dplane_sequence = zebra_router_get_next_sequence();
2617 ctx->zd_seq = re->dplane_sequence;
2618
2619 ret = AOK;
2620
2621 done:
2622 return ret;
2623 }
2624
2625 /**
2626 * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
2627 *
2628 * @ctx: Dataplane context to init
2629 * @op: Operation being performed
2630 * @nhe: Nexthop group hash entry
2631 *
2632 * Return: Result status
2633 */
2634 int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2635 struct nhg_hash_entry *nhe)
2636 {
2637 struct zebra_vrf *zvrf = NULL;
2638 struct zebra_ns *zns = NULL;
2639 int ret = EINVAL;
2640
2641 if (!ctx || !nhe)
2642 goto done;
2643
2644 ctx->zd_op = op;
2645 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2646
2647 /* Copy over nhe info */
2648 ctx->u.rinfo.nhe.id = nhe->id;
2649 ctx->u.rinfo.nhe.afi = nhe->afi;
2650 ctx->u.rinfo.nhe.vrf_id = nhe->vrf_id;
2651 ctx->u.rinfo.nhe.type = nhe->type;
2652
2653 nexthop_group_copy(&(ctx->u.rinfo.nhe.ng), &(nhe->nhg));
2654
2655 /* If this is a group, convert it to a grp array of ids */
2656 if (!zebra_nhg_depends_is_empty(nhe)
2657 && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_RECURSIVE))
2658 ctx->u.rinfo.nhe.nh_grp_count = zebra_nhg_nhe2grp(
2659 ctx->u.rinfo.nhe.nh_grp, nhe, MULTIPATH_NUM);
2660
2661 zvrf = vrf_info_lookup(nhe->vrf_id);
2662
2663 /*
2664 * Fallback to default namespace if the vrf got ripped out from under
2665 * us.
2666 */
2667 zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
2668
2669 /*
2670 * TODO: Might not need to mark this as an update, since
2671 * it probably won't require two messages
2672 */
2673 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_NH_UPDATE));
2674
2675 ret = AOK;
2676
2677 done:
2678 return ret;
2679 }
2680
2681 /**
2682 * dplane_ctx_intf_init() - Initialize a context block for an interface update
2683 *
2684 * @ctx: Dataplane context to init
2685 * @op: Operation being performed
2686 * @ifp: Interface
2687 *
2688 * Return: Result status
2689 */
2690 int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2691 const struct interface *ifp)
2692 {
2693 struct zebra_ns *zns;
2694 struct zebra_if *zif;
2695 int ret = EINVAL;
2696 bool set_pdown, unset_pdown;
2697
2698 if (!ctx || !ifp)
2699 goto done;
2700
2701 ctx->zd_op = op;
2702 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2703 ctx->zd_vrf_id = ifp->vrf->vrf_id;
2704
2705 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
2706 ctx->zd_ifindex = ifp->ifindex;
2707
2708 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
2709 dplane_ctx_ns_init(ctx, zns, false);
2710
2711
2712 /* Copy over ifp info */
2713 ctx->u.intf.metric = ifp->metric;
2714 ctx->u.intf.flags = ifp->flags;
2715
2716 /* Copy over extra zebra info, if available */
2717 zif = (struct zebra_if *)ifp->info;
2718
2719 if (zif) {
2720 set_pdown = !!(zif->flags & ZIF_FLAG_SET_PROTODOWN);
2721 unset_pdown = !!(zif->flags & ZIF_FLAG_UNSET_PROTODOWN);
2722
2723 if (zif->protodown_rc &&
2724 ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif) == false)
2725 ctx->u.intf.pd_reason_val = true;
2726
2727 /*
2728 * See if we have new protodown state to set, otherwise keep
2729 * current state
2730 */
2731 if (set_pdown)
2732 ctx->u.intf.protodown = true;
2733 else if (unset_pdown)
2734 ctx->u.intf.protodown = false;
2735 else
2736 ctx->u.intf.protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
2737 }
2738
2739 dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_INTF_UPDATE));
2740 ctx->zd_is_update = (op == DPLANE_OP_INTF_UPDATE);
2741
2742 ret = AOK;
2743
2744 done:
2745 return ret;
2746 }
2747
2748 /*
2749 * Capture information for an LSP update in a dplane context.
2750 */
2751 int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
2752 struct zebra_lsp *lsp)
2753 {
2754 int ret = AOK;
2755 struct zebra_nhlfe *nhlfe, *new_nhlfe;
2756
2757 ctx->zd_op = op;
2758 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2759
2760 /* Capture namespace info */
2761 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
2762 (op == DPLANE_OP_LSP_UPDATE));
2763
2764 memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
2765
2766 nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
2767 nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
2768
2769 /* This may be called to create/init a dplane context, not necessarily
2770 * to copy an lsp object.
2771 */
2772 if (lsp == NULL) {
2773 ret = AOK;
2774 goto done;
2775 }
2776
2777 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2778 zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
2779 dplane_op2str(op), lsp->ile.in_label,
2780 lsp->num_ecmp);
2781
2782 ctx->u.lsp.ile = lsp->ile;
2783 ctx->u.lsp.addr_family = lsp->addr_family;
2784 ctx->u.lsp.num_ecmp = lsp->num_ecmp;
2785 ctx->u.lsp.flags = lsp->flags;
2786
2787 /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
2788 frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
2789 /* Not sure if this is meaningful... */
2790 if (nhlfe->nexthop == NULL)
2791 continue;
2792
2793 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
2794 nhlfe->nexthop);
2795 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2796 ret = ENOMEM;
2797 break;
2798 }
2799
2800 /* Need to copy flags and backup info too */
2801 new_nhlfe->flags = nhlfe->flags;
2802 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2803
2804 if (CHECK_FLAG(new_nhlfe->nexthop->flags,
2805 NEXTHOP_FLAG_HAS_BACKUP)) {
2806 new_nhlfe->nexthop->backup_num =
2807 nhlfe->nexthop->backup_num;
2808 memcpy(new_nhlfe->nexthop->backup_idx,
2809 nhlfe->nexthop->backup_idx,
2810 new_nhlfe->nexthop->backup_num);
2811 }
2812
2813 if (nhlfe == lsp->best_nhlfe)
2814 ctx->u.lsp.best_nhlfe = new_nhlfe;
2815 }
2816
2817 if (ret != AOK)
2818 goto done;
2819
2820 /* Capture backup nhlfes/nexthops */
2821 frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
2822 /* Not sure if this is meaningful... */
2823 if (nhlfe->nexthop == NULL)
2824 continue;
2825
2826 new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
2827 nhlfe->type,
2828 nhlfe->nexthop);
2829 if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
2830 ret = ENOMEM;
2831 break;
2832 }
2833
2834 /* Need to copy flags too */
2835 new_nhlfe->flags = nhlfe->flags;
2836 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
2837 }
2838
2839 /* On error the ctx will be cleaned-up, so we don't need to
2840 * deal with any allocated nhlfe or nexthop structs here.
2841 */
2842 done:
2843
2844 return ret;
2845 }
2846
2847 /*
2848 * Capture information for a pseudowire update in a dplane context.
2849 */
2850 static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
2851 enum dplane_op_e op,
2852 struct zebra_pw *pw)
2853 {
2854 int ret = EINVAL;
2855 struct prefix p;
2856 afi_t afi;
2857 struct route_table *table;
2858 struct route_node *rn;
2859 struct route_entry *re;
2860 const struct nexthop_group *nhg;
2861 struct nexthop *nh, *newnh, *last_nh;
2862
2863 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
2864 zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
2865 dplane_op2str(op), pw->ifname, pw->local_label,
2866 pw->remote_label);
2867
2868 ctx->zd_op = op;
2869 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
2870
2871 /* Capture namespace info: no netlink support as of 12/18,
2872 * but just in case...
2873 */
2874 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
2875
2876 memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
2877
2878 /* This name appears to be a C string, so we use a string copy. */
2879 strlcpy(ctx->zd_ifname, pw->ifname, sizeof(ctx->zd_ifname));
2880
2881 ctx->zd_vrf_id = pw->vrf_id;
2882 ctx->zd_ifindex = pw->ifindex;
2883 ctx->u.pw.type = pw->type;
2884 ctx->u.pw.af = pw->af;
2885 ctx->u.pw.local_label = pw->local_label;
2886 ctx->u.pw.remote_label = pw->remote_label;
2887 ctx->u.pw.flags = pw->flags;
2888
2889 ctx->u.pw.dest = pw->nexthop;
2890
2891 ctx->u.pw.fields = pw->data;
2892
2893 /* Capture nexthop info for the pw destination. We need to look
2894 * up and use zebra datastructs, but we're running in the zebra
2895 * pthread here so that should be ok.
2896 */
2897 memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
2898 p.family = pw->af;
2899 p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
2900
2901 afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
2902 table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
2903 if (table == NULL)
2904 goto done;
2905
2906 rn = route_node_match(table, &p);
2907 if (rn == NULL)
2908 goto done;
2909
2910 re = NULL;
2911 RNODE_FOREACH_RE(rn, re) {
2912 if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
2913 break;
2914 }
2915
2916 if (re) {
2917 /* We'll capture a 'fib' list of nexthops that meet our
2918 * criteria: installed, and labelled.
2919 */
2920 nhg = rib_get_fib_nhg(re);
2921 last_nh = NULL;
2922
2923 if (nhg && nhg->nexthop) {
2924 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2925 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2926 || CHECK_FLAG(nh->flags,
2927 NEXTHOP_FLAG_RECURSIVE)
2928 || nh->nh_label == NULL)
2929 continue;
2930
2931 newnh = nexthop_dup(nh, NULL);
2932
2933 if (last_nh)
2934 NEXTHOP_APPEND(last_nh, newnh);
2935 else
2936 ctx->u.pw.fib_nhg.nexthop = newnh;
2937 last_nh = newnh;
2938 }
2939 }
2940
2941 /* Include any installed backup nexthops also. */
2942 nhg = rib_get_fib_backup_nhg(re);
2943 if (nhg && nhg->nexthop) {
2944 for (ALL_NEXTHOPS_PTR(nhg, nh)) {
2945 if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
2946 || CHECK_FLAG(nh->flags,
2947 NEXTHOP_FLAG_RECURSIVE)
2948 || nh->nh_label == NULL)
2949 continue;
2950
2951 newnh = nexthop_dup(nh, NULL);
2952
2953 if (last_nh)
2954 NEXTHOP_APPEND(last_nh, newnh);
2955 else
2956 ctx->u.pw.fib_nhg.nexthop = newnh;
2957 last_nh = newnh;
2958 }
2959 }
2960
2961 /* Copy primary nexthops; recursive info is included too */
2962 assert(re->nhe != NULL); /* SA warning */
2963 copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
2964 re->nhe->nhg.nexthop, NULL);
2965 ctx->u.pw.nhg_id = re->nhe->id;
2966
2967 /* Copy backup nexthop info, if present */
2968 if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
2969 copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
2970 re->nhe->backup_info->nhe->nhg.nexthop,
2971 NULL);
2972 }
2973 }
2974 route_unlock_node(rn);
2975
2976 ret = AOK;
2977
2978 done:
2979 return ret;
2980 }
2981
2982 /**
2983 * dplane_ctx_rule_init_single() - Initialize a dataplane representation of a
2984 * PBR rule.
2985 *
2986 * @dplane_rule: Dataplane internal representation of a rule
2987 * @rule: PBR rule
2988 */
2989 static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
2990 struct zebra_pbr_rule *rule)
2991 {
2992 dplane_rule->priority = rule->rule.priority;
2993 dplane_rule->table = rule->rule.action.table;
2994
2995 dplane_rule->filter_bm = rule->rule.filter.filter_bm;
2996 dplane_rule->fwmark = rule->rule.filter.fwmark;
2997 dplane_rule->dsfield = rule->rule.filter.dsfield;
2998 dplane_rule->ip_proto = rule->rule.filter.ip_proto;
2999 prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
3000 prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
3001
3002 dplane_rule->action_pcp = rule->rule.action.pcp;
3003 dplane_rule->action_vlan_flags = rule->rule.action.vlan_flags;
3004 dplane_rule->action_vlan_id = rule->rule.action.vlan_id;
3005 dplane_rule->action_queue_id = rule->rule.action.queue_id;
3006
3007 strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
3008 }
3009
3010 /**
3011 * dplane_ctx_rule_init() - Initialize a context block for a PBR rule update.
3012 *
3013 * @ctx: Dataplane context to init
3014 * @op: Operation being performed
3015 * @new_rule: New PBR rule (@old_rule carries the previous rule on update)
3016 *
3017 * Return: Result status
3018 */
3019 static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
3020 enum dplane_op_e op,
3021 struct zebra_pbr_rule *new_rule,
3022 struct zebra_pbr_rule *old_rule)
3023 {
3024 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3025 zlog_debug(
3026 "init dplane ctx %s: IF %s Prio %u Fwmark %u Src %pFX Dst %pFX Table %u",
3027 dplane_op2str(op), new_rule->ifname,
3028 new_rule->rule.priority, new_rule->rule.filter.fwmark,
3029 &new_rule->rule.filter.src_ip,
3030 &new_rule->rule.filter.dst_ip,
3031 new_rule->rule.action.table);
3032
3033 ctx->zd_op = op;
3034 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3035
3036 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
3037 op == DPLANE_OP_RULE_UPDATE);
3038
3039 ctx->zd_vrf_id = new_rule->vrf_id;
3040 strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
3041
3042 ctx->u.rule.sock = new_rule->sock;
3043 ctx->u.rule.unique = new_rule->rule.unique;
3044 ctx->u.rule.seq = new_rule->rule.seq;
3045
3046 dplane_ctx_rule_init_single(&ctx->u.rule.new, new_rule);
3047 if (op == DPLANE_OP_RULE_UPDATE)
3048 dplane_ctx_rule_init_single(&ctx->u.rule.old, old_rule);
3049
3050 return AOK;
3051 }
3052
3053 /**
3054 * dplane_ctx_iptable_init() - Initialize a context block for a PBR iptable
3055 * update.
3056 *
3057 * @ctx: Dataplane context to init
3058 * @op: Operation being performed
3059 * @iptable: PBR iptable
3060 *
3061 * Return: Result status
3062 */
3063 static int dplane_ctx_iptable_init(struct zebra_dplane_ctx *ctx,
3064 enum dplane_op_e op,
3065 struct zebra_pbr_iptable *iptable)
3066 {
3067 char *ifname;
3068 struct listnode *node;
3069
3070 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3071 zlog_debug(
3072 "init dplane ctx %s: Unique %u Fwmark %u Family %s Action %s",
3073 dplane_op2str(op), iptable->unique, iptable->fwmark,
3074 family2str(iptable->family),
3075 iptable->action == ZEBRA_IPTABLES_DROP ? "Drop"
3076 : "Forward");
3077 }
3078
3079 ctx->zd_op = op;
3080 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3081
3082 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3083
3084 ctx->zd_vrf_id = iptable->vrf_id;
3085 memcpy(&ctx->u.iptable, iptable, sizeof(struct zebra_pbr_iptable));
3086 ctx->u.iptable.interface_name_list = NULL;
3087 if (iptable->nb_interface > 0) {
3088 ctx->u.iptable.interface_name_list = list_new();
3089 for (ALL_LIST_ELEMENTS_RO(iptable->interface_name_list, node,
3090 ifname)) {
3091 listnode_add(ctx->u.iptable.interface_name_list,
3092 XSTRDUP(MTYPE_DP_NETFILTER, ifname));
3093 }
3094 }
3095 return AOK;
3096 }
3097
3098 /**
3099 * dplane_ctx_ipset_init() - Initialize a context block for a PBR ipset update.
3100 *
3101 * @ctx: Dataplane context to init
3102 * @op: Operation being performed
3103 * @ipset: PBR ipset
3104 *
3105 * Return: Result status
3106 */
3107 static int dplane_ctx_ipset_init(struct zebra_dplane_ctx *ctx,
3108 enum dplane_op_e op,
3109 struct zebra_pbr_ipset *ipset)
3110 {
3111 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3112 zlog_debug("init dplane ctx %s: %s Unique %u Family %s Type %s",
3113 dplane_op2str(op), ipset->ipset_name, ipset->unique,
3114 family2str(ipset->family),
3115 zebra_pbr_ipset_type2str(ipset->type));
3116 }
3117
3118 ctx->zd_op = op;
3119 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3120
3121 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3122
3123 ctx->zd_vrf_id = ipset->vrf_id;
3124
3125 memcpy(&ctx->u.ipset, ipset, sizeof(struct zebra_pbr_ipset));
3126 return AOK;
3127 }
3128
3129 /**
3130 * dplane_ctx_ipset_entry_init() - Initialize a context block for a PBR ipset
3131 * entry update.
3132 *
3133 * @ctx: Dataplane context to init
3134 * @op: Operation being performed
3135 * @ipset_entry: PBR ipset entry
3136 *
3137 * Return: Result status
3138 */
3139 static int
3140 dplane_ctx_ipset_entry_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
3141 struct zebra_pbr_ipset_entry *ipset_entry)
3142 {
3143 struct zebra_pbr_ipset *ipset;
3144
3145 ipset = ipset_entry->backpointer;
3146 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
3147 zlog_debug("init dplane ctx %s: %s Unique %u filter %u",
3148 dplane_op2str(op), ipset->ipset_name,
3149 ipset_entry->unique, ipset_entry->filter_bm);
3150 }
3151
3152 ctx->zd_op = op;
3153 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3154
3155 dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
3156
3157 ctx->zd_vrf_id = ipset->vrf_id;
3158
3159 memcpy(&ctx->u.ipset_entry.entry, ipset_entry,
3160 sizeof(struct zebra_pbr_ipset_entry));
3161 ctx->u.ipset_entry.entry.backpointer = NULL;
3162 ctx->u.ipset_entry.info.type = ipset->type;
3163 ctx->u.ipset_entry.info.family = ipset->family;
3164 memcpy(&ctx->u.ipset_entry.info.ipset_name, &ipset->ipset_name,
3165 ZEBRA_IPSET_NAME_SIZE);
3166
3167 return AOK;
3168 }
3169
3170
3171 /*
3172 * Enqueue a new update,
3173 * and ensure an event is active for the dataplane pthread.
3174 */
3175 static int dplane_update_enqueue(struct zebra_dplane_ctx *ctx)
3176 {
3177 int ret = EINVAL;
3178 uint32_t high, curr;
3179
3180 /* Enqueue for processing by the dataplane pthread */
3181 DPLANE_LOCK();
3182 {
3183 TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
3184 zd_q_entries);
3185 }
3186 DPLANE_UNLOCK();
3187
3188 curr = atomic_fetch_add_explicit(
3189 &(zdplane_info.dg_routes_queued),
3190 1, memory_order_seq_cst);
3191
3192 curr++; /* We got the pre-incremented value */
3193
3194 /* Maybe update high-water counter also */
3195 high = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
3196 memory_order_seq_cst);
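	/* Note: on failure, compare_exchange_weak reloads 'high' with the
	 * value currently stored, so this loop retries until the recorded
	 * high-water mark is at least 'curr'.
	 */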
3197 while (high < curr) {
3198 if (atomic_compare_exchange_weak_explicit(
3199 &zdplane_info.dg_routes_queued_max,
3200 &high, curr,
3201 memory_order_seq_cst,
3202 memory_order_seq_cst))
3203 break;
3204 }
3205
3206 /* Ensure that an event for the dataplane thread is active */
3207 ret = dplane_provider_work_ready();
3208
3209 return ret;
3210 }
3211
3212 /*
3213 * Utility that prepares a route update and enqueues it for processing
3214 */
3215 static enum zebra_dplane_result
3216 dplane_route_update_internal(struct route_node *rn,
3217 struct route_entry *re,
3218 struct route_entry *old_re,
3219 enum dplane_op_e op)
3220 {
3221 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3222 int ret = EINVAL;
3223 struct zebra_dplane_ctx *ctx = NULL;
3224
3225 /* Obtain context block */
3226 ctx = dplane_ctx_alloc();
3227
3228 /* Init context with info from zebra data structs */
3229 ret = dplane_ctx_route_init(ctx, op, rn, re);
3230 if (ret == AOK) {
3231 /* Capture some extra info for update case
3232 * where there's a different 'old' route.
3233 */
3234 if ((op == DPLANE_OP_ROUTE_UPDATE) &&
3235 old_re && (old_re != re)) {
3236
3237 old_re->dplane_sequence =
3238 zebra_router_get_next_sequence();
3239 ctx->zd_old_seq = old_re->dplane_sequence;
3240
3241 ctx->u.rinfo.zd_old_tag = old_re->tag;
3242 ctx->u.rinfo.zd_old_type = old_re->type;
3243 ctx->u.rinfo.zd_old_instance = old_re->instance;
3244 ctx->u.rinfo.zd_old_distance = old_re->distance;
3245 ctx->u.rinfo.zd_old_metric = old_re->metric;
3246 ctx->u.rinfo.nhe.old_id = old_re->nhe->id;
3247
3248 #ifndef HAVE_NETLINK
3249 /* For bsd, capture previous re's nexthops too, sigh.
3250 * We'll need these to do per-nexthop deletes.
3251 */
3252 copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
3253 old_re->nhe->nhg.nexthop, NULL);
3254
3255 if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
3256 struct nexthop_group *nhg;
3257 struct nexthop **nh;
3258
3259 nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
3260 nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
3261
3262 if (nhg->nexthop)
3263 copy_nexthops(nh, nhg->nexthop, NULL);
3264 }
3265 #endif /* !HAVE_NETLINK */
3266 }
3267
3268 /*
3269 * If the old and new context types and nexthop group id
3270 * are the same, there is no need to send down a route replace:
3271 * we know we have already sent a nexthop group replace, or
3272 * an upper-level protocol has sent us the exact
3273 * same route again.
3274 */
3275 if ((dplane_ctx_get_type(ctx) == dplane_ctx_get_old_type(ctx))
3276 && (dplane_ctx_get_nhe_id(ctx)
3277 == dplane_ctx_get_old_nhe_id(ctx))
3278 && (dplane_ctx_get_nhe_id(ctx) >= ZEBRA_NHG_PROTO_LOWER)) {
3279 struct nexthop *nexthop;
3280
3281 if (IS_ZEBRA_DEBUG_DPLANE)
3282 zlog_debug(
3283 "%s: Ignoring Route exactly the same",
3284 __func__);
3285
3286 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
3287 nexthop)) {
3288 if (CHECK_FLAG(nexthop->flags,
3289 NEXTHOP_FLAG_RECURSIVE))
3290 continue;
3291
3292 if (CHECK_FLAG(nexthop->flags,
3293 NEXTHOP_FLAG_ACTIVE))
3294 SET_FLAG(nexthop->flags,
3295 NEXTHOP_FLAG_FIB);
3296 }
3297
3298 dplane_ctx_free(&ctx);
3299 return ZEBRA_DPLANE_REQUEST_SUCCESS;
3300 }
3301
3302 /* Enqueue context for processing */
3303 ret = dplane_update_enqueue(ctx);
3304 }
3305
3306 /* Update counter */
3307 atomic_fetch_add_explicit(&zdplane_info.dg_routes_in, 1,
3308 memory_order_relaxed);
3309
3310 if (ret == AOK)
3311 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3312 else {
3313 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
3314 memory_order_relaxed);
3315 if (ctx)
3316 dplane_ctx_free(&ctx);
3317 }
3318
3319 return result;
3320 }
3321
3322 /**
3323 * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
3324 *
3325 * @nhe: Nexthop group hash entry where the change occurred
3326 * @op: The operation to be enqueued
3327 *
3328 * Return: Result of the change
3329 */
3330 static enum zebra_dplane_result
3331 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
3332 {
3333 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3334 int ret = EINVAL;
3335 struct zebra_dplane_ctx *ctx = NULL;
3336
3337 /* Obtain context block */
3338 ctx = dplane_ctx_alloc();
3339 if (!ctx) {
3340 ret = ENOMEM;
3341 goto done;
3342 }
3343
3344 ret = dplane_ctx_nexthop_init(ctx, op, nhe);
3345 if (ret == AOK)
3346 ret = dplane_update_enqueue(ctx);
3347
3348 done:
3349 /* Update counter */
3350 atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1,
3351 memory_order_relaxed);
3352
3353 if (ret == AOK)
3354 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3355 else {
3356 atomic_fetch_add_explicit(&zdplane_info.dg_nexthop_errors, 1,
3357 memory_order_relaxed);
3358 if (ctx)
3359 dplane_ctx_free(&ctx);
3360 }
3361
3362 return result;
3363 }
3364
3365 /*
3366 * Enqueue a route 'add' for the dataplane.
3367 */
3368 enum zebra_dplane_result dplane_route_add(struct route_node *rn,
3369 struct route_entry *re)
3370 {
3371 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3372
3373 if (rn == NULL || re == NULL)
3374 goto done;
3375
3376 ret = dplane_route_update_internal(rn, re, NULL,
3377 DPLANE_OP_ROUTE_INSTALL);
3378
3379 done:
3380 return ret;
3381 }
3382
3383 /*
3384 * Enqueue a route update for the dataplane.
3385 */
3386 enum zebra_dplane_result dplane_route_update(struct route_node *rn,
3387 struct route_entry *re,
3388 struct route_entry *old_re)
3389 {
3390 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3391
3392 if (rn == NULL || re == NULL)
3393 goto done;
3394
3395 ret = dplane_route_update_internal(rn, re, old_re,
3396 DPLANE_OP_ROUTE_UPDATE);
3397 done:
3398 return ret;
3399 }
3400
3401 /*
3402 * Enqueue a route removal for the dataplane.
3403 */
3404 enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
3405 struct route_entry *re)
3406 {
3407 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3408
3409 if (rn == NULL || re == NULL)
3410 goto done;
3411
3412 ret = dplane_route_update_internal(rn, re, NULL,
3413 DPLANE_OP_ROUTE_DELETE);
3414
3415 done:
3416 return ret;
3417 }
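
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * the helper name is hypothetical. Callers of the enqueue apis above
 * typically treat ZEBRA_DPLANE_REQUEST_QUEUED as "in progress" and
 * anything else as an immediate failure.
 */
static void dplane_route_add_example(struct route_node *rn,
				     struct route_entry *re)
{
	/* Enqueue the install and report if it could not be queued */
	if (dplane_route_add(rn, re) == ZEBRA_DPLANE_REQUEST_FAILURE)
		zlog_debug("route install could not be queued");
}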
3418
3419 /*
3420 * Notify the dplane when system/connected routes change.
3421 */
3422 enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
3423 struct route_entry *re)
3424 {
3425 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3426
3427 /* Ignore this event unless a provider plugin has requested it. */
3428 if (!zdplane_info.dg_sys_route_notifs) {
3429 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3430 goto done;
3431 }
3432
3433 if (rn == NULL || re == NULL)
3434 goto done;
3435
3436 ret = dplane_route_update_internal(rn, re, NULL,
3437 DPLANE_OP_SYS_ROUTE_ADD);
3438
3439 done:
3440 return ret;
3441 }
3442
3443 /*
3444 * Notify the dplane when system/connected routes are deleted.
3445 */
3446 enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
3447 struct route_entry *re)
3448 {
3449 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3450
3451 /* Ignore this event unless a provider plugin has requested it. */
3452 if (!zdplane_info.dg_sys_route_notifs) {
3453 ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
3454 goto done;
3455 }
3456
3457 if (rn == NULL || re == NULL)
3458 goto done;
3459
3460 ret = dplane_route_update_internal(rn, re, NULL,
3461 DPLANE_OP_SYS_ROUTE_DELETE);
3462
3463 done:
3464 return ret;
3465 }
3466
3467 /*
3468 * Update from an async notification, to bring other fibs up-to-date.
3469 */
3470 enum zebra_dplane_result
3471 dplane_route_notif_update(struct route_node *rn,
3472 struct route_entry *re,
3473 enum dplane_op_e op,
3474 struct zebra_dplane_ctx *ctx)
3475 {
3476 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3477 int ret = EINVAL;
3478 struct zebra_dplane_ctx *new_ctx = NULL;
3479 struct nexthop *nexthop;
3480 struct nexthop_group *nhg;
3481
3482 if (rn == NULL || re == NULL)
3483 goto done;
3484
3485 new_ctx = dplane_ctx_alloc();
3486 if (new_ctx == NULL)
3487 goto done;
3488
3489 /* Init context with info from zebra data structs */
3490 dplane_ctx_route_init(new_ctx, op, rn, re);
3491
3492 /* For add/update, need to adjust the nexthops so that we match
3493 * the notification state, which may not be the route-entry/RIB
3494 * state.
3495 */
3496 if (op == DPLANE_OP_ROUTE_UPDATE ||
3497 op == DPLANE_OP_ROUTE_INSTALL) {
3498
3499 nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
3500 new_ctx->u.rinfo.zd_ng.nexthop = NULL;
3501
3502 nhg = rib_get_fib_nhg(re);
3503 if (nhg && nhg->nexthop)
3504 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3505 nhg->nexthop, NULL);
3506
3507 /* Check for installed backup nexthops also */
3508 nhg = rib_get_fib_backup_nhg(re);
3509 if (nhg && nhg->nexthop) {
3510 copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
3511 nhg->nexthop, NULL);
3512 }
3513
3514 for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
3515 UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
3516
3517 }
3518
3519 /* Capture info about the source of the notification, in 'ctx' */
3520 dplane_ctx_set_notif_provider(new_ctx,
3521 dplane_ctx_get_notif_provider(ctx));
3522
3523 ret = dplane_update_enqueue(new_ctx);
3524
3525 done:
3526 if (ret == AOK)
3527 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3528 else if (new_ctx)
3529 dplane_ctx_free(&new_ctx);
3530
3531 return result;
3532 }
3533
3534 /*
3535 * Enqueue a nexthop add for the dataplane.
3536 */
3537 enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe)
3538 {
3539 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3540
3541 if (nhe)
3542 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_INSTALL);
3543 return ret;
3544 }
3545
3546 /*
3547 * Enqueue a nexthop update for the dataplane.
3548 *
3549 * Might not need this func since zebra's nexthop objects should be immutable?
3550 */
3551 enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe)
3552 {
3553 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3554
3555 if (nhe)
3556 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_UPDATE);
3557 return ret;
3558 }
3559
3560 /*
3561 * Enqueue a nexthop removal for the dataplane.
3562 */
3563 enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe)
3564 {
3565 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3566
3567 if (nhe)
3568 ret = dplane_nexthop_update_internal(nhe, DPLANE_OP_NH_DELETE);
3569
3570 return ret;
3571 }
3572
3573 /*
3574 * Enqueue LSP add for the dataplane.
3575 */
3576 enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
3577 {
3578 enum zebra_dplane_result ret =
3579 lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
3580
3581 return ret;
3582 }
3583
3584 /*
3585 * Enqueue LSP update for the dataplane.
3586 */
3587 enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
3588 {
3589 enum zebra_dplane_result ret =
3590 lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
3591
3592 return ret;
3593 }
3594
3595 /*
3596 * Enqueue LSP delete for the dataplane.
3597 */
3598 enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
3599 {
3600 enum zebra_dplane_result ret =
3601 lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
3602
3603 return ret;
3604 }
3605
3606 /* Update or un-install resulting from an async notification */
3607 enum zebra_dplane_result
3608 dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
3609 struct zebra_dplane_ctx *notif_ctx)
3610 {
3611 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3612 int ret = EINVAL;
3613 struct zebra_dplane_ctx *ctx = NULL;
3614 struct nhlfe_list_head *head;
3615 struct zebra_nhlfe *nhlfe, *new_nhlfe;
3616
3617 /* Obtain context block */
3618 ctx = dplane_ctx_alloc();
3619 if (ctx == NULL) {
3620 ret = ENOMEM;
3621 goto done;
3622 }
3623
3624 /* Copy info from zebra LSP */
3625 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3626 if (ret != AOK)
3627 goto done;
3628
3629 /* Add any installed backup nhlfes */
3630 head = &(ctx->u.lsp.backup_nhlfe_list);
3631 frr_each(nhlfe_list, head, nhlfe) {
3632
3633 if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) &&
3634 CHECK_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_FIB)) {
3635 new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp),
3636 nhlfe->type,
3637 nhlfe->nexthop);
3638
3639 /* Need to copy flags too */
3640 new_nhlfe->flags = nhlfe->flags;
3641 new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
3642 }
3643 }
3644
3645 /* Capture info about the source of the notification */
3646 dplane_ctx_set_notif_provider(
3647 ctx,
3648 dplane_ctx_get_notif_provider(notif_ctx));
3649
3650 ret = dplane_update_enqueue(ctx);
3651
3652 done:
3653 /* Update counter */
3654 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3655 memory_order_relaxed);
3656
3657 if (ret == AOK)
3658 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3659 else {
3660 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3661 memory_order_relaxed);
3662 if (ctx)
3663 dplane_ctx_free(&ctx);
3664 }
3665 return result;
3666 }
3667
3668 /*
3669 * Enqueue pseudowire install for the dataplane.
3670 */
3671 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
3672 {
3673 return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
3674 }
3675
3676 /*
3677 * Enqueue pseudowire un-install for the dataplane.
3678 */
3679 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
3680 {
3681 return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
3682 }
3683
3684 /*
3685 * Common internal LSP update utility
3686 */
3687 static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
3688 enum dplane_op_e op)
3689 {
3690 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3691 int ret = EINVAL;
3692 struct zebra_dplane_ctx *ctx = NULL;
3693
3694 /* Obtain context block */
3695 ctx = dplane_ctx_alloc();
3696
3697 ret = dplane_ctx_lsp_init(ctx, op, lsp);
3698 if (ret != AOK)
3699 goto done;
3700
3701 ret = dplane_update_enqueue(ctx);
3702
3703 done:
3704 /* Update counter */
3705 atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
3706 memory_order_relaxed);
3707
3708 if (ret == AOK)
3709 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3710 else {
3711 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
3712 memory_order_relaxed);
3713 dplane_ctx_free(&ctx);
3714 }
3715
3716 return result;
3717 }
3718
3719 /*
3720 * Internal, common handler for pseudowire updates.
3721 */
3722 static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
3723 enum dplane_op_e op)
3724 {
3725 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3726 int ret;
3727 struct zebra_dplane_ctx *ctx = NULL;
3728
3729 ctx = dplane_ctx_alloc();
3730
3731 ret = dplane_ctx_pw_init(ctx, op, pw);
3732 if (ret != AOK)
3733 goto done;
3734
3735 ret = dplane_update_enqueue(ctx);
3736
3737 done:
3738 /* Update counter */
3739 atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
3740 memory_order_relaxed);
3741
3742 if (ret == AOK)
3743 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3744 else {
3745 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
3746 memory_order_relaxed);
3747 dplane_ctx_free(&ctx);
3748 }
3749
3750 return result;
3751 }
3752
3753 /*
3754 * Enqueue access br_port update.
3755 */
3756 enum zebra_dplane_result
3757 dplane_br_port_update(const struct interface *ifp, bool non_df,
3758 uint32_t sph_filter_cnt,
3759 const struct in_addr *sph_filters, uint32_t backup_nhg_id)
3760 {
3761 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3762 uint32_t flags = 0;
3763 int ret;
3764 struct zebra_dplane_ctx *ctx = NULL;
3765 struct zebra_ns *zns;
3766 enum dplane_op_e op = DPLANE_OP_BR_PORT_UPDATE;
3767
3768 if (non_df)
3769 flags |= DPLANE_BR_PORT_NON_DF;
3770
3771 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_EVPN_MH_ES) {
3772 uint32_t i;
3773 char vtep_str[ES_VTEP_LIST_STR_SZ];
3774
3775 vtep_str[0] = '\0';
3776 for (i = 0; i < sph_filter_cnt; ++i) {
3777 snprintfrr(vtep_str + strlen(vtep_str),
3778 sizeof(vtep_str) - strlen(vtep_str), "%pI4 ",
3779 &sph_filters[i]);
3780 }
3781 zlog_debug(
3782 "init br_port ctx %s: ifp %s, flags 0x%x backup_nhg 0x%x sph %s",
3783 dplane_op2str(op), ifp->name, flags, backup_nhg_id,
3784 vtep_str);
3785 }
3786
3787 ctx = dplane_ctx_alloc();
3788
3789 ctx->zd_op = op;
3790 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3791 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3792
3793 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3794 dplane_ctx_ns_init(ctx, zns, false);
3795
3796 ctx->zd_ifindex = ifp->ifindex;
3797 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3798
3799 /* Init the br-port-specific data area */
3800 memset(&ctx->u.br_port, 0, sizeof(ctx->u.br_port));
3801
3802 ctx->u.br_port.flags = flags;
3803 ctx->u.br_port.backup_nhg_id = backup_nhg_id;
3804 ctx->u.br_port.sph_filter_cnt = sph_filter_cnt;
3805 memcpy(ctx->u.br_port.sph_filters, sph_filters,
3806 sizeof(ctx->u.br_port.sph_filters[0]) * sph_filter_cnt);
3807
3808 /* Enqueue for processing on the dplane pthread */
3809 ret = dplane_update_enqueue(ctx);
3810
3811 /* Increment counter */
3812 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_in, 1,
3813 memory_order_relaxed);
3814
3815 if (ret == AOK) {
3816 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3817 } else {
3818 /* Error counter */
3819 atomic_fetch_add_explicit(&zdplane_info.dg_br_port_errors, 1,
3820 memory_order_relaxed);
3821 dplane_ctx_free(&ctx);
3822 }
3823
3824 return result;
3825 }
3826
3827 /*
3828 * Enqueue interface address add for the dataplane.
3829 */
3830 enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
3831 const struct connected *ifc)
3832 {
3833 #if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
3834 /* Extra checks for this OS path. */
3835
3836 /* Don't configure PtP addresses on broadcast ifs or reverse */
3837 if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
3838 if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
3839 zlog_debug("Failed to set intf addr: mismatch p2p and connected");
3840
3841 return ZEBRA_DPLANE_REQUEST_FAILURE;
3842 }
3843 #endif
3844
3845 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
3846 }
3847
3848 /*
3849 * Enqueue interface address remove/uninstall for the dataplane.
3850 */
3851 enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
3852 const struct connected *ifc)
3853 {
3854 return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
3855 }
3856
3857 static enum zebra_dplane_result intf_addr_update_internal(
3858 const struct interface *ifp, const struct connected *ifc,
3859 enum dplane_op_e op)
3860 {
3861 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3862 int ret = EINVAL;
3863 struct zebra_dplane_ctx *ctx = NULL;
3864 struct zebra_ns *zns;
3865
3866 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
3867 zlog_debug("init intf ctx %s: idx %d, addr %u:%pFX",
3868 dplane_op2str(op), ifp->ifindex, ifp->vrf->vrf_id,
3869 ifc->address);
3870
3871 ctx = dplane_ctx_alloc();
3872
3873 ctx->zd_op = op;
3874 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
3875 ctx->zd_vrf_id = ifp->vrf->vrf_id;
3876
3877 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
3878 dplane_ctx_ns_init(ctx, zns, false);
3879
3880 /* Init the interface-addr-specific area */
3881 memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
3882
3883 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
3884 ctx->zd_ifindex = ifp->ifindex;
3885 ctx->u.intf.prefix = *(ifc->address);
3886
3887 if (if_is_broadcast(ifp))
3888 ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
3889
3890 if (CONNECTED_PEER(ifc)) {
3891 ctx->u.intf.dest_prefix = *(ifc->destination);
3892 ctx->u.intf.flags |=
3893 (DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
3894 }
3895
3896 if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
3897 ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
3898
3899 if (ifc->label) {
3900 size_t len;
3901
3902 ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
3903
3904 /* Use embedded buffer if it's adequate; else allocate. */
3905 len = strlen(ifc->label);
3906
3907 if (len < sizeof(ctx->u.intf.label_buf)) {
3908 strlcpy(ctx->u.intf.label_buf, ifc->label,
3909 sizeof(ctx->u.intf.label_buf));
3910 ctx->u.intf.label = ctx->u.intf.label_buf;
3911 } else {
3912 ctx->u.intf.label = XSTRDUP(MTYPE_DP_CTX, ifc->label);
3913 }
3914 }
3915
3916 ret = dplane_update_enqueue(ctx);
3917
3918 /* Increment counter */
3919 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
3920 memory_order_relaxed);
3921
3922 if (ret == AOK)
3923 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3924 else {
3925 /* Error counter */
3926 atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
3927 1, memory_order_relaxed);
3928 dplane_ctx_free(&ctx);
3929 }
3930
3931 return result;
3932 }
3933
3934 /**
3935 * dplane_intf_update_internal() - Helper for enqueuing interface changes
3936 *
3937 * @ifp: Interface where the change occurred
3938 * @op: The operation to be enqueued
3939 *
3940 * Return: Result of the change
3941 */
3942 static enum zebra_dplane_result
3943 dplane_intf_update_internal(const struct interface *ifp, enum dplane_op_e op)
3944 {
3945 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
3946 int ret = EINVAL;
3947 struct zebra_dplane_ctx *ctx = NULL;
3948
3949 /* Obtain context block */
3950 ctx = dplane_ctx_alloc();
3951 if (!ctx) {
3952 ret = ENOMEM;
3953 goto done;
3954 }
3955
3956 ret = dplane_ctx_intf_init(ctx, op, ifp);
3957 if (ret == AOK)
3958 ret = dplane_update_enqueue(ctx);
3959
3960 done:
3961 /* Update counter */
3962 atomic_fetch_add_explicit(&zdplane_info.dg_intfs_in, 1,
3963 memory_order_relaxed);
3964
3965 if (ret == AOK)
3966 result = ZEBRA_DPLANE_REQUEST_QUEUED;
3967 else {
3968 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors, 1,
3969 memory_order_relaxed);
3970 if (ctx)
3971 dplane_ctx_free(&ctx);
3972 }
3973
3974 return result;
3975 }
3976
3977 /*
3978 * Enqueue an interface add for the dataplane.
3979 */
3980 enum zebra_dplane_result dplane_intf_add(const struct interface *ifp)
3981 {
3982 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3983
3984 if (ifp)
3985 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_INSTALL);
3986 return ret;
3987 }
3988
3989 /*
3990 * Enqueue an interface update for the dataplane.
3991 */
3992 enum zebra_dplane_result dplane_intf_update(const struct interface *ifp)
3993 {
3994 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
3995
3996 if (ifp)
3997 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_UPDATE);
3998 return ret;
3999 }
4000
4001 /*
4002 * Enqueue an interface delete for the dataplane.
4003 */
4004 enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp)
4005 {
4006 enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
4007
4008 if (ifp)
4009 ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_DELETE);
4010 return ret;
4011 }
4012
4013 /*
4014 * Enqueue vxlan/evpn mac add (or update).
4015 */
4016 enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
4017 const struct interface *bridge_ifp,
4018 vlanid_t vid,
4019 const struct ethaddr *mac,
4020 struct in_addr vtep_ip,
4021 bool sticky,
4022 uint32_t nhg_id,
4023 bool was_static)
4024 {
4025 enum zebra_dplane_result result;
4026 uint32_t update_flags = 0;
4027
4028 update_flags |= DPLANE_MAC_REMOTE;
4029 if (was_static)
4030 update_flags |= DPLANE_MAC_WAS_STATIC;
4031
4032 /* Use common helper api */
4033 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4034 vid, mac, vtep_ip, sticky, nhg_id, update_flags);
4035 return result;
4036 }
4037
4038 /*
4039 * Enqueue vxlan/evpn mac delete.
4040 */
4041 enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
4042 const struct interface *bridge_ifp,
4043 vlanid_t vid,
4044 const struct ethaddr *mac,
4045 struct in_addr vtep_ip)
4046 {
4047 enum zebra_dplane_result result;
4048 uint32_t update_flags = 0;
4049
4050 update_flags |= DPLANE_MAC_REMOTE;
4051
4052 /* Use common helper api */
4053 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
4054 vid, mac, vtep_ip, false, 0, update_flags);
4055 return result;
4056 }
4057
4058 /*
4059 * API to configure a link-local entry with either MAC address or IP information
4060 */
4061 enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
4062 const struct interface *ifp,
4063 struct ipaddr *link_ip,
4064 struct ipaddr *ip,
4065 uint32_t ndm_state, int protocol)
4066 {
4067 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4068 uint16_t state = 0;
4069 uint32_t update_flags;
4070
4071 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4072 zlog_debug("%s: init link ctx %s: ifp %s, link_ip %pIA ip %pIA",
4073 __func__, dplane_op2str(op), ifp->name, link_ip, ip);
4074
4075 if (ndm_state == ZEBRA_NEIGH_STATE_REACHABLE)
4076 state = DPLANE_NUD_REACHABLE;
4077 else if (ndm_state == ZEBRA_NEIGH_STATE_FAILED)
4078 state = DPLANE_NUD_FAILED;
4079
4080 update_flags = DPLANE_NEIGH_NO_EXTENSION;
4081
4082 result = neigh_update_internal(op, ifp, (const void *)link_ip,
4083 ipaddr_family(link_ip), ip, 0, state,
4084 update_flags, protocol);
4085
4086 return result;
4087 }
4088
4089 /*
4090 * Enqueue local mac add (or update).
4091 */
4092 enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
4093 const struct interface *bridge_ifp,
4094 vlanid_t vid,
4095 const struct ethaddr *mac,
4096 bool sticky,
4097 uint32_t set_static,
4098 uint32_t set_inactive)
4099 {
4100 enum zebra_dplane_result result;
4101 uint32_t update_flags = 0;
4102 struct in_addr vtep_ip;
4103
4104 if (set_static)
4105 update_flags |= DPLANE_MAC_SET_STATIC;
4106
4107 if (set_inactive)
4108 update_flags |= DPLANE_MAC_SET_INACTIVE;
4109
4110 vtep_ip.s_addr = 0;
4111
4112 /* Use common helper api */
4113 result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
4114 vid, mac, vtep_ip, sticky, 0,
4115 update_flags);
4116 return result;
4117 }
4118
4119 /*
4120 * Enqueue local mac del
4121 */
4122 enum zebra_dplane_result
4123 dplane_local_mac_del(const struct interface *ifp,
4124 const struct interface *bridge_ifp, vlanid_t vid,
4125 const struct ethaddr *mac)
4126 {
4127 enum zebra_dplane_result result;
4128 struct in_addr vtep_ip;
4129
4130 vtep_ip.s_addr = 0;
4131
4132 /* Use common helper api */
4133 result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, vid,
4134 mac, vtep_ip, false, 0, 0);
4135 return result;
4136 }
4137 /*
4138 * Public api to init an empty context - either newly-allocated or
4139 * reset/cleared - for a MAC update.
4140 */
4141 void dplane_mac_init(struct zebra_dplane_ctx *ctx,
4142 const struct interface *ifp,
4143 const struct interface *br_ifp,
4144 vlanid_t vid,
4145 const struct ethaddr *mac,
4146 struct in_addr vtep_ip,
4147 bool sticky,
4148 uint32_t nhg_id,
4149 uint32_t update_flags)
4150 {
4151 struct zebra_ns *zns;
4152
4153 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4154 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4155
4156 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4157 dplane_ctx_ns_init(ctx, zns, false);
4158
4159 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4160 ctx->zd_ifindex = ifp->ifindex;
4161
4162 /* Init the mac-specific data area */
4163 memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
4164
4165 ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
4166 ctx->u.macinfo.vtep_ip = vtep_ip;
4167 ctx->u.macinfo.mac = *mac;
4168 ctx->u.macinfo.vid = vid;
4169 ctx->u.macinfo.is_sticky = sticky;
4170 ctx->u.macinfo.nhg_id = nhg_id;
4171 ctx->u.macinfo.update_flags = update_flags;
4172 }
4173
4174 /*
4175 * Common helper api for MAC address/vxlan updates
4176 */
4177 static enum zebra_dplane_result
4178 mac_update_common(enum dplane_op_e op,
4179 const struct interface *ifp,
4180 const struct interface *br_ifp,
4181 vlanid_t vid,
4182 const struct ethaddr *mac,
4183 struct in_addr vtep_ip,
4184 bool sticky,
4185 uint32_t nhg_id,
4186 uint32_t update_flags)
4187 {
4188 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4189 int ret;
4190 struct zebra_dplane_ctx *ctx = NULL;
4191
4192 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
4193 zlog_debug("init mac ctx %s: mac %pEA, ifp %s, vtep %pI4",
4194 dplane_op2str(op), mac, ifp->name, &vtep_ip);
4195
4196 ctx = dplane_ctx_alloc();
4197 ctx->zd_op = op;
4198
4199 /* Common init for the ctx */
4200 dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky,
4201 nhg_id, update_flags);
4202
4203 /* Enqueue for processing on the dplane pthread */
4204 ret = dplane_update_enqueue(ctx);
4205
4206 /* Increment counter */
4207 atomic_fetch_add_explicit(&zdplane_info.dg_macs_in, 1,
4208 memory_order_relaxed);
4209
4210 if (ret == AOK)
4211 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4212 else {
4213 /* Error counter */
4214 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors, 1,
4215 memory_order_relaxed);
4216 dplane_ctx_free(&ctx);
4217 }
4218
4219 return result;
4220 }
4221
4222 /*
4223 * Enqueue evpn neighbor add for the dataplane.
4224 */
4225 enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
4226 const struct ipaddr *ip,
4227 const struct ethaddr *mac,
4228 uint32_t flags, bool was_static)
4229 {
4230 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4231 uint32_t update_flags = 0;
4232
4233 update_flags |= DPLANE_NEIGH_REMOTE;
4234
4235 if (was_static)
4236 update_flags |= DPLANE_NEIGH_WAS_STATIC;
4237
4238 result = neigh_update_internal(
4239 DPLANE_OP_NEIGH_INSTALL, ifp, (const void *)mac, AF_ETHERNET,
4240 ip, flags, DPLANE_NUD_NOARP, update_flags, 0);
4241
4242 return result;
4243 }
4244
4245 /*
4246 * Enqueue local neighbor add for the dataplane.
4247 */
4248 enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
4249 const struct ipaddr *ip,
4250 const struct ethaddr *mac,
4251 bool set_router, bool set_static,
4252 bool set_inactive)
4253 {
4254 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4255 uint32_t update_flags = 0;
4256 uint32_t ntf = 0;
4257 uint16_t state;
4258
4259 if (set_static)
4260 update_flags |= DPLANE_NEIGH_SET_STATIC;
4261
4262 if (set_inactive) {
4263 update_flags |= DPLANE_NEIGH_SET_INACTIVE;
4264 state = DPLANE_NUD_STALE;
4265 } else {
4266 state = DPLANE_NUD_REACHABLE;
4267 }
4268
4269 if (set_router)
4270 ntf |= DPLANE_NTF_ROUTER;
4271
4272 result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL, ifp,
4273 (const void *)mac, AF_ETHERNET, ip, ntf,
4274 state, update_flags, 0);
4275
4276 return result;
4277 }
4278
4279 /*
4280 * Enqueue evpn neighbor delete for the dataplane.
4281 */
4282 enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
4283 const struct ipaddr *ip)
4284 {
4285 enum zebra_dplane_result result;
4286 uint32_t update_flags = 0;
4287
4288 update_flags |= DPLANE_NEIGH_REMOTE;
4289
4290 result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE, ifp, NULL,
4291 AF_ETHERNET, ip, 0, 0, update_flags, 0);
4292
4293 return result;
4294 }
4295
4296 /*
4297 * Enqueue evpn VTEP add for the dataplane.
4298 */
4299 enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
4300 const struct in_addr *ip,
4301 vni_t vni)
4302 {
4303 enum zebra_dplane_result result;
4304 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4305 struct ipaddr addr;
4306
4307 if (IS_ZEBRA_DEBUG_VXLAN)
4308 zlog_debug("Install %pI4 into flood list for VNI %u intf %s(%u)",
4309 ip, vni, ifp->name, ifp->ifindex);
4310
4311 SET_IPADDR_V4(&addr);
4312 addr.ipaddr_v4 = *ip;
4313
4314 result = neigh_update_internal(DPLANE_OP_VTEP_ADD, ifp, &mac,
4315 AF_ETHERNET, &addr, 0, 0, 0, 0);
4316
4317 return result;
4318 }
4319
4320 /*
4321 * Enqueue evpn VTEP delete for the dataplane.
4322 */
4323 enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
4324 const struct in_addr *ip,
4325 vni_t vni)
4326 {
4327 enum zebra_dplane_result result;
4328 struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
4329 struct ipaddr addr;
4330
4331 if (IS_ZEBRA_DEBUG_VXLAN)
4332 zlog_debug(
4333 "Uninstall %pI4 from flood list for VNI %u intf %s(%u)",
4334 ip, vni, ifp->name, ifp->ifindex);
4335
4336 SET_IPADDR_V4(&addr);
4337 addr.ipaddr_v4 = *ip;
4338
4339 result = neigh_update_internal(DPLANE_OP_VTEP_DELETE, ifp,
4340 (const void *)&mac, AF_ETHERNET, &addr,
4341 0, 0, 0, 0);
4342
4343 return result;
4344 }
4345
4346 enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
4347 const struct ipaddr *ip)
4348 {
4349 enum zebra_dplane_result result;
4350
4351 result = neigh_update_internal(DPLANE_OP_NEIGH_DISCOVER, ifp, NULL,
4352 AF_ETHERNET, ip, DPLANE_NTF_USE,
4353 DPLANE_NUD_INCOMPLETE, 0, 0);
4354
4355 return result;
4356 }
4357
4358 enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
4359 const uint8_t family,
4360 const uint32_t app_probes,
4361 const uint32_t ucast_probes,
4362 const uint32_t mcast_probes)
4363 {
4364 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4365 int ret;
4366 struct zebra_dplane_ctx *ctx = NULL;
4367 struct zebra_ns *zns;
4368 enum dplane_op_e op = DPLANE_OP_NEIGH_TABLE_UPDATE;
4369
4370 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4371 zlog_debug("set neigh ctx %s: ifp %s, family %s",
4372 dplane_op2str(op), ifp->name, family2str(family));
4373 }
4374
4375 ctx = dplane_ctx_alloc();
4376
4377 ctx->zd_op = op;
4378 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4379 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4380
4381 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4382 dplane_ctx_ns_init(ctx, zns, false);
4383
4384 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4385 ctx->zd_ifindex = ifp->ifindex;
4386
4387 /* Init the neighbor-specific data area */
4388 memset(&ctx->u.neightable, 0, sizeof(ctx->u.neightable));
4389
4390 ctx->u.neightable.family = family;
4391 ctx->u.neightable.app_probes = app_probes;
4392 ctx->u.neightable.ucast_probes = ucast_probes;
4393 ctx->u.neightable.mcast_probes = mcast_probes;
4394
4395 /* Enqueue for processing on the dplane pthread */
4396 ret = dplane_update_enqueue(ctx);
4397
4398 /* Increment counter */
4399 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_in, 1,
4400 memory_order_relaxed);
4401
4402 if (ret == AOK)
4403 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4404 else {
4405 /* Error counter */
4406 atomic_fetch_add_explicit(&zdplane_info.dg_neightable_errors, 1,
4407 memory_order_relaxed);
4408 dplane_ctx_free(&ctx);
4409 }
4410
4411 return result;
4412 }
4413
4414 /*
4415 * Common helper api for neighbor updates
4416 */
4417 static enum zebra_dplane_result
4418 neigh_update_internal(enum dplane_op_e op, const struct interface *ifp,
4419 const void *link, const int link_family,
4420 const struct ipaddr *ip, uint32_t flags, uint16_t state,
4421 uint32_t update_flags, int protocol)
4422 {
4423 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4424 int ret;
4425 struct zebra_dplane_ctx *ctx = NULL;
4426 struct zebra_ns *zns;
4427 const struct ethaddr *mac = NULL;
4428 const struct ipaddr *link_ip = NULL;
4429
4430 if (link_family == AF_ETHERNET)
4431 mac = (const struct ethaddr *)link;
4432 else
4433 link_ip = (const struct ipaddr *)link;
4434
4435 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4436 char buf1[PREFIX_STRLEN];
4437
4438 buf1[0] = '\0';
4439 if (link_family == AF_ETHERNET)
4440 prefix_mac2str(mac, buf1, sizeof(buf1));
4441 else
4442 ipaddr2str(link_ip, buf1, sizeof(buf1));
4443 zlog_debug("init neigh ctx %s: ifp %s, %s %s, ip %pIA",
4444 dplane_op2str(op), ifp->name,
4445 link_family == AF_ETHERNET ? "mac " : "link ",
4446 buf1, ip);
4447 }
4448
4449 ctx = dplane_ctx_alloc();
4450
4451 ctx->zd_op = op;
4452 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4453 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4454 dplane_ctx_set_type(ctx, protocol);
4455
4456 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4457 dplane_ctx_ns_init(ctx, zns, false);
4458
4459 strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
4460 ctx->zd_ifindex = ifp->ifindex;
4461
4462 /* Init the neighbor-specific data area */
4463 memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
4464
4465 ctx->u.neigh.ip_addr = *ip;
4466 if (mac)
4467 ctx->u.neigh.link.mac = *mac;
4468 else if (link_ip)
4469 ctx->u.neigh.link.ip_addr = *link_ip;
4470
4471 ctx->u.neigh.flags = flags;
4472 ctx->u.neigh.state = state;
4473 ctx->u.neigh.update_flags = update_flags;
4474
4475 /* Enqueue for processing on the dplane pthread */
4476 ret = dplane_update_enqueue(ctx);
4477
4478 /* Increment counter */
4479 atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
4480 memory_order_relaxed);
4481
4482 if (ret == AOK)
4483 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4484 else {
4485 /* Error counter */
4486 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
4487 memory_order_relaxed);
4488 dplane_ctx_free(&ctx);
4489 }
4490
4491 return result;
4492 }
4493
4494 /*
4495 * Common helper api for PBR rule updates
4496 */
4497 static enum zebra_dplane_result
4498 rule_update_internal(enum dplane_op_e op, struct zebra_pbr_rule *new_rule,
4499 struct zebra_pbr_rule *old_rule)
4500 {
4501 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4502 struct zebra_dplane_ctx *ctx;
4503 int ret;
4504
4505 ctx = dplane_ctx_alloc();
4506
4507 ret = dplane_ctx_rule_init(ctx, op, new_rule, old_rule);
4508 if (ret != AOK)
4509 goto done;
4510
4511 ret = dplane_update_enqueue(ctx);
4512
4513 done:
4514 atomic_fetch_add_explicit(&zdplane_info.dg_rules_in, 1,
4515 memory_order_relaxed);
4516
4517 if (ret == AOK)
4518 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4519 else {
4520 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors, 1,
4521 memory_order_relaxed);
4522 dplane_ctx_free(&ctx);
4523 }
4524
4525 return result;
4526 }
4527
4528 enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule)
4529 {
4530 return rule_update_internal(DPLANE_OP_RULE_ADD, rule, NULL);
4531 }
4532
4533 enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule)
4534 {
4535 return rule_update_internal(DPLANE_OP_RULE_DELETE, rule, NULL);
4536 }
4537
4538 enum zebra_dplane_result dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
4539 struct zebra_pbr_rule *new_rule)
4540 {
4541 return rule_update_internal(DPLANE_OP_RULE_UPDATE, new_rule, old_rule);
4542 }
4543 /*
4544 * Common helper api for iptable updates
4545 */
4546 static enum zebra_dplane_result
4547 iptable_update_internal(enum dplane_op_e op, struct zebra_pbr_iptable *iptable)
4548 {
4549 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4550 struct zebra_dplane_ctx *ctx;
4551 int ret;
4552
4553 ctx = dplane_ctx_alloc();
4554
4555 ret = dplane_ctx_iptable_init(ctx, op, iptable);
4556 if (ret != AOK)
4557 goto done;
4558
4559 ret = dplane_update_enqueue(ctx);
4560
4561 done:
4562 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_in, 1,
4563 memory_order_relaxed);
4564
4565 if (ret == AOK)
4566 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4567 else {
4568 atomic_fetch_add_explicit(&zdplane_info.dg_iptable_errors, 1,
4569 memory_order_relaxed);
4570 dplane_ctx_free(&ctx);
4571 }
4572
4573 return result;
4574 }
4575
4576 enum zebra_dplane_result
4577 dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable)
4578 {
4579 return iptable_update_internal(DPLANE_OP_IPTABLE_ADD, iptable);
4580 }
4581
4582 enum zebra_dplane_result
4583 dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable)
4584 {
4585 return iptable_update_internal(DPLANE_OP_IPTABLE_DELETE, iptable);
4586 }
4587
4588 /*
4589 * Common helper api for ipset updates
4590 */
4591 static enum zebra_dplane_result
4592 ipset_update_internal(enum dplane_op_e op, struct zebra_pbr_ipset *ipset)
4593 {
4594 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4595 struct zebra_dplane_ctx *ctx;
4596 int ret;
4597
4598 ctx = dplane_ctx_alloc();
4599
4600 ret = dplane_ctx_ipset_init(ctx, op, ipset);
4601 if (ret != AOK)
4602 goto done;
4603
4604 ret = dplane_update_enqueue(ctx);
4605
4606 done:
4607 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_in, 1,
4608 memory_order_relaxed);
4609
4610 if (ret == AOK)
4611 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4612 else {
4613 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors, 1,
4614 memory_order_relaxed);
4615 dplane_ctx_free(&ctx);
4616 }
4617
4618 return result;
4619 }
4620
4621 enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset)
4622 {
4623 return ipset_update_internal(DPLANE_OP_IPSET_ADD, ipset);
4624 }
4625
4626 enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset)
4627 {
4628 return ipset_update_internal(DPLANE_OP_IPSET_DELETE, ipset);
4629 }
4630
4631 /*
4632 * Common helper api for ipset entry updates
4633 */
4634 static enum zebra_dplane_result
4635 ipset_entry_update_internal(enum dplane_op_e op,
4636 struct zebra_pbr_ipset_entry *ipset_entry)
4637 {
4638 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4639 struct zebra_dplane_ctx *ctx;
4640 int ret;
4641
4642 ctx = dplane_ctx_alloc();
4643
4644 ret = dplane_ctx_ipset_entry_init(ctx, op, ipset_entry);
4645 if (ret != AOK)
4646 goto done;
4647
4648 ret = dplane_update_enqueue(ctx);
4649
4650 done:
4651 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_in, 1,
4652 memory_order_relaxed);
4653
4654 if (ret == AOK)
4655 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4656 else {
4657 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_entry_errors,
4658 1, memory_order_relaxed);
4659 dplane_ctx_free(&ctx);
4660 }
4661
4662 return result;
4663 }
4664
4665 enum zebra_dplane_result
4666 dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset)
4667 {
4668 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_ADD, ipset);
4669 }
4670
4671 enum zebra_dplane_result
4672 dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset)
4673 {
4674 return ipset_entry_update_internal(DPLANE_OP_IPSET_ENTRY_DELETE, ipset);
4675 }
4676
4677 /*
4678 * Public api to enqueue a GRE set update for the dataplane
4679 */
4680 enum zebra_dplane_result
4681 dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
4682 unsigned int mtu, const struct zebra_l2info_gre *gre_info)
4683 {
4684 enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
4685 struct zebra_dplane_ctx *ctx;
4686 enum dplane_op_e op = DPLANE_OP_GRE_SET;
4687 int ret;
4688 struct zebra_ns *zns;
4689
4690 if (!ifp)
4691 return result;
4692
4693 zns = zebra_ns_lookup(ifp->vrf->vrf_id);
4694 if (!zns)
4695 return result;
4696
4697 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
4698 zlog_debug("init dplane ctx %s: if %s link %s%s",
4699 dplane_op2str(op), ifp->name,
4700 ifp_link ? "set" : "unset", ifp_link ?
4701 ifp_link->name : "");
4702 }
4703
4704 ctx = dplane_ctx_alloc();
4705 ctx->zd_op = op;
4706 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4707 dplane_ctx_ns_init(ctx, zns, false);
4708
4709 dplane_ctx_set_ifname(ctx, ifp->name);
4710 ctx->zd_vrf_id = ifp->vrf->vrf_id;
4711 ctx->zd_ifindex = ifp->ifindex;
4712 if (ifp_link)
4713 ctx->u.gre.link_ifindex = ifp_link->ifindex;
4714 else
4715 ctx->u.gre.link_ifindex = 0;
4716 if (gre_info)
4717 memcpy(&ctx->u.gre.info, gre_info, sizeof(ctx->u.gre.info));
4718 ctx->u.gre.mtu = mtu;
4719
4720 ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
4721
4722 /* Enqueue context for processing */
4723 ret = dplane_update_enqueue(ctx);
4724
4725 /* Update counter */
4726 atomic_fetch_add_explicit(&zdplane_info.dg_gre_set_in, 1,
4727 memory_order_relaxed);
4728
4729 if (ret == AOK)
4730 result = ZEBRA_DPLANE_REQUEST_QUEUED;
4731 else {
4732 atomic_fetch_add_explicit(
4733 &zdplane_info.dg_gre_set_errors, 1,
4734 memory_order_relaxed);
4735 if (ctx)
4736 dplane_ctx_free(&ctx);
4737 result = ZEBRA_DPLANE_REQUEST_FAILURE;
4738 }
4739 return result;
4740 }
4741
4742 /*
4743 * Handler for 'show dplane'
4744 */
4745 int dplane_show_helper(struct vty *vty, bool detailed)
4746 {
4747 uint64_t queued, queue_max, limit, errs, incoming, yields,
4748 other_errs;
4749
4750 /* Using atomics because counters are being changed in different
4751 * pthread contexts.
4752 */
4753 incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
4754 memory_order_relaxed);
4755 limit = atomic_load_explicit(&zdplane_info.dg_max_queued_updates,
4756 memory_order_relaxed);
4757 queued = atomic_load_explicit(&zdplane_info.dg_routes_queued,
4758 memory_order_relaxed);
4759 queue_max = atomic_load_explicit(&zdplane_info.dg_routes_queued_max,
4760 memory_order_relaxed);
4761 errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
4762 memory_order_relaxed);
4763 yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
4764 memory_order_relaxed);
4765 other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
4766 memory_order_relaxed);
4767
4768 vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
4769 incoming);
4770 vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
4771 vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
4772 vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
4773 vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
4774 vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
4775 vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
4776
4777 incoming = atomic_load_explicit(&zdplane_info.dg_lsps_in,
4778 memory_order_relaxed);
4779 errs = atomic_load_explicit(&zdplane_info.dg_lsp_errors,
4780 memory_order_relaxed);
4781 vty_out(vty, "LSP updates: %"PRIu64"\n", incoming);
4782 vty_out(vty, "LSP update errors: %"PRIu64"\n", errs);
4783
4784 incoming = atomic_load_explicit(&zdplane_info.dg_pws_in,
4785 memory_order_relaxed);
4786 errs = atomic_load_explicit(&zdplane_info.dg_pw_errors,
4787 memory_order_relaxed);
4788 vty_out(vty, "PW updates: %"PRIu64"\n", incoming);
4789 vty_out(vty, "PW update errors: %"PRIu64"\n", errs);
4790
4791 incoming = atomic_load_explicit(&zdplane_info.dg_intf_addrs_in,
4792 memory_order_relaxed);
4793 errs = atomic_load_explicit(&zdplane_info.dg_intf_addr_errors,
4794 memory_order_relaxed);
4795 vty_out(vty, "Intf addr updates: %"PRIu64"\n", incoming);
4796 vty_out(vty, "Intf addr errors: %"PRIu64"\n", errs);
4797
4798 incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
4799 memory_order_relaxed);
4800 errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
4801 memory_order_relaxed);
4802 vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
4803 vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
4804
4805 incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
4806 memory_order_relaxed);
4807 errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
4808 memory_order_relaxed);
4809 vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
4810 vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
4811
4812 incoming = atomic_load_explicit(&zdplane_info.dg_rules_in,
4813 memory_order_relaxed);
4814 errs = atomic_load_explicit(&zdplane_info.dg_rule_errors,
4815 memory_order_relaxed);
4816 vty_out(vty, "Rule updates: %" PRIu64 "\n", incoming);
4817 vty_out(vty, "Rule errors: %" PRIu64 "\n", errs);
4818
4819 incoming = atomic_load_explicit(&zdplane_info.dg_br_port_in,
4820 memory_order_relaxed);
4821 errs = atomic_load_explicit(&zdplane_info.dg_br_port_errors,
4822 memory_order_relaxed);
4823 vty_out(vty, "Bridge port updates: %" PRIu64 "\n", incoming);
4824 vty_out(vty, "Bridge port errors: %" PRIu64 "\n", errs);
4825
4826 incoming = atomic_load_explicit(&zdplane_info.dg_iptable_in,
4827 memory_order_relaxed);
4828 errs = atomic_load_explicit(&zdplane_info.dg_iptable_errors,
4829 memory_order_relaxed);
4830 vty_out(vty, "IPtable updates: %" PRIu64 "\n", incoming);
4831 vty_out(vty, "IPtable errors: %" PRIu64 "\n", errs);
4832 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_in,
4833 memory_order_relaxed);
4834 errs = atomic_load_explicit(&zdplane_info.dg_ipset_errors,
4835 memory_order_relaxed);
4836 vty_out(vty, "IPset updates: %" PRIu64 "\n", incoming);
4837 vty_out(vty, "IPset errors: %" PRIu64 "\n", errs);
4838 incoming = atomic_load_explicit(&zdplane_info.dg_ipset_entry_in,
4839 memory_order_relaxed);
4840 errs = atomic_load_explicit(&zdplane_info.dg_ipset_entry_errors,
4841 memory_order_relaxed);
4842 vty_out(vty, "IPset entry updates: %" PRIu64 "\n", incoming);
4843 vty_out(vty, "IPset entry errors: %" PRIu64 "\n", errs);
4844
4845 incoming = atomic_load_explicit(&zdplane_info.dg_neightable_in,
4846 memory_order_relaxed);
4847 errs = atomic_load_explicit(&zdplane_info.dg_neightable_errors,
4848 memory_order_relaxed);
4849 vty_out(vty, "Neighbor Table updates: %"PRIu64"\n", incoming);
4850 vty_out(vty, "Neighbor Table errors: %"PRIu64"\n", errs);
4851
4852 incoming = atomic_load_explicit(&zdplane_info.dg_gre_set_in,
4853 memory_order_relaxed);
4854 errs = atomic_load_explicit(&zdplane_info.dg_gre_set_errors,
4855 memory_order_relaxed);
4856 vty_out(vty, "GRE set updates: %"PRIu64"\n", incoming);
4857 vty_out(vty, "GRE set errors: %"PRIu64"\n", errs);
4858 return CMD_SUCCESS;
4859 }
4860
4861 /*
4862 * Handler for 'show dplane providers'
4863 */
4864 int dplane_show_provs_helper(struct vty *vty, bool detailed)
4865 {
4866 struct zebra_dplane_provider *prov;
4867 uint64_t in, in_q, in_max, out, out_q, out_max;
4868
4869 vty_out(vty, "Zebra dataplane providers:\n");
4870
4871 DPLANE_LOCK();
4872 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
4873 DPLANE_UNLOCK();
4874
4875 /* Show counters, useful info from each registered provider */
4876 while (prov) {
4877
4878 in = atomic_load_explicit(&prov->dp_in_counter,
4879 memory_order_relaxed);
4880 in_q = atomic_load_explicit(&prov->dp_in_queued,
4881 memory_order_relaxed);
4882 in_max = atomic_load_explicit(&prov->dp_in_max,
4883 memory_order_relaxed);
4884 out = atomic_load_explicit(&prov->dp_out_counter,
4885 memory_order_relaxed);
4886 out_q = atomic_load_explicit(&prov->dp_out_queued,
4887 memory_order_relaxed);
4888 out_max = atomic_load_explicit(&prov->dp_out_max,
4889 memory_order_relaxed);
4890
4891 vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n",
4892 prov->dp_name, prov->dp_id, in, in_q, in_max,
4893 out, out_q, out_max);
4894
4895 DPLANE_LOCK();
4896 prov = TAILQ_NEXT(prov, dp_prov_link);
4897 DPLANE_UNLOCK();
4898 }
4899
4900 return CMD_SUCCESS;
4901 }
4902
4903 /*
4904 * Helper for 'show run' etc.
4905 */
4906 int dplane_config_write_helper(struct vty *vty)
4907 {
4908 if (zdplane_info.dg_max_queued_updates != DPLANE_DEFAULT_MAX_QUEUED)
4909 vty_out(vty, "zebra dplane limit %u\n",
4910 zdplane_info.dg_max_queued_updates);
4911
4912 return 0;
4913 }
4914
4915 /*
4916 * Provider registration
4917 */
4918 int dplane_provider_register(const char *name,
4919 enum dplane_provider_prio prio,
4920 int flags,
4921 int (*start_fp)(struct zebra_dplane_provider *),
4922 int (*fp)(struct zebra_dplane_provider *),
4923 int (*fini_fp)(struct zebra_dplane_provider *,
4924 bool early),
4925 void *data,
4926 struct zebra_dplane_provider **prov_p)
4927 {
4928 int ret = 0;
4929 struct zebra_dplane_provider *p = NULL, *last;
4930
4931 /* Validate */
4932 if (fp == NULL) {
4933 ret = EINVAL;
4934 goto done;
4935 }
4936
4937 if (prio <= DPLANE_PRIO_NONE ||
4938 prio > DPLANE_PRIO_LAST) {
4939 ret = EINVAL;
4940 goto done;
4941 }
4942
4943 /* Allocate and init new provider struct */
4944 p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
4945
4946 pthread_mutex_init(&(p->dp_mutex), NULL);
4947 TAILQ_INIT(&(p->dp_ctx_in_q));
4948 TAILQ_INIT(&(p->dp_ctx_out_q));
4949
4950 p->dp_flags = flags;
4951 p->dp_priority = prio;
4952 p->dp_fp = fp;
4953 p->dp_start = start_fp;
4954 p->dp_fini = fini_fp;
4955 p->dp_data = data;
4956
4957 /* Lock - the dplane pthread may be running */
4958 DPLANE_LOCK();
4959
4960 p->dp_id = ++zdplane_info.dg_provider_id;
4961
4962 if (name)
4963 strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
4964 else
4965 snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
4966 "provider-%u", p->dp_id);
4967
4968 /* Insert into list ordered by priority */
4969 TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
4970 if (last->dp_priority > p->dp_priority)
4971 break;
4972 }
4973
4974 if (last)
4975 TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
4976 else
4977 TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
4978 dp_prov_link);
4979
4980 /* And unlock */
4981 DPLANE_UNLOCK();
4982
4983 if (IS_ZEBRA_DEBUG_DPLANE)
4984 zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
4985 p->dp_name, p->dp_id, p->dp_priority);
4986
4987 done:
4988 if (prov_p)
4989 *prov_p = p;
4990
4991 return ret;
4992 }
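
/*
 * Illustrative sketch only -- the "sample_plugin" names below are
 * hypothetical and are not part of this file or of the dplane api.
 * This shows how an external plugin might use dplane_provider_register()
 * above: supply a process callback, and receive the provider handle
 * through the final argument.
 */
static int sample_plugin_process(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;

	/* Minimal pass-through: a real plugin would program its target
	 * here and set a per-context status reflecting the outcome.
	 */
	while ((ctx = dplane_provider_dequeue_in_ctx(prov)) != NULL) {
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	return 0;
}

static void sample_plugin_register(void)
{
	struct zebra_dplane_provider *prov = NULL;
	int ret;

	ret = dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       NULL /* start */,
				       sample_plugin_process,
				       NULL /* fini */,
				       NULL /* data */, &prov);
	if (ret != AOK)
		zlog_err("sample plugin: provider registration failed: %d",
			 ret);
}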
4993
4994 /* Accessors for provider attributes */
4995 const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
4996 {
4997 return prov->dp_name;
4998 }
4999
5000 uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
5001 {
5002 return prov->dp_id;
5003 }
5004
5005 void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
5006 {
5007 return prov->dp_data;
5008 }
5009
5010 int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
5011 {
5012 return zdplane_info.dg_updates_per_cycle;
5013 }
5014
5015 /* Lock/unlock a provider's mutex - iff the provider was registered with
5016 * the THREADED flag.
5017 */
5018 void dplane_provider_lock(struct zebra_dplane_provider *prov)
5019 {
5020 if (dplane_provider_is_threaded(prov))
5021 DPLANE_PROV_LOCK(prov);
5022 }
5023
5024 void dplane_provider_unlock(struct zebra_dplane_provider *prov)
5025 {
5026 if (dplane_provider_is_threaded(prov))
5027 DPLANE_PROV_UNLOCK(prov);
5028 }
5029
5030 /*
5031 * Dequeue and maintain associated counter
5032 */
5033 struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
5034 struct zebra_dplane_provider *prov)
5035 {
5036 struct zebra_dplane_ctx *ctx = NULL;
5037
5038 dplane_provider_lock(prov);
5039
5040 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5041 if (ctx) {
5042 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5043
5044 atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
5045 memory_order_relaxed);
5046 }
5047
5048 dplane_provider_unlock(prov);
5049
5050 return ctx;
5051 }
5052
5053 /*
5054 * Dequeue work to a list, return count
5055 */
5056 int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
5057 struct dplane_ctx_q *listp)
5058 {
5059 int limit, ret;
5060 struct zebra_dplane_ctx *ctx;
5061
5062 limit = zdplane_info.dg_updates_per_cycle;
5063
5064 dplane_provider_lock(prov);
5065
5066 for (ret = 0; ret < limit; ret++) {
5067 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5068 if (ctx) {
5069 TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
5070
5071 TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
5072 } else {
5073 break;
5074 }
5075 }
5076
5077 if (ret > 0)
5078 atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
5079 memory_order_relaxed);
5080
5081 dplane_provider_unlock(prov);
5082
5083 return ret;
5084 }
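
/*
 * Illustrative sketch only -- "sample_list_process" is a hypothetical
 * name, not part of the dplane api. It shows the list-based dequeue
 * helper above being used from a provider's process callback: drain a
 * batch, handle each context, and ask to be rescheduled if a full
 * batch was taken.
 */
static int sample_list_process(struct zebra_dplane_provider *prov)
{
	struct dplane_ctx_q work_list;
	struct zebra_dplane_ctx *ctx;
	int count;

	TAILQ_INIT(&work_list);

	/* Pull up to one work-limit's worth of contexts */
	count = dplane_provider_dequeue_in_list(prov, &work_list);

	while ((ctx = TAILQ_FIRST(&work_list)) != NULL) {
		TAILQ_REMOVE(&work_list, ctx, zd_q_entries);

		/* A real provider would do its programming work here */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Rescheduling keeps the dplane pthread servicing a backlog */
	if (count >= dplane_provider_get_work_limit(prov))
		dplane_provider_work_ready();

	return 0;
}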
5085
5086 uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov)
5087 {
5088 return atomic_load_explicit(&(prov->dp_out_counter),
5089 memory_order_relaxed);
5090 }
5091
5092 /*
5093 * Enqueue and maintain associated counter
5094 */
5095 void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
5096 struct zebra_dplane_ctx *ctx)
5097 {
5098 uint64_t curr, high;
5099
5100 dplane_provider_lock(prov);
5101
5102 TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
5103 zd_q_entries);
5104
5105 /* Maintain out-queue counters */
5106 atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
5107 memory_order_relaxed);
5108 curr = atomic_load_explicit(&prov->dp_out_queued,
5109 memory_order_relaxed);
5110 high = atomic_load_explicit(&prov->dp_out_max,
5111 memory_order_relaxed);
5112 if (curr > high)
5113 atomic_store_explicit(&prov->dp_out_max, curr,
5114 memory_order_relaxed);
5115
5116 dplane_provider_unlock(prov);
5117
5118 atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
5119 memory_order_relaxed);
5120 }
5121
5122 /*
5123 * Accessor for provider object
5124 */
5125 bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
5126 {
5127 return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
5128 }
5129
5130 #ifdef HAVE_NETLINK
5131 /*
5132 * Callback when an OS (netlink) incoming event read is ready. This runs
5133 * in the dplane pthread.
5134 */
5135 static void dplane_incoming_read(struct thread *event)
5136 {
5137 struct dplane_zns_info *zi = THREAD_ARG(event);
5138
5139 kernel_dplane_read(&zi->info);
5140
5141 /* Re-start read task */
5142 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5143 zi->info.sock, &zi->t_read);
5144 }
5145
5146 /*
5147 * Callback in the dataplane pthread that requests info from the OS and
5148 * initiates netlink reads.
5149 */
5150 static void dplane_incoming_request(struct thread *event)
5151 {
5152 struct dplane_zns_info *zi = THREAD_ARG(event);
5153
5154 /* Start read task */
5155 thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
5156 zi->info.sock, &zi->t_read);
5157
5158 /* Send requests */
5159 netlink_request_netconf(zi->info.sock);
5160 }
5161
5162 /*
5163 * Initiate requests for existing info from the OS. This is called by the
5164 * main pthread, but we want all activity on the dplane netlink socket to
5165 * take place on the dplane pthread, so we schedule an event to accomplish
5166 * that.
5167 */
5168 static void dplane_kernel_info_request(struct dplane_zns_info *zi)
5169 {
5170 /* If we happen to encounter an enabled zns before the dplane
5171 * pthread is running, we'll initiate this later on.
5172 */
5173 if (zdplane_info.dg_master)
5174 thread_add_event(zdplane_info.dg_master,
5175 dplane_incoming_request, zi, 0,
5176 &zi->t_request);
5177 }
5178
5179 #endif /* HAVE_NETLINK */
5180
5181 /*
5182 * Notify dplane when namespaces are enabled and disabled. The dplane
5183 * needs to start and stop reading incoming events from the zns. In the
5184 * common case where vrfs are _not_ namespaces, there will only be one
5185 * of these.
5186 *
5187 * This is called in the main pthread.
5188 */
5189 void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
5190 {
5191 struct dplane_zns_info *zi;
5192
5193 if (IS_ZEBRA_DEBUG_DPLANE)
5194 zlog_debug("%s: %s for nsid %u", __func__,
5195 (enabled ? "ENABLED" : "DISABLED"), zns->ns_id);
5196
5197 /* Search for an existing zns info entry */
5198 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5199 if (zi->info.ns_id == zns->ns_id)
5200 break;
5201 }
5202
5203 if (enabled) {
5204 /* Create a new entry if necessary; start reading. */
5205 if (zi == NULL) {
5206 zi = XCALLOC(MTYPE_DP_NS, sizeof(*zi));
5207
5208 zi->info.ns_id = zns->ns_id;
5209
5210 zns_info_list_add_tail(&zdplane_info.dg_zns_list, zi);
5211
5212 if (IS_ZEBRA_DEBUG_DPLANE)
5213 zlog_debug("%s: nsid %u, new zi %p", __func__,
5214 zns->ns_id, zi);
5215 }
5216
5217 /* Make sure we're up-to-date with the zns object */
5218 #if defined(HAVE_NETLINK)
5219 zi->info.is_cmd = false;
5220 zi->info.sock = zns->netlink_dplane_in.sock;
5221
5222 /* Initiate requests for existing info from the OS, and
5223 * begin reading from the netlink socket.
5224 */
5225 dplane_kernel_info_request(zi);
5226 #endif
5227 } else if (zi) {
5228 if (IS_ZEBRA_DEBUG_DPLANE)
5229 zlog_debug("%s: nsid %u, deleting zi %p", __func__,
5230 zns->ns_id, zi);
5231
5232 /* Stop reading, free memory */
5233 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5234
5235 /* Stop any outstanding tasks */
5236 if (zdplane_info.dg_master) {
5237 thread_cancel_async(zdplane_info.dg_master,
5238 &zi->t_request, NULL);
5239
5240 thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
5241 NULL);
5242 }
5243
5244 XFREE(MTYPE_DP_NS, zi);
5245 }
5246 }
5247
5248 /*
5249 * Provider api to signal that work/events are available
5250 * for the dataplane pthread.
5251 */
5252 int dplane_provider_work_ready(void)
5253 {
5254 /* Note that during zebra startup, we may be offered work before
5255 * the dataplane pthread (and thread-master) are ready. We want to
5256 * enqueue the work, but the event-scheduling machinery may not be
5257 * available.
5258 */
5259 if (zdplane_info.dg_run) {
5260 thread_add_event(zdplane_info.dg_master,
5261 dplane_thread_loop, NULL, 0,
5262 &zdplane_info.dg_t_update);
5263 }
5264
5265 return AOK;
5266 }
5267
5268 /*
5269 * Enqueue a context directly to zebra main.
5270 */
5271 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
5272 {
5273 struct dplane_ctx_q temp_list;
5274
5275 /* Zebra's api takes a list, so we need to use a temporary list */
5276 TAILQ_INIT(&temp_list);
5277
5278 TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
5279 (zdplane_info.dg_results_cb)(&temp_list);
5280 }
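
/*
 * Illustrative sketch only -- the function name is hypothetical. A
 * provider that finishes work asynchronously (outside of its process
 * callback) can set the final status and hand the context straight
 * back to zebra main with the helper above, instead of using its
 * out-queue.
 */
static void sample_async_completion(struct zebra_dplane_ctx *ctx, bool ok)
{
	dplane_ctx_set_status(ctx, ok ? ZEBRA_DPLANE_REQUEST_SUCCESS
				      : ZEBRA_DPLANE_REQUEST_FAILURE);

	dplane_provider_enqueue_to_zebra(ctx);
}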
5281
5282 /*
5283 * Kernel dataplane provider
5284 */
5285
5286 static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
5287 {
5288 char buf[PREFIX_STRLEN];
5289
5290 switch (dplane_ctx_get_op(ctx)) {
5291
5292 case DPLANE_OP_ROUTE_INSTALL:
5293 case DPLANE_OP_ROUTE_UPDATE:
5294 case DPLANE_OP_ROUTE_DELETE:
5295 zlog_debug("%u:%pFX Dplane route update ctx %p op %s",
5296 dplane_ctx_get_vrf(ctx), dplane_ctx_get_dest(ctx),
5297 ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
5298 break;
5299
5300 case DPLANE_OP_NH_INSTALL:
5301 case DPLANE_OP_NH_UPDATE:
5302 case DPLANE_OP_NH_DELETE:
5303 zlog_debug("ID (%u) Dplane nexthop update ctx %p op %s",
5304 dplane_ctx_get_nhe_id(ctx), ctx,
5305 dplane_op2str(dplane_ctx_get_op(ctx)));
5306 break;
5307
5308 case DPLANE_OP_LSP_INSTALL:
5309 case DPLANE_OP_LSP_UPDATE:
5310 case DPLANE_OP_LSP_DELETE:
5311 break;
5312
5313 case DPLANE_OP_PW_INSTALL:
5314 case DPLANE_OP_PW_UNINSTALL:
5315 zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
5316 dplane_ctx_get_ifname(ctx),
5317 dplane_op2str(ctx->zd_op), dplane_ctx_get_pw_af(ctx),
5318 dplane_ctx_get_pw_local_label(ctx),
5319 dplane_ctx_get_pw_remote_label(ctx));
5320 break;
5321
5322 case DPLANE_OP_ADDR_INSTALL:
5323 case DPLANE_OP_ADDR_UNINSTALL:
5324 zlog_debug("Dplane intf %s, idx %u, addr %pFX",
5325 dplane_op2str(dplane_ctx_get_op(ctx)),
5326 dplane_ctx_get_ifindex(ctx),
5327 dplane_ctx_get_intf_addr(ctx));
5328 break;
5329
5330 case DPLANE_OP_MAC_INSTALL:
5331 case DPLANE_OP_MAC_DELETE:
5332 prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf,
5333 sizeof(buf));
5334
5335 zlog_debug("Dplane %s, mac %s, ifindex %u",
5336 dplane_op2str(dplane_ctx_get_op(ctx)),
5337 buf, dplane_ctx_get_ifindex(ctx));
5338 break;
5339
5340 case DPLANE_OP_NEIGH_INSTALL:
5341 case DPLANE_OP_NEIGH_UPDATE:
5342 case DPLANE_OP_NEIGH_DELETE:
5343 case DPLANE_OP_VTEP_ADD:
5344 case DPLANE_OP_VTEP_DELETE:
5345 case DPLANE_OP_NEIGH_DISCOVER:
5346 case DPLANE_OP_NEIGH_IP_INSTALL:
5347 case DPLANE_OP_NEIGH_IP_DELETE:
5348 ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
5349 sizeof(buf));
5350
5351 zlog_debug("Dplane %s, ip %s, ifindex %u",
5352 dplane_op2str(dplane_ctx_get_op(ctx)),
5353 buf, dplane_ctx_get_ifindex(ctx));
5354 break;
5355
5356 case DPLANE_OP_RULE_ADD:
5357 case DPLANE_OP_RULE_DELETE:
5358 case DPLANE_OP_RULE_UPDATE:
5359 zlog_debug("Dplane rule update op %s, if %s(%u), ctx %p",
5360 dplane_op2str(dplane_ctx_get_op(ctx)),
5361 dplane_ctx_get_ifname(ctx),
5362 dplane_ctx_get_ifindex(ctx), ctx);
5363 break;
5364
5365 case DPLANE_OP_SYS_ROUTE_ADD:
5366 case DPLANE_OP_SYS_ROUTE_DELETE:
5367 case DPLANE_OP_ROUTE_NOTIFY:
5368 case DPLANE_OP_LSP_NOTIFY:
5369 case DPLANE_OP_BR_PORT_UPDATE:
5370
5371 case DPLANE_OP_NONE:
5372 break;
5373
5374 case DPLANE_OP_IPTABLE_ADD:
5375 case DPLANE_OP_IPTABLE_DELETE: {
5376 struct zebra_pbr_iptable ipt;
5377
5378 dplane_ctx_get_pbr_iptable(ctx, &ipt);
5379 zlog_debug("Dplane iptable update op %s, unique(%u), ctx %p",
5380 dplane_op2str(dplane_ctx_get_op(ctx)), ipt.unique,
5381 ctx);
5382 } break;
5383 case DPLANE_OP_IPSET_ADD:
5384 case DPLANE_OP_IPSET_DELETE: {
5385 struct zebra_pbr_ipset ipset;
5386
5387 dplane_ctx_get_pbr_ipset(ctx, &ipset);
5388 zlog_debug("Dplane ipset update op %s, unique(%u), ctx %p",
5389 dplane_op2str(dplane_ctx_get_op(ctx)), ipset.unique,
5390 ctx);
5391 } break;
5392 case DPLANE_OP_IPSET_ENTRY_ADD:
5393 case DPLANE_OP_IPSET_ENTRY_DELETE: {
5394 struct zebra_pbr_ipset_entry ipent;
5395
5396 dplane_ctx_get_pbr_ipset_entry(ctx, &ipent);
5397 zlog_debug(
5398 "Dplane ipset entry update op %s, unique(%u), ctx %p",
5399 dplane_op2str(dplane_ctx_get_op(ctx)), ipent.unique,
5400 ctx);
5401 } break;
5402 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5403 zlog_debug("Dplane neigh table op %s, ifp %s, family %s",
5404 dplane_op2str(dplane_ctx_get_op(ctx)),
5405 dplane_ctx_get_ifname(ctx),
5406 family2str(dplane_ctx_neightable_get_family(ctx)));
5407 break;
5408 case DPLANE_OP_GRE_SET:
5409 zlog_debug("Dplane gre set op %s, ifp %s, link %u",
5410 dplane_op2str(dplane_ctx_get_op(ctx)),
5411 dplane_ctx_get_ifname(ctx),
5412 ctx->u.gre.link_ifindex);
5413 break;
5414
5415 case DPLANE_OP_INTF_ADDR_ADD:
5416 case DPLANE_OP_INTF_ADDR_DEL:
5417 zlog_debug("Dplane incoming op %s, intf %s, addr %pFX",
5418 dplane_op2str(dplane_ctx_get_op(ctx)),
5419 dplane_ctx_get_ifname(ctx),
5420 dplane_ctx_get_intf_addr(ctx));
5421 break;
5422
5423 case DPLANE_OP_INTF_NETCONFIG:
5424 zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
5425 dplane_op2str(dplane_ctx_get_op(ctx)),
5426 dplane_ctx_get_netconf_ifindex(ctx),
5427 dplane_ctx_get_netconf_mpls(ctx),
5428 dplane_ctx_get_netconf_mcast(ctx));
5429 break;
5430
5431 case DPLANE_OP_INTF_INSTALL:
5432 case DPLANE_OP_INTF_UPDATE:
5433 case DPLANE_OP_INTF_DELETE:
5434 zlog_debug("Dplane intf %s, idx %u, protodown %d",
5435 dplane_op2str(dplane_ctx_get_op(ctx)),
5436 dplane_ctx_get_ifindex(ctx),
5437 dplane_ctx_intf_is_protodown(ctx));
5438 break;
5439 }
5440 }
5441
5442 static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
5443 {
5444 enum zebra_dplane_result res = dplane_ctx_get_status(ctx);
5445
5446 switch (dplane_ctx_get_op(ctx)) {
5447
5448 case DPLANE_OP_ROUTE_INSTALL:
5449 case DPLANE_OP_ROUTE_UPDATE:
5450 case DPLANE_OP_ROUTE_DELETE:
5451 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5452 atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
5453 1, memory_order_relaxed);
5454
5455 if ((dplane_ctx_get_op(ctx) != DPLANE_OP_ROUTE_DELETE)
5456 && (res == ZEBRA_DPLANE_REQUEST_SUCCESS)) {
5457 struct nexthop *nexthop;
5458
5459 /* Update installed nexthops to signal which have been
5460 * installed.
5461 */
5462 for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
5463 nexthop)) {
5464 if (CHECK_FLAG(nexthop->flags,
5465 NEXTHOP_FLAG_RECURSIVE))
5466 continue;
5467
5468 if (CHECK_FLAG(nexthop->flags,
5469 NEXTHOP_FLAG_ACTIVE)) {
5470 SET_FLAG(nexthop->flags,
5471 NEXTHOP_FLAG_FIB);
5472 }
5473 }
5474 }
5475 break;
5476
5477 case DPLANE_OP_NH_INSTALL:
5478 case DPLANE_OP_NH_UPDATE:
5479 case DPLANE_OP_NH_DELETE:
5480 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5481 atomic_fetch_add_explicit(
5482 &zdplane_info.dg_nexthop_errors, 1,
5483 memory_order_relaxed);
5484 break;
5485
5486 case DPLANE_OP_LSP_INSTALL:
5487 case DPLANE_OP_LSP_UPDATE:
5488 case DPLANE_OP_LSP_DELETE:
5489 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5490 atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors,
5491 1, memory_order_relaxed);
5492 break;
5493
5494 case DPLANE_OP_PW_INSTALL:
5495 case DPLANE_OP_PW_UNINSTALL:
5496 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5497 atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
5498 memory_order_relaxed);
5499 break;
5500
5501 case DPLANE_OP_ADDR_INSTALL:
5502 case DPLANE_OP_ADDR_UNINSTALL:
5503 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5504 atomic_fetch_add_explicit(
5505 &zdplane_info.dg_intf_addr_errors, 1,
5506 memory_order_relaxed);
5507 break;
5508
5509 case DPLANE_OP_MAC_INSTALL:
5510 case DPLANE_OP_MAC_DELETE:
5511 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5512 atomic_fetch_add_explicit(&zdplane_info.dg_mac_errors,
5513 1, memory_order_relaxed);
5514 break;
5515
5516 case DPLANE_OP_NEIGH_INSTALL:
5517 case DPLANE_OP_NEIGH_UPDATE:
5518 case DPLANE_OP_NEIGH_DELETE:
5519 case DPLANE_OP_VTEP_ADD:
5520 case DPLANE_OP_VTEP_DELETE:
5521 case DPLANE_OP_NEIGH_DISCOVER:
5522 case DPLANE_OP_NEIGH_IP_INSTALL:
5523 case DPLANE_OP_NEIGH_IP_DELETE:
5524 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5525 atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
5526 1, memory_order_relaxed);
5527 break;
5528
5529 case DPLANE_OP_RULE_ADD:
5530 case DPLANE_OP_RULE_DELETE:
5531 case DPLANE_OP_RULE_UPDATE:
5532 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5533 atomic_fetch_add_explicit(&zdplane_info.dg_rule_errors,
5534 1, memory_order_relaxed);
5535 break;
5536
5537 case DPLANE_OP_IPTABLE_ADD:
5538 case DPLANE_OP_IPTABLE_DELETE:
5539 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5540 atomic_fetch_add_explicit(
5541 &zdplane_info.dg_iptable_errors, 1,
5542 memory_order_relaxed);
5543 break;
5544
5545 case DPLANE_OP_IPSET_ADD:
5546 case DPLANE_OP_IPSET_DELETE:
5547 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5548 atomic_fetch_add_explicit(&zdplane_info.dg_ipset_errors,
5549 1, memory_order_relaxed);
5550 break;
5551
5552 case DPLANE_OP_IPSET_ENTRY_ADD:
5553 case DPLANE_OP_IPSET_ENTRY_DELETE:
5554 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5555 atomic_fetch_add_explicit(
5556 &zdplane_info.dg_ipset_entry_errors, 1,
5557 memory_order_relaxed);
5558 break;
5559
5560 case DPLANE_OP_NEIGH_TABLE_UPDATE:
5561 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5562 atomic_fetch_add_explicit(
5563 &zdplane_info.dg_neightable_errors, 1,
5564 memory_order_relaxed);
5565 break;
5566
5567 case DPLANE_OP_GRE_SET:
5568 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5569 atomic_fetch_add_explicit(
5570 &zdplane_info.dg_gre_set_errors, 1,
5571 memory_order_relaxed);
5572 break;
5573
5574 case DPLANE_OP_INTF_INSTALL:
5575 case DPLANE_OP_INTF_UPDATE:
5576 case DPLANE_OP_INTF_DELETE:
5577 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5578 atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors,
5579 1, memory_order_relaxed);
5580 break;
5581
5582 /* Ignore 'notifications' - no-op */
5583 case DPLANE_OP_SYS_ROUTE_ADD:
5584 case DPLANE_OP_SYS_ROUTE_DELETE:
5585 case DPLANE_OP_ROUTE_NOTIFY:
5586 case DPLANE_OP_LSP_NOTIFY:
5587 case DPLANE_OP_BR_PORT_UPDATE:
5588 break;
5589
5590 /* TODO -- error counters for incoming events? */
5591 case DPLANE_OP_INTF_ADDR_ADD:
5592 case DPLANE_OP_INTF_ADDR_DEL:
5593 case DPLANE_OP_INTF_NETCONFIG:
5594 break;
5595
5596 case DPLANE_OP_NONE:
5597 if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
5598 atomic_fetch_add_explicit(&zdplane_info.dg_other_errors,
5599 1, memory_order_relaxed);
5600 break;
5601 }
5602 }
5603
5604 static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
5605 struct zebra_dplane_ctx *ctx)
5606 {
5607 zebra_pbr_process_iptable(ctx);
5608 dplane_provider_enqueue_out_ctx(prov, ctx);
5609 }
5610
5611 static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
5612 struct zebra_dplane_ctx *ctx)
5613 {
5614 zebra_pbr_process_ipset(ctx);
5615 dplane_provider_enqueue_out_ctx(prov, ctx);
5616 }
5617
5618 static void
5619 kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
5620 struct zebra_dplane_ctx *ctx)
5621 {
5622 zebra_pbr_process_ipset_entry(ctx);
5623 dplane_provider_enqueue_out_ctx(prov, ctx);
5624 }
5625
5626 /*
5627 * Kernel provider callback
5628 */
5629 static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
5630 {
5631 struct zebra_dplane_ctx *ctx, *tctx;
5632 struct dplane_ctx_q work_list;
5633 int counter, limit;
5634
5635 TAILQ_INIT(&work_list);
5636
5637 limit = dplane_provider_get_work_limit(prov);
5638
5639 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5640 zlog_debug("dplane provider '%s': processing",
5641 dplane_provider_get_name(prov));
5642
5643 for (counter = 0; counter < limit; counter++) {
5644 ctx = dplane_provider_dequeue_in_ctx(prov);
5645 if (ctx == NULL)
5646 break;
5647 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5648 kernel_dplane_log_detail(ctx);
5649
5650 if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
5651 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
5652 kernel_dplane_process_iptable(prov, ctx);
5653 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
5654 || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
5655 kernel_dplane_process_ipset(prov, ctx);
5656 else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
5657 || dplane_ctx_get_op(ctx)
5658 == DPLANE_OP_IPSET_ENTRY_DELETE))
5659 kernel_dplane_process_ipset_entry(prov, ctx);
5660 else
5661 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5662 }
5663
5664 kernel_update_multi(&work_list);
5665
5666 TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
5667 kernel_dplane_handle_result(ctx);
5668
5669 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5670 dplane_provider_enqueue_out_ctx(prov, ctx);
5671 }
5672
5673 /* Ensure that we'll run the work loop again if there's still
5674 * more work to do.
5675 */
5676 if (counter >= limit) {
5677 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5678 zlog_debug("dplane provider '%s' reached max updates %d",
5679 dplane_provider_get_name(prov), counter);
5680
5681 atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
5682 1, memory_order_relaxed);
5683
5684 dplane_provider_work_ready();
5685 }
5686
5687 return 0;
5688 }
5689
5690 #ifdef DPLANE_TEST_PROVIDER
5691
5692 /*
5693 * Test dataplane provider plugin
5694 */
5695
5696 /*
5697 * Test provider process callback
5698 */
5699 static int test_dplane_process_func(struct zebra_dplane_provider *prov)
5700 {
5701 struct zebra_dplane_ctx *ctx;
5702 int counter, limit;
5703
5704 /* Just moving from 'in' queue to 'out' queue */
5705
5706 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5707 zlog_debug("dplane provider '%s': processing",
5708 dplane_provider_get_name(prov));
5709
5710 limit = dplane_provider_get_work_limit(prov);
5711
5712 for (counter = 0; counter < limit; counter++) {
5713 ctx = dplane_provider_dequeue_in_ctx(prov);
5714 if (ctx == NULL)
5715 break;
5716
5717 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5718 zlog_debug("dplane provider '%s': op %s",
5719 dplane_provider_get_name(prov),
5720 dplane_op2str(dplane_ctx_get_op(ctx)));
5721
5722 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
5723
5724 dplane_provider_enqueue_out_ctx(prov, ctx);
5725 }
5726
5727 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
5728 zlog_debug("dplane provider '%s': processed %d",
5729 dplane_provider_get_name(prov), counter);
5730
5731 /* Ensure that we'll run the work loop again if there's still
5732 * more work to do.
5733 */
5734 if (counter >= limit)
5735 dplane_provider_work_ready();
5736
5737 return 0;
5738 }
5739
5740 /*
5741 * Test provider shutdown/fini callback
5742 */
5743 static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
5744 bool early)
5745 {
5746 if (IS_ZEBRA_DEBUG_DPLANE)
5747 zlog_debug("dplane provider '%s': %sshutdown",
5748 dplane_provider_get_name(prov),
5749 early ? "early " : "");
5750
5751 return 0;
5752 }
5753 #endif /* DPLANE_TEST_PROVIDER */
5754
5755 /*
5756 * Register default kernel provider
5757 */
5758 static void dplane_provider_init(void)
5759 {
5760 int ret;
5761
5762 ret = dplane_provider_register("Kernel",
5763 DPLANE_PRIO_KERNEL,
5764 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5765 kernel_dplane_process_func,
5766 NULL,
5767 NULL, NULL);
5768
5769 if (ret != AOK)
5770 zlog_err("Unable to register kernel dplane provider: %d",
5771 ret);
5772
5773 #ifdef DPLANE_TEST_PROVIDER
5774 /* Optional test provider ... */
5775 ret = dplane_provider_register("Test",
5776 DPLANE_PRIO_PRE_KERNEL,
5777 DPLANE_PROV_FLAGS_DEFAULT, NULL,
5778 test_dplane_process_func,
5779 test_dplane_shutdown_func,
5780 NULL /* data */, NULL);
5781
5782 if (ret != AOK)
5783 zlog_err("Unable to register test dplane provider: %d",
5784 ret);
5785 #endif /* DPLANE_TEST_PROVIDER */
5786 }
5787
5788 /*
5789 * Allow zebra code to walk the queue of pending contexts, evaluating each one
5790 * with a callback function. If the callback returns 'true', the context
5791 * will be dequeued and freed without being processed.
5792 */
5793 int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
5794 void *arg), void *val)
5795 {
5796 struct zebra_dplane_ctx *ctx, *temp;
5797 struct dplane_ctx_q work_list;
5798
5799 TAILQ_INIT(&work_list);
5800
5801 if (context_cb == NULL)
5802 goto done;
5803
5804 /* Walk the pending context queue under the dplane lock. */
5805 DPLANE_LOCK();
5806
5807 TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
5808 temp) {
5809 if (context_cb(ctx, val)) {
5810 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
5811 zd_q_entries);
5812 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
5813 }
5814 }
5815
5816 DPLANE_UNLOCK();
5817
5818 /* Now free any contexts selected by the caller, without holding
5819 * the lock.
5820 */
5821 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
5822 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
5823 dplane_ctx_fini(&ctx);
5824 }
5825
5826 done:
5827
5828 return 0;
5829 }
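
/*
 * Illustrative sketch only -- hypothetical names, not part of the
 * dplane api. A callback for dplane_clean_ctx_queue() above might
 * select any still-queued contexts that refer to an interface being
 * deleted, e.g.:
 *
 *	ifindex_t ifindex = ifp->ifindex;
 *
 *	dplane_clean_ctx_queue(sample_drop_by_ifindex, &ifindex);
 */
static bool sample_drop_by_ifindex(struct zebra_dplane_ctx *ctx, void *arg)
{
	ifindex_t ifindex = *(ifindex_t *)arg;

	/* Returning 'true' causes the context to be dequeued and freed */
	return dplane_ctx_get_ifindex(ctx) == ifindex;
}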
5830
5831 /* Indicates zebra shutdown/exit is in progress. Some operations may be
5832 * simplified or skipped during shutdown processing.
5833 */
5834 bool dplane_is_in_shutdown(void)
5835 {
5836 return zdplane_info.dg_is_shutdown;
5837 }
5838
5839 /*
5840 * Enable collection of extra info about interfaces in route updates.
5841 */
5842 void dplane_enable_intf_extra_info(void)
5843 {
5844 dplane_collect_extra_intf_info = true;
5845 }
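
/*
 * Illustrative sketch only -- the callback name is hypothetical. A
 * plugin that wants the extra interface info carried with route
 * updates would typically enable it from the 'start' callback it
 * passed to dplane_provider_register().
 */
static int sample_intf_info_start(struct zebra_dplane_provider *prov)
{
	dplane_enable_intf_extra_info();

	return 0;
}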
5846
5847 /*
5848 * Early or pre-shutdown, de-init notification api. This runs pretty
5849 * early during zebra shutdown, as a signal to stop new work and prepare
5850 * for updates generated by shutdown/cleanup activity, as zebra tries to
5851 * remove everything it's responsible for.
5852 * NB: This runs in the main zebra pthread context.
5853 */
5854 void zebra_dplane_pre_finish(void)
5855 {
5856 struct zebra_dplane_provider *prov;
5857
5858 if (IS_ZEBRA_DEBUG_DPLANE)
5859 zlog_debug("Zebra dataplane pre-finish called");
5860
5861 zdplane_info.dg_is_shutdown = true;
5862
5863 /* Notify provider(s) of pending shutdown. */
5864 TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
5865 if (prov->dp_fini == NULL)
5866 continue;
5867
5868 prov->dp_fini(prov, true /* early */);
5869 }
5870 }
5871
5872 /*
5873 * Utility to determine whether work remains enqueued within the dplane;
5874 * used during system shutdown processing.
5875 */
5876 static bool dplane_work_pending(void)
5877 {
5878 bool ret = false;
5879 struct zebra_dplane_ctx *ctx;
5880 struct zebra_dplane_provider *prov;
5881
5882 /* TODO -- just checking incoming/pending work for now, must check
5883 * providers
5884 */
5885 DPLANE_LOCK();
5886 {
5887 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
5888 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
5889 }
5890 DPLANE_UNLOCK();
5891
5892 if (ctx != NULL) {
5893 ret = true;
5894 goto done;
5895 }
5896
5897 while (prov) {
5898
5899 dplane_provider_lock(prov);
5900
5901 ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
5902 if (ctx == NULL)
5903 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
5904
5905 dplane_provider_unlock(prov);
5906
5907 if (ctx != NULL)
5908 break;
5909
5910 DPLANE_LOCK();
5911 prov = TAILQ_NEXT(prov, dp_prov_link);
5912 DPLANE_UNLOCK();
5913 }
5914
5915 if (ctx != NULL)
5916 ret = true;
5917
5918 done:
5919 return ret;
5920 }
5921
5922 /*
5923 * Shutdown-time intermediate callback, used to determine when all pending
5924 * in-flight updates are done. If there's still work to do, reschedules itself.
5925 * If all work is done, schedules an event to the main zebra thread for
5926 * final zebra shutdown.
5927 * This runs in the dplane pthread context.
5928 */
5929 static void dplane_check_shutdown_status(struct thread *event)
5930 {
5931 struct dplane_zns_info *zi;
5932
5933 if (IS_ZEBRA_DEBUG_DPLANE)
5934 zlog_debug("Zebra dataplane shutdown status check called");
5935
5936 /* Remove any zns info entries as we stop the dplane pthread. */
5937 frr_each_safe (zns_info_list, &zdplane_info.dg_zns_list, zi) {
5938 zns_info_list_del(&zdplane_info.dg_zns_list, zi);
5939
5940 if (zdplane_info.dg_master) {
5941 thread_cancel(&zi->t_read);
5942 thread_cancel(&zi->t_request);
5943 }
5944
5945 XFREE(MTYPE_DP_NS, zi);
5946 }
5947
5948 if (dplane_work_pending()) {
5949 /* Reschedule dplane check on a short timer */
5950 thread_add_timer_msec(zdplane_info.dg_master,
5951 dplane_check_shutdown_status,
5952 NULL, 100,
5953 &zdplane_info.dg_t_shutdown_check);
5954
5955 /* TODO - give up and stop waiting after a short time? */
5956
5957 } else {
5958 /* We appear to be done - schedule a final callback event
5959 * for the zebra main pthread.
5960 */
5961 thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
5962 }
5963 }
5964
5965 /*
5966 * Shutdown, de-init api. This runs pretty late during shutdown,
5967 * after zebra has tried to free/remove/uninstall all routes during shutdown.
5968 * At this point, dplane work may still remain to be done, so we can't just
5969 * blindly terminate. If there's still work to do, we'll periodically check
5970 * and when done, we'll enqueue a task to the zebra main thread for final
5971 * termination processing.
5972 *
5973 * NB: This runs in the main zebra thread context.
5974 */
5975 void zebra_dplane_finish(void)
5976 {
5977 if (IS_ZEBRA_DEBUG_DPLANE)
5978 zlog_debug("Zebra dataplane fini called");
5979
5980 thread_add_event(zdplane_info.dg_master,
5981 dplane_check_shutdown_status, NULL, 0,
5982 &zdplane_info.dg_t_shutdown_check);
5983 }
5984
5985 /*
5986 * Main dataplane pthread event loop. The thread takes new incoming work
5987 * and offers it to the first provider. It then iterates through the
5988 * providers, taking complete work from each one and offering it
5989 * to the next in order. At each step, a limited number of updates are
5990 * processed during a cycle in order to provide some fairness.
5991 *
5992 * This loop through the providers is only run once, so that the dataplane
5993 * pthread can look for other pending work - such as i/o work on behalf of
5994 * providers.
5995 */
5996 static void dplane_thread_loop(struct thread *event)
5997 {
5998 struct dplane_ctx_q work_list;
5999 struct dplane_ctx_q error_list;
6000 struct zebra_dplane_provider *prov;
6001 struct zebra_dplane_ctx *ctx, *tctx;
6002 int limit, counter, error_counter;
6003 uint64_t curr, high;
6004 bool reschedule = false;
6005
6006 /* Capture work limit per cycle */
6007 limit = zdplane_info.dg_updates_per_cycle;
6008
6009 /* Init temporary lists used to move contexts among providers */
6010 TAILQ_INIT(&work_list);
6011 TAILQ_INIT(&error_list);
6012 error_counter = 0;
6013
6014 /* Check for zebra shutdown */
6015 if (!zdplane_info.dg_run)
6016 return;
6017
6018 /* Dequeue some incoming work from zebra (if any) onto the temporary
6019 * working list.
6020 */
6021 DPLANE_LOCK();
6022
6023 /* Locate initial registered provider */
6024 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6025
6026 /* Move new work from incoming list to temp list */
6027 for (counter = 0; counter < limit; counter++) {
6028 ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
6029 if (ctx) {
6030 TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
6031 zd_q_entries);
6032
6033 ctx->zd_provider = prov->dp_id;
6034
6035 TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
6036 } else {
6037 break;
6038 }
6039 }
6040
6041 DPLANE_UNLOCK();
6042
6043 atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
6044 memory_order_relaxed);
6045
6046 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6047 zlog_debug("dplane: incoming new work counter: %d", counter);
6048
6049 /* Iterate through the registered providers, offering new incoming
6050 * work. If the provider has outgoing work in its queue, take that
6051 * work and pass it along to the next provider.
6052 */
6053 while (prov) {
6054
6055 /* At each iteration, the temporary work list has 'counter'
6056 * items.
6057 */
6058 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6059 zlog_debug("dplane enqueues %d new work to provider '%s'",
6060 counter, dplane_provider_get_name(prov));
6061
6062 /* Capture current provider id in each context; check for
6063 * error status.
6064 */
6065 TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
6066 if (dplane_ctx_get_status(ctx) ==
6067 ZEBRA_DPLANE_REQUEST_SUCCESS) {
6068 ctx->zd_provider = prov->dp_id;
6069 } else {
6070 /*
6071 * TODO -- improve error-handling: recirc
6072 * errors backwards so that providers can
6073 * 'undo' their work (if they want to)
6074 */
6075
6076 /* Move to error list; will be returned
6077 * to zebra main.
6078 */
6079 TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
6080 TAILQ_INSERT_TAIL(&error_list,
6081 ctx, zd_q_entries);
6082 error_counter++;
6083 }
6084 }
6085
6086 /* Enqueue new work to the provider */
6087 dplane_provider_lock(prov);
6088
6089 if (TAILQ_FIRST(&work_list))
6090 TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
6091 zd_q_entries);
6092
6093 atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
6094 memory_order_relaxed);
6095 atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
6096 memory_order_relaxed);
6097 curr = atomic_load_explicit(&prov->dp_in_queued,
6098 memory_order_relaxed);
6099 high = atomic_load_explicit(&prov->dp_in_max,
6100 memory_order_relaxed);
6101 if (curr > high)
6102 atomic_store_explicit(&prov->dp_in_max, curr,
6103 memory_order_relaxed);
6104
6105 dplane_provider_unlock(prov);
6106
6107 /* Reset the temp list (though the 'concat' may have done this
6108 * already), and the counter
6109 */
6110 TAILQ_INIT(&work_list);
6111 counter = 0;
6112
6113 /* Call into the provider code. Note that this is
6114 * unconditional: we offer to do work even if we don't enqueue
6115 * any _new_ work.
6116 */
6117 (*prov->dp_fp)(prov);
6118
6119 /* Check for zebra shutdown */
6120 if (!zdplane_info.dg_run)
6121 break;
6122
6123 /* Dequeue completed work from the provider */
6124 dplane_provider_lock(prov);
6125
6126 while (counter < limit) {
6127 ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
6128 if (ctx) {
6129 TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
6130 zd_q_entries);
6131
6132 TAILQ_INSERT_TAIL(&work_list,
6133 ctx, zd_q_entries);
6134 counter++;
6135 } else
6136 break;
6137 }
6138
6139 dplane_provider_unlock(prov);
6140
6141 if (counter >= limit)
6142 reschedule = true;
6143
6144 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6145 zlog_debug("dplane dequeues %d completed work from provider %s",
6146 counter, dplane_provider_get_name(prov));
6147
6148 /* Locate next provider */
6149 DPLANE_LOCK();
6150 prov = TAILQ_NEXT(prov, dp_prov_link);
6151 DPLANE_UNLOCK();
6152 }
6153
6154 /*
6155 * We hit the work limit while processing at least one provider's
6156 * output queue - ensure we come back and finish it.
6157 */
6158 if (reschedule)
6159 dplane_provider_work_ready();
6160
6161 /* After all providers have been serviced, enqueue any completed
6162 * work and any errors back to zebra so it can process the results.
6163 */
6164 if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6165 zlog_debug("dplane has %d completed, %d errors, for zebra main",
6166 counter, error_counter);
6167
6168 /*
6169 * Hand the lists through the API to zebra main,
6170 * to reduce the number of lock/unlock cycles.
6171 */
6172
6173 /* Call through to zebra main */
6174 (zdplane_info.dg_results_cb)(&error_list);
6175
6176 TAILQ_INIT(&error_list);
6177
6178 /* Call through to zebra main */
6179 (zdplane_info.dg_results_cb)(&work_list);
6180
6181 TAILQ_INIT(&work_list);
6182 }
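
/*
 * Illustration only -- a minimal sketch of the provider-side half of the
 * cycle above, for a hypothetical plugin provider (nothing in this file
 * registers it). It drains the provider's input queue up to the per-cycle
 * work limit, marks each context, and hands results back on the output
 * queue; dplane_provider_dequeue_in_ctx(), dplane_provider_get_work_limit()
 * and dplane_provider_enqueue_out_ctx() are the provider accessors declared
 * in zebra_dplane.h.
 */
static int sample_provider_process(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	int counter, limit;

	limit = dplane_provider_get_work_limit(prov);

	for (counter = 0; counter < limit; counter++) {
		/* Pull one incoming context; stop when the queue is empty */
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* A real provider would program the kernel or hardware
		 * here; this sketch only reports success.
		 */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);

		/* Return the completed context on the outbound queue */
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Mainly useful for a provider running in its own pthread; the
	 * dplane pthread collects output from in-line providers right
	 * after this callback returns.
	 */
	if (counter > 0)
		dplane_provider_work_ready();

	return 0;
}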
6183
6184 /*
6185 * Final phase of shutdown, after all work enqueued to dplane has been
6186 * processed. This is called from the zebra main pthread context.
6187 */
6188 void zebra_dplane_shutdown(void)
6189 {
6190 struct zebra_dplane_provider *dp;
6191
6192 if (IS_ZEBRA_DEBUG_DPLANE)
6193 zlog_debug("Zebra dataplane shutdown called");
6194
6195 /* Stop dplane thread, if it's running */
6196
6197 zdplane_info.dg_run = false;
6198
6199 if (zdplane_info.dg_t_update)
6200 thread_cancel_async(zdplane_info.dg_t_update->master,
6201 &zdplane_info.dg_t_update, NULL);
6202
6203 frr_pthread_stop(zdplane_info.dg_pthread, NULL);
6204
6205 /* Destroy pthread */
6206 frr_pthread_destroy(zdplane_info.dg_pthread);
6207 zdplane_info.dg_pthread = NULL;
6208 zdplane_info.dg_master = NULL;
6209
6210 /* Notify provider(s) of final shutdown.
6211 * Note that this call is in the main pthread, so providers must
6212 * be prepared for that.
6213 */
6214 TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
6215 if (dp->dp_fini == NULL)
6216 continue;
6217
6218 dp->dp_fini(dp, false);
6219 }
6220
6221 /* TODO -- Clean-up provider objects */
6222
6223 /* TODO -- Clean queue(s), free memory */
6224 }
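
/*
 * Illustration only -- a sketch of a provider 'fini' callback shaped to
 * match the dp_fini invocation above; the function name is hypothetical.
 * 'early' distinguishes the early shutdown pass from this final pass,
 * which runs in the zebra main pthread.
 */
static int sample_provider_fini(struct zebra_dplane_provider *prov, bool early)
{
	/* Early pass: stop accepting new work, but keep whatever state is
	 * needed to drain contexts still in flight.
	 */
	if (early)
		return 0;

	/* Final pass: only main-pthread-safe cleanup belongs here. */
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("%s: final cleanup for provider '%s'",
			   __func__, dplane_provider_get_name(prov));

	return 0;
}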
6225
6226 /*
6227 * Initialize the dataplane module during startup, internal/private version
6228 */
6229 static void zebra_dplane_init_internal(void)
6230 {
6231 memset(&zdplane_info, 0, sizeof(zdplane_info));
6232
6233 pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
6234
6235 TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
6236 TAILQ_INIT(&zdplane_info.dg_providers_q);
6237 zns_info_list_init(&zdplane_info.dg_zns_list);
6238
6239 zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
6240
6241 zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
6242
6243 /* Register default kernel 'provider' during init */
6244 dplane_provider_init();
6245 }
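
/*
 * Illustration only -- how an external plugin might register next to the
 * default kernel provider, using the sample callbacks sketched above. The
 * names here are hypothetical; dplane_provider_register() and the
 * DPLANE_PRIO_ / DPLANE_PROV_ constants are declared in zebra_dplane.h.
 */
static int sample_plugin_register(void)
{
	int ret;

	/* Run before the kernel provider, in the dplane pthread itself
	 * (no DPLANE_PROV_FLAG_THREADED), with no private data object.
	 */
	ret = dplane_provider_register("sample", DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAGS_DEFAULT,
				       NULL /* start */,
				       sample_provider_process,
				       sample_provider_fini,
				       NULL /* data */, NULL /* handle */);
	if (ret != AOK)
		zlog_debug("sample provider registration failed: %d", ret);

	return ret;
}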
6246
6247 /*
6248 * Start the dataplane pthread. This step needs to be run later than the
6249 * 'init' step, in case zebra has forked.
6250 */
6251 void zebra_dplane_start(void)
6252 {
6253 struct dplane_zns_info *zi;
6254 struct zebra_dplane_provider *prov;
6255 struct frr_pthread_attr pattr = {
6256 .start = frr_pthread_attr_default.start,
6257 .stop = frr_pthread_attr_default.stop
6258 };
6259
6260 /* Start dataplane pthread */
6261
6262 zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
6263 "zebra_dplane");
6264
6265 zdplane_info.dg_master = zdplane_info.dg_pthread->master;
6266
6267 zdplane_info.dg_run = true;
6268
6269 /* Enqueue an initial event for the dataplane pthread */
6270 thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
6271 &zdplane_info.dg_t_update);
6272
6273 /* Enqueue requests and reads if necessary */
6274 frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
6275 #if defined(HAVE_NETLINK)
6276 thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
6277 zi, zi->info.sock, &zi->t_read);
6278 dplane_kernel_info_request(zi);
6279 #endif
6280 }
6281
6282 /* Call start callbacks for registered providers */
6283
6284 DPLANE_LOCK();
6285 prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
6286 DPLANE_UNLOCK();
6287
6288 while (prov) {
6289
6290 if (prov->dp_start)
6291 (prov->dp_start)(prov);
6292
6293 /* Locate next provider */
6294 DPLANE_LOCK();
6295 prov = TAILQ_NEXT(prov, dp_prov_link);
6296 DPLANE_UNLOCK();
6297 }
6298
6299 frr_pthread_run(zdplane_info.dg_pthread, NULL);
6300 }
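
/*
 * Illustration only -- a minimal provider 'start' callback matching the
 * dp_start invocation above; the name is hypothetical. A threaded provider
 * (registered with DPLANE_PROV_FLAG_THREADED) would typically create its
 * own pthread here before returning.
 */
static int sample_provider_start(struct zebra_dplane_provider *prov)
{
	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("provider '%s' starting",
			   dplane_provider_get_name(prov));

	return AOK;
}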
6301
6302 /*
6303 * Initialize the dataplane module at startup; called by zebra rib_init()
6304 */
6305 void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
6306 {
6307 zebra_dplane_init_internal();
6308 zdplane_info.dg_results_cb = results_fp;
6309 }
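
/*
 * Illustration only -- the shape of a 'results_fp' callback suitable for
 * the init call above; the real callback is supplied by zebra's rib code
 * at rib_init() time. This hypothetical handler just drains the list,
 * using the public context accessors dplane_ctx_dequeue(),
 * dplane_ctx_get_op(), dplane_op2str(), dplane_res2str() and
 * dplane_ctx_fini().
 */
static int sample_results_handler(struct dplane_ctx_q *ctxlist)
{
	struct zebra_dplane_ctx *ctx;

	while ((ctx = dplane_ctx_dequeue(ctxlist)) != NULL) {
		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
			zlog_debug("result: op %s, status %s",
				   dplane_op2str(dplane_ctx_get_op(ctx)),
				   dplane_res2str(dplane_ctx_get_status(ctx)));

		/* Return the context to the dplane's cache/free pool */
		dplane_ctx_fini(&ctx);
	}

	return 0;
}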