/*
 * Zebra dataplane plugin for Forwarding Plane Manager (FPM) using netlink.
 *
 * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF")
 * Rafael Zalamena
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h" /* Include this explicitly */
#endif

#include <arpa/inet.h>

#include <sys/types.h>
#include <sys/socket.h>

#include <errno.h>
#include <string.h>

#include "lib/zebra.h"
#include "lib/json.h"
#include "lib/libfrr.h"
#include "lib/frratomic.h"
#include "lib/command.h"
#include "lib/memory.h"
#include "lib/network.h"
#include "lib/ns.h"
#include "lib/frr_pthread.h"
#include "zebra/debug.h"
#include "zebra/interface.h"
#include "zebra/zebra_dplane.h"
#include "zebra/zebra_mpls.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_evpn.h"
#include "zebra/zebra_evpn_mac.h"
#include "zebra/zebra_vxlan_private.h"
#include "zebra/kernel_netlink.h"
#include "zebra/rt_netlink.h"
#include "zebra/debug.h"
#include "fpm/fpm.h"

#define SOUTHBOUND_DEFAULT_ADDR INADDR_LOOPBACK
#define SOUTHBOUND_DEFAULT_PORT 2620

/**
 * FPM header:
 * {
 *   version: 1 byte (always 1),
 *   type: 1 byte (1 for netlink, 2 for protobuf),
 *   len: 2 bytes (network order),
 * }
 *
 * This header is used with any payload format to tell the receiver how many
 * bytes to expect.
 */
#define FPM_HEADER_SIZE 4

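/*
 * Worked example (illustrative, not taken from a capture): a 100 byte
 * netlink payload is preceded by the bytes {0x01, 0x01, 0x00, 0x68}:
 * version 1, type 1 (netlink) and a total length of 0x0068 = 104 octets in
 * network byte order, which counts the 4 byte header plus the payload.
 */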
static const char *prov_name = "dplane_fpm_nl";

struct fpm_nl_ctx {
	/* Data plane connection. */
	int socket;
	bool disabled;
	bool connecting;
	bool use_nhg;
	struct sockaddr_storage addr;

	/* Data plane buffers. */
	struct stream *ibuf;
	struct stream *obuf;
	pthread_mutex_t obuf_mutex;

	/*
	 * Data plane context queue:
	 * When an FPM server connection becomes a bottleneck, we must keep
	 * the data plane contexts until we get a chance to process them.
	 */
	struct dplane_ctx_q ctxqueue;
	pthread_mutex_t ctxqueue_mutex;

	/* Data plane events. */
	struct zebra_dplane_provider *prov;
	struct frr_pthread *fthread;
	struct thread *t_connect;
	struct thread *t_read;
	struct thread *t_write;
	struct thread *t_event;
	struct thread *t_nhg;
	struct thread *t_dequeue;

	/* Zebra events. */
	struct thread *t_lspreset;
	struct thread *t_lspwalk;
	struct thread *t_nhgreset;
	struct thread *t_nhgwalk;
	struct thread *t_ribreset;
	struct thread *t_ribwalk;
	struct thread *t_rmacreset;
	struct thread *t_rmacwalk;

	/* Statistic counters. */
	struct {
		/* Number of bytes read into ibuf. */
		_Atomic uint32_t bytes_read;
		/* Number of bytes written from obuf. */
		_Atomic uint32_t bytes_sent;
		/* Output buffer current usage. */
		_Atomic uint32_t obuf_bytes;
		/* Output buffer peak usage. */
		_Atomic uint32_t obuf_peak;

		/* Number of connection closes. */
		_Atomic uint32_t connection_closes;
		/* Number of connection errors. */
		_Atomic uint32_t connection_errors;

		/* Number of user configuration requests: FNE_RECONNECT. */
		_Atomic uint32_t user_configures;
		/* Number of user disable requests: FNE_DISABLE. */
		_Atomic uint32_t user_disables;

		/* Number of data plane contexts processed. */
		_Atomic uint32_t dplane_contexts;
		/* Number of data plane contexts enqueued. */
		_Atomic uint32_t ctxqueue_len;
		/* Peak number of data plane contexts enqueued. */
		_Atomic uint32_t ctxqueue_len_peak;

		/* Number of buffer full events. */
		_Atomic uint32_t buffer_full;
	} counters;
} *gfnc;
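/*
 * Note on the counters above: they are plain _Atomic values updated with
 * memory_order_relaxed from the FPM pthread and read without locking by the
 * "show fpm counters" CLI, so each value is individually consistent but the
 * snapshot as a whole is only approximate.
 */
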
enum fpm_nl_events {
	/* Ask FPM to reconnect to the external server. */
	FNE_RECONNECT,
	/* Disable FPM. */
	FNE_DISABLE,
	/* Reset counters. */
	FNE_RESET_COUNTERS,
	/* Toggle the next hop group feature. */
	FNE_TOGGLE_NHG,
	/* Reconnect request by our own code to avoid races. */
	FNE_INTERNAL_RECONNECT,

	/* LSP walk finished. */
	FNE_LSP_FINISHED,
	/* Next hop groups walk finished. */
	FNE_NHG_FINISHED,
	/* RIB walk finished. */
	FNE_RIB_FINISHED,
	/* RMAC walk finished. */
	FNE_RMAC_FINISHED,
};

#define FPM_RECONNECT(fnc) \
	thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
			 FNE_INTERNAL_RECONNECT, &(fnc)->t_event)

#define WALK_FINISH(fnc, ev) \
	thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
			 (ev), NULL)

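/*
 * Note: FPM_RECONNECT() fires FNE_INTERNAL_RECONNECT on the FPM pthread so
 * that reconnections always happen from that thread's context; the
 * CLI-driven FNE_RECONNECT/FNE_DISABLE events reach fpm_process_event() the
 * same way, and all of them end up in fpm_reconnect().
 */
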
/*
 * Prototypes.
 */
static void fpm_process_event(struct thread *t);
static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx);
static void fpm_lsp_send(struct thread *t);
static void fpm_lsp_reset(struct thread *t);
static void fpm_nhg_send(struct thread *t);
static void fpm_nhg_reset(struct thread *t);
static void fpm_rib_send(struct thread *t);
static void fpm_rib_reset(struct thread *t);
static void fpm_rmac_send(struct thread *t);
static void fpm_rmac_reset(struct thread *t);

192/*
193 * CLI.
194 */
6cc059cd
RZ
195#define FPM_STR "Forwarding Plane Manager configuration\n"
196
3bdd7fca
RZ
197DEFUN(fpm_set_address, fpm_set_address_cmd,
198 "fpm address <A.B.C.D|X:X::X:X> [port (1-65535)]",
6cc059cd 199 FPM_STR
3bdd7fca
RZ
200 "FPM remote listening server address\n"
201 "Remote IPv4 FPM server\n"
202 "Remote IPv6 FPM server\n"
203 "FPM remote listening server port\n"
204 "Remote FPM server port\n")
205{
206 struct sockaddr_in *sin;
207 struct sockaddr_in6 *sin6;
208 uint16_t port = 0;
209 uint8_t naddr[INET6_BUFSIZ];
210
211 if (argc == 5)
212 port = strtol(argv[4]->arg, NULL, 10);
213
214 /* Handle IPv4 addresses. */
215 if (inet_pton(AF_INET, argv[2]->arg, naddr) == 1) {
216 sin = (struct sockaddr_in *)&gfnc->addr;
217
218 memset(sin, 0, sizeof(*sin));
219 sin->sin_family = AF_INET;
220 sin->sin_port =
221 port ? htons(port) : htons(SOUTHBOUND_DEFAULT_PORT);
222#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
223 sin->sin_len = sizeof(*sin);
224#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
225 memcpy(&sin->sin_addr, naddr, sizeof(sin->sin_addr));
226
227 goto ask_reconnect;
228 }
229
230 /* Handle IPv6 addresses. */
231 if (inet_pton(AF_INET6, argv[2]->arg, naddr) != 1) {
232 vty_out(vty, "%% Invalid address: %s\n", argv[2]->arg);
233 return CMD_WARNING;
234 }
235
236 sin6 = (struct sockaddr_in6 *)&gfnc->addr;
237 memset(sin6, 0, sizeof(*sin6));
238 sin6->sin6_family = AF_INET6;
239 sin6->sin6_port = port ? htons(port) : htons(SOUTHBOUND_DEFAULT_PORT);
240#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
241 sin6->sin6_len = sizeof(*sin6);
242#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
243 memcpy(&sin6->sin6_addr, naddr, sizeof(sin6->sin6_addr));
244
245ask_reconnect:
246 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
247 FNE_RECONNECT, &gfnc->t_event);
248 return CMD_SUCCESS;
249}
250
251DEFUN(no_fpm_set_address, no_fpm_set_address_cmd,
252 "no fpm address [<A.B.C.D|X:X::X:X> [port <1-65535>]]",
253 NO_STR
6cc059cd 254 FPM_STR
3bdd7fca
RZ
255 "FPM remote listening server address\n"
256 "Remote IPv4 FPM server\n"
257 "Remote IPv6 FPM server\n"
258 "FPM remote listening server port\n"
259 "Remote FPM server port\n")
260{
261 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
262 FNE_DISABLE, &gfnc->t_event);
263 return CMD_SUCCESS;
264}
265
b55ab92a
RZ
266DEFUN(fpm_use_nhg, fpm_use_nhg_cmd,
267 "fpm use-next-hop-groups",
268 FPM_STR
269 "Use netlink next hop groups feature.\n")
270{
271 /* Already enabled. */
272 if (gfnc->use_nhg)
273 return CMD_SUCCESS;
274
275 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
551fa8c3 276 FNE_TOGGLE_NHG, &gfnc->t_nhg);
b55ab92a
RZ
277
278 return CMD_SUCCESS;
279}
280
281DEFUN(no_fpm_use_nhg, no_fpm_use_nhg_cmd,
282 "no fpm use-next-hop-groups",
283 NO_STR
284 FPM_STR
285 "Use netlink next hop groups feature.\n")
286{
287 /* Already disabled. */
288 if (!gfnc->use_nhg)
289 return CMD_SUCCESS;
290
291 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
551fa8c3 292 FNE_TOGGLE_NHG, &gfnc->t_nhg);
b55ab92a
RZ
293
294 return CMD_SUCCESS;
295}
296
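/*
 * Example usage from vtysh, using the commands defined above (the address
 * shown is a placeholder; the port defaults to SOUTHBOUND_DEFAULT_PORT when
 * omitted):
 *
 *   configure terminal
 *    fpm address 10.0.0.1 port 2620
 *    no fpm use-next-hop-groups
 */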
6cc059cd
RZ
297DEFUN(fpm_reset_counters, fpm_reset_counters_cmd,
298 "clear fpm counters",
299 CLEAR_STR
300 FPM_STR
301 "FPM statistic counters\n")
302{
303 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
304 FNE_RESET_COUNTERS, &gfnc->t_event);
305 return CMD_SUCCESS;
306}
307
308DEFUN(fpm_show_counters, fpm_show_counters_cmd,
309 "show fpm counters",
310 SHOW_STR
311 FPM_STR
312 "FPM statistic counters\n")
313{
314 vty_out(vty, "%30s\n%30s\n", "FPM counters", "============");
315
316#define SHOW_COUNTER(label, counter) \
770a8d28 317 vty_out(vty, "%28s: %u\n", (label), (counter))
6cc059cd
RZ
318
319 SHOW_COUNTER("Input bytes", gfnc->counters.bytes_read);
320 SHOW_COUNTER("Output bytes", gfnc->counters.bytes_sent);
ad4d1022
RZ
321 SHOW_COUNTER("Output buffer current size", gfnc->counters.obuf_bytes);
322 SHOW_COUNTER("Output buffer peak size", gfnc->counters.obuf_peak);
6cc059cd
RZ
323 SHOW_COUNTER("Connection closes", gfnc->counters.connection_closes);
324 SHOW_COUNTER("Connection errors", gfnc->counters.connection_errors);
325 SHOW_COUNTER("Data plane items processed",
326 gfnc->counters.dplane_contexts);
ba803a2f
RZ
327 SHOW_COUNTER("Data plane items enqueued",
328 gfnc->counters.ctxqueue_len);
329 SHOW_COUNTER("Data plane items queue peak",
330 gfnc->counters.ctxqueue_len_peak);
6cc059cd
RZ
331 SHOW_COUNTER("Buffer full hits", gfnc->counters.buffer_full);
332 SHOW_COUNTER("User FPM configurations", gfnc->counters.user_configures);
333 SHOW_COUNTER("User FPM disable requests", gfnc->counters.user_disables);
334
335#undef SHOW_COUNTER
336
337 return CMD_SUCCESS;
338}
339
340DEFUN(fpm_show_counters_json, fpm_show_counters_json_cmd,
341 "show fpm counters json",
342 SHOW_STR
343 FPM_STR
344 "FPM statistic counters\n"
345 JSON_STR)
346{
347 struct json_object *jo;
348
349 jo = json_object_new_object();
350 json_object_int_add(jo, "bytes-read", gfnc->counters.bytes_read);
351 json_object_int_add(jo, "bytes-sent", gfnc->counters.bytes_sent);
ad4d1022
RZ
352 json_object_int_add(jo, "obuf-bytes", gfnc->counters.obuf_bytes);
353 json_object_int_add(jo, "obuf-bytes-peak", gfnc->counters.obuf_peak);
a50404aa
RZ
354 json_object_int_add(jo, "connection-closes",
355 gfnc->counters.connection_closes);
356 json_object_int_add(jo, "connection-errors",
357 gfnc->counters.connection_errors);
358 json_object_int_add(jo, "data-plane-contexts",
359 gfnc->counters.dplane_contexts);
ba803a2f
RZ
360 json_object_int_add(jo, "data-plane-contexts-queue",
361 gfnc->counters.ctxqueue_len);
362 json_object_int_add(jo, "data-plane-contexts-queue-peak",
363 gfnc->counters.ctxqueue_len_peak);
6cc059cd 364 json_object_int_add(jo, "buffer-full-hits", gfnc->counters.buffer_full);
a50404aa
RZ
365 json_object_int_add(jo, "user-configures",
366 gfnc->counters.user_configures);
6cc059cd 367 json_object_int_add(jo, "user-disables", gfnc->counters.user_disables);
962af8a8 368 vty_json(vty, jo);
6cc059cd
RZ
369
370 return CMD_SUCCESS;
371}
372
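/*
 * The matching operational commands, installed in fpm_nl_new() below, are
 * "show fpm counters", "show fpm counters json" and "clear fpm counters".
 */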
3bdd7fca
RZ
373static int fpm_write_config(struct vty *vty)
374{
375 struct sockaddr_in *sin;
376 struct sockaddr_in6 *sin6;
377 int written = 0;
3bdd7fca
RZ
378
379 if (gfnc->disabled)
380 return written;
381
382 switch (gfnc->addr.ss_family) {
383 case AF_INET:
384 written = 1;
385 sin = (struct sockaddr_in *)&gfnc->addr;
a3adec46 386 vty_out(vty, "fpm address %pI4", &sin->sin_addr);
3bdd7fca
RZ
387 if (sin->sin_port != htons(SOUTHBOUND_DEFAULT_PORT))
388 vty_out(vty, " port %d", ntohs(sin->sin_port));
389
390 vty_out(vty, "\n");
391 break;
392 case AF_INET6:
393 written = 1;
394 sin6 = (struct sockaddr_in6 *)&gfnc->addr;
a3adec46 395 vty_out(vty, "fpm address %pI6", &sin6->sin6_addr);
3bdd7fca
RZ
396 if (sin6->sin6_port != htons(SOUTHBOUND_DEFAULT_PORT))
397 vty_out(vty, " port %d", ntohs(sin6->sin6_port));
398
399 vty_out(vty, "\n");
400 break;
401
402 default:
403 break;
404 }
405
b55ab92a
RZ
406 if (!gfnc->use_nhg) {
407 vty_out(vty, "no fpm use-next-hop-groups\n");
408 written = 1;
409 }
410
3bdd7fca
RZ
411 return written;
412}
413
612c2c15 414static struct cmd_node fpm_node = {
893d8beb
DL
415 .name = "fpm",
416 .node = FPM_NODE,
3bdd7fca 417 .prompt = "",
612c2c15 418 .config_write = fpm_write_config,
3bdd7fca
RZ
419};
420
d35f447d
RZ
421/*
422 * FPM functions.
423 */
cc9f21da 424static void fpm_connect(struct thread *t);
d35f447d
RZ
425
426static void fpm_reconnect(struct fpm_nl_ctx *fnc)
427{
a2032324 428 /* Cancel all zebra threads first. */
f9bf1ecc
DE
429 thread_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
430 thread_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
a2032324
RZ
431 thread_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
432 thread_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
433 thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
434 thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
435 thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
436 thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
437
438 /*
439 * Grab the lock to empty the streams (data plane might try to
440 * enqueue updates while we are closing).
441 */
d35f447d
RZ
442 frr_mutex_lock_autounlock(&fnc->obuf_mutex);
443
3bdd7fca
RZ
444 /* Avoid calling close on `-1`. */
445 if (fnc->socket != -1) {
446 close(fnc->socket);
447 fnc->socket = -1;
448 }
449
d35f447d
RZ
450 stream_reset(fnc->ibuf);
451 stream_reset(fnc->obuf);
452 THREAD_OFF(fnc->t_read);
453 THREAD_OFF(fnc->t_write);
018e77bc 454
3bdd7fca
RZ
455 /* FPM is disabled, don't attempt to connect. */
456 if (fnc->disabled)
457 return;
458
d35f447d
RZ
459 thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
460 &fnc->t_connect);
461}
462
cc9f21da 463static void fpm_read(struct thread *t)
d35f447d
RZ
464{
465 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
a0e11736 466 fpm_msg_hdr_t fpm;
d35f447d 467 ssize_t rv;
a0e11736
DS
468 char buf[65535];
469 struct nlmsghdr *hdr;
470 struct zebra_dplane_ctx *ctx;
471 size_t available_bytes;
472 size_t hdr_available_bytes;
d35f447d
RZ
473
474 /* Read the incoming data into the input buffer. */
475 rv = stream_read_try(fnc->ibuf, fnc->socket,
476 STREAM_WRITEABLE(fnc->ibuf));
477 if (rv == 0) {
c871e6c9
RZ
478 atomic_fetch_add_explicit(&fnc->counters.connection_closes, 1,
479 memory_order_relaxed);
e5e444d8
RZ
480
481 if (IS_ZEBRA_DEBUG_FPM)
482 zlog_debug("%s: connection closed", __func__);
483
a2032324 484 FPM_RECONNECT(fnc);
cc9f21da 485 return;
d35f447d
RZ
486 }
487 if (rv == -1) {
c871e6c9
RZ
488 atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1,
489 memory_order_relaxed);
e5e444d8
RZ
490 zlog_warn("%s: connection failure: %s", __func__,
491 strerror(errno));
a2032324 492 FPM_RECONNECT(fnc);
cc9f21da 493 return;
d35f447d 494 }
7d83e139
DS
495
496 /* Schedule the next read */
497 thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
498 &fnc->t_read);
499
500 /* We've got an interruption. */
501 if (rv == -2)
502 return;
503
d35f447d 504
6cc059cd 505 /* Account all bytes read. */
c871e6c9
RZ
506 atomic_fetch_add_explicit(&fnc->counters.bytes_read, rv,
507 memory_order_relaxed);
a0e11736
DS
508
509 available_bytes = STREAM_READABLE(fnc->ibuf);
510 while (available_bytes) {
511 if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
512 stream_pulldown(fnc->ibuf);
513 return;
514 }
515
516 fpm.version = stream_getc(fnc->ibuf);
517 fpm.msg_type = stream_getc(fnc->ibuf);
518 fpm.msg_len = stream_getw(fnc->ibuf);
519
520 if (fpm.version != FPM_PROTO_VERSION &&
521 fpm.msg_type != FPM_MSG_TYPE_NETLINK) {
522 stream_reset(fnc->ibuf);
523 zlog_warn(
524 "%s: Received version/msg_type %u/%u, expected 1/1",
525 __func__, fpm.version, fpm.msg_type);
526
527 FPM_RECONNECT(fnc);
528 return;
529 }
530
531 /*
532 * If the passed in length doesn't even fill in the header
533 * something is wrong and reset.
534 */
535 if (fpm.msg_len < FPM_MSG_HDR_LEN) {
536 zlog_warn(
537 "%s: Received message length: %u that does not even fill the FPM header",
538 __func__, fpm.msg_len);
539 FPM_RECONNECT(fnc);
540 return;
541 }
542
543 /*
544 * If we have not received the whole payload, reset the stream
545 * back to the beginning of the header and move it to the
546 * top.
547 */
548 if (fpm.msg_len > available_bytes) {
549 stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
550 stream_pulldown(fnc->ibuf);
551 return;
552 }
553
554 available_bytes -= FPM_MSG_HDR_LEN;
555
556 /*
557 * Place the data from the stream into a buffer
558 */
559 hdr = (struct nlmsghdr *)buf;
560 stream_get(buf, fnc->ibuf, fpm.msg_len - FPM_MSG_HDR_LEN);
561 hdr_available_bytes = fpm.msg_len - FPM_MSG_HDR_LEN;
562 available_bytes -= hdr_available_bytes;
563
564 /* Sanity check: must be at least header size. */
565 if (hdr->nlmsg_len < sizeof(*hdr)) {
566 zlog_warn(
567 "%s: [seq=%u] invalid message length %u (< %zu)",
568 __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
569 sizeof(*hdr));
570 continue;
571 }
572 if (hdr->nlmsg_len > fpm.msg_len) {
573 zlog_warn(
574 "%s: Received an inner header length of %u that is greater than the FPM total length of %u",
575 __func__, hdr->nlmsg_len, fpm.msg_len);
576 FPM_RECONNECT(fnc);
577 }
578 /* Not enough bytes available. */
579 if (hdr->nlmsg_len > hdr_available_bytes) {
580 zlog_warn(
581 "%s: [seq=%u] invalid message length %u (> %zu)",
582 __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
583 available_bytes);
584 continue;
585 }
586
587 if (!(hdr->nlmsg_flags & NLM_F_REQUEST)) {
588 if (IS_ZEBRA_DEBUG_FPM)
589 zlog_debug(
590 "%s: [seq=%u] not a request, skipping",
591 __func__, hdr->nlmsg_seq);
592
593 /*
594 * This request is a bust, go to the next one
595 */
596 continue;
597 }
598
599 switch (hdr->nlmsg_type) {
600 case RTM_NEWROUTE:
601 ctx = dplane_ctx_alloc();
602 dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_NOTIFY);
603 if (netlink_route_change_read_unicast_internal(
604 hdr, 0, false, ctx) != 1) {
605 dplane_ctx_fini(&ctx);
606 stream_pulldown(fnc->ibuf);
607 return;
608 }
609 break;
610 default:
611 if (IS_ZEBRA_DEBUG_FPM)
612 zlog_debug(
613 "%s: Received message type %u which is not currently handled",
614 __func__, hdr->nlmsg_type);
615 break;
616 }
617 }
618
619 stream_reset(fnc->ibuf);
d35f447d
RZ
620}
621
cc9f21da 622static void fpm_write(struct thread *t)
d35f447d
RZ
623{
624 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
625 socklen_t statuslen;
626 ssize_t bwritten;
627 int rv, status;
628 size_t btotal;
629
630 if (fnc->connecting == true) {
631 status = 0;
632 statuslen = sizeof(status);
633
634 rv = getsockopt(fnc->socket, SOL_SOCKET, SO_ERROR, &status,
635 &statuslen);
636 if (rv == -1 || status != 0) {
637 if (rv != -1)
e5e444d8
RZ
638 zlog_warn("%s: connection failed: %s", __func__,
639 strerror(status));
d35f447d 640 else
e5e444d8
RZ
641 zlog_warn("%s: SO_ERROR failed: %s", __func__,
642 strerror(status));
d35f447d 643
c871e6c9
RZ
644 atomic_fetch_add_explicit(
645 &fnc->counters.connection_errors, 1,
646 memory_order_relaxed);
6cc059cd 647
a2032324 648 FPM_RECONNECT(fnc);
cc9f21da 649 return;
d35f447d
RZ
650 }
651
652 fnc->connecting = false;
018e77bc 653
f584de52
RZ
654 /*
655 * Starting with LSPs, walk all FPM objects, marking them
656 * as unsent and then replaying them.
657 */
658 thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
659 &fnc->t_lspreset);
660
e1afb97f
RZ
661 /* Permit receiving messages now. */
662 thread_add_read(fnc->fthread->master, fpm_read, fnc,
663 fnc->socket, &fnc->t_read);
d35f447d
RZ
664 }
665
666 frr_mutex_lock_autounlock(&fnc->obuf_mutex);
667
668 while (true) {
669 /* Stream is empty: reset pointers and return. */
670 if (STREAM_READABLE(fnc->obuf) == 0) {
671 stream_reset(fnc->obuf);
672 break;
673 }
674
675 /* Try to write all at once. */
676 btotal = stream_get_endp(fnc->obuf) -
677 stream_get_getp(fnc->obuf);
678 bwritten = write(fnc->socket, stream_pnt(fnc->obuf), btotal);
679 if (bwritten == 0) {
c871e6c9
RZ
680 atomic_fetch_add_explicit(
681 &fnc->counters.connection_closes, 1,
682 memory_order_relaxed);
e5e444d8
RZ
683
684 if (IS_ZEBRA_DEBUG_FPM)
685 zlog_debug("%s: connection closed", __func__);
d35f447d
RZ
686 break;
687 }
688 if (bwritten == -1) {
ad4d1022
RZ
689 /* Attempt to continue if blocked by a signal. */
690 if (errno == EINTR)
691 continue;
692 /* Receiver is probably slow, let's give it some time. */
693 if (errno == EAGAIN || errno == EWOULDBLOCK)
d35f447d
RZ
694 break;
695
c871e6c9
RZ
696 atomic_fetch_add_explicit(
697 &fnc->counters.connection_errors, 1,
698 memory_order_relaxed);
e5e444d8
RZ
699 zlog_warn("%s: connection failure: %s", __func__,
700 strerror(errno));
a2032324
RZ
701
702 FPM_RECONNECT(fnc);
cc9f21da 703 return;
d35f447d
RZ
704 }
705
6cc059cd 706 /* Account all bytes sent. */
c871e6c9
RZ
707 atomic_fetch_add_explicit(&fnc->counters.bytes_sent, bwritten,
708 memory_order_relaxed);
6cc059cd 709
710 /* Account for the bytes drained from the output buffer. */
c871e6c9
RZ
711 atomic_fetch_sub_explicit(&fnc->counters.obuf_bytes, bwritten,
712 memory_order_relaxed);
ad4d1022 713
d35f447d
RZ
714 stream_forward_getp(fnc->obuf, (size_t)bwritten);
715 }
716
717 /* Stream is not empty yet, we must schedule more writes. */
718 if (STREAM_READABLE(fnc->obuf)) {
ad4d1022 719 stream_pulldown(fnc->obuf);
d35f447d
RZ
720 thread_add_write(fnc->fthread->master, fpm_write, fnc,
721 fnc->socket, &fnc->t_write);
cc9f21da 722 return;
d35f447d 723 }
d35f447d
RZ
724}
725
cc9f21da 726static void fpm_connect(struct thread *t)
d35f447d
RZ
727{
728 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
3bdd7fca
RZ
729 struct sockaddr_in *sin = (struct sockaddr_in *)&fnc->addr;
730 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&fnc->addr;
731 socklen_t slen;
d35f447d
RZ
732 int rv, sock;
733 char addrstr[INET6_ADDRSTRLEN];
734
3bdd7fca 735 sock = socket(fnc->addr.ss_family, SOCK_STREAM, 0);
d35f447d 736 if (sock == -1) {
6cc059cd 737 zlog_err("%s: fpm socket failed: %s", __func__,
d35f447d
RZ
738 strerror(errno));
739 thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
740 &fnc->t_connect);
cc9f21da 741 return;
d35f447d
RZ
742 }
743
744 set_nonblocking(sock);
745
3bdd7fca
RZ
746 if (fnc->addr.ss_family == AF_INET) {
747 inet_ntop(AF_INET, &sin->sin_addr, addrstr, sizeof(addrstr));
748 slen = sizeof(*sin);
749 } else {
750 inet_ntop(AF_INET6, &sin6->sin6_addr, addrstr, sizeof(addrstr));
751 slen = sizeof(*sin6);
752 }
d35f447d 753
e5e444d8
RZ
754 if (IS_ZEBRA_DEBUG_FPM)
755 zlog_debug("%s: attempting to connect to %s:%d", __func__,
756 addrstr, ntohs(sin->sin_port));
d35f447d 757
3bdd7fca 758 rv = connect(sock, (struct sockaddr *)&fnc->addr, slen);
d35f447d 759 if (rv == -1 && errno != EINPROGRESS) {
c871e6c9
RZ
760 atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1,
761 memory_order_relaxed);
d35f447d
RZ
762 close(sock);
763 zlog_warn("%s: fpm connection failed: %s", __func__,
764 strerror(errno));
765 thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
766 &fnc->t_connect);
cc9f21da 767 return;
d35f447d
RZ
768 }
769
770 fnc->connecting = (errno == EINPROGRESS);
771 fnc->socket = sock;
e1afb97f
RZ
772 if (!fnc->connecting)
773 thread_add_read(fnc->fthread->master, fpm_read, fnc, sock,
774 &fnc->t_read);
d35f447d
RZ
775 thread_add_write(fnc->fthread->master, fpm_write, fnc, sock,
776 &fnc->t_write);
777
f9bf1ecc
DE
778 /*
779 * Starting with LSPs, walk all FPM objects, marking them
780 * as unsent and then replaying them.
f584de52
RZ
781 *
782 * If we are not connected, then delay the objects reset/send.
f9bf1ecc 783 */
f584de52
RZ
784 if (!fnc->connecting)
785 thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
786 &fnc->t_lspreset);
d35f447d
RZ
787}
788
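/*
 * Note: the socket is non-blocking, so connect() normally returns with
 * EINPROGRESS; fnc->connecting stays true until fpm_write() confirms the
 * connection with getsockopt(SO_ERROR) and only then schedules fpm_read()
 * and the LSP/NHG/RIB/RMAC replay.
 */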
789/**
790 * Encode data plane operation context into netlink and enqueue it in the FPM
791 * output buffer.
792 *
793 * @param fnc the netlink FPM context.
794 * @param ctx the data plane operation context data.
795 * @return 0 on success or -1 when there is not enough buffer space.
796 */
797static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
798{
799 uint8_t nl_buf[NL_PKT_BUF_SIZE];
800 size_t nl_buf_len;
801 ssize_t rv;
edfeff42 802 uint64_t obytes, obytes_peak;
b55ab92a
RZ
803 enum dplane_op_e op = dplane_ctx_get_op(ctx);
804
805 /*
806 * If we were configured to not use next hop groups, then quit as soon
807 * as possible.
808 */
809 if ((!fnc->use_nhg)
810 && (op == DPLANE_OP_NH_DELETE || op == DPLANE_OP_NH_INSTALL
811 || op == DPLANE_OP_NH_UPDATE))
812 return 0;
d35f447d
RZ
813
814 nl_buf_len = 0;
815
816 frr_mutex_lock_autounlock(&fnc->obuf_mutex);
817
b55ab92a 818 switch (op) {
d35f447d
RZ
819 case DPLANE_OP_ROUTE_UPDATE:
820 case DPLANE_OP_ROUTE_DELETE:
0be6e7d7
JU
821 rv = netlink_route_multipath_msg_encode(RTM_DELROUTE, ctx,
822 nl_buf, sizeof(nl_buf),
823 true, fnc->use_nhg);
d35f447d 824 if (rv <= 0) {
0be6e7d7
JU
825 zlog_err(
826 "%s: netlink_route_multipath_msg_encode failed",
827 __func__);
d35f447d
RZ
828 return 0;
829 }
830
831 nl_buf_len = (size_t)rv;
d35f447d
RZ
832
833 /* UPDATE operations need an INSTALL, otherwise just quit. */
b55ab92a 834 if (op == DPLANE_OP_ROUTE_DELETE)
d35f447d
RZ
835 break;
836
837 /* FALL THROUGH */
838 case DPLANE_OP_ROUTE_INSTALL:
0be6e7d7 839 rv = netlink_route_multipath_msg_encode(
b55ab92a
RZ
840 RTM_NEWROUTE, ctx, &nl_buf[nl_buf_len],
841 sizeof(nl_buf) - nl_buf_len, true, fnc->use_nhg);
d35f447d 842 if (rv <= 0) {
0be6e7d7
JU
843 zlog_err(
844 "%s: netlink_route_multipath_msg_encode failed",
845 __func__);
d35f447d
RZ
846 return 0;
847 }
848
849 nl_buf_len += (size_t)rv;
d35f447d
RZ
850 break;
851
bda10adf
RZ
852 case DPLANE_OP_MAC_INSTALL:
853 case DPLANE_OP_MAC_DELETE:
854 rv = netlink_macfdb_update_ctx(ctx, nl_buf, sizeof(nl_buf));
855 if (rv <= 0) {
e5e444d8
RZ
856 zlog_err("%s: netlink_macfdb_update_ctx failed",
857 __func__);
bda10adf
RZ
858 return 0;
859 }
860
861 nl_buf_len = (size_t)rv;
bda10adf
RZ
862 break;
863
e9a1cd93 864 case DPLANE_OP_NH_DELETE:
0be6e7d7 865 rv = netlink_nexthop_msg_encode(RTM_DELNEXTHOP, ctx, nl_buf,
45c12994 866 sizeof(nl_buf), true);
e9a1cd93 867 if (rv <= 0) {
0be6e7d7
JU
868 zlog_err("%s: netlink_nexthop_msg_encode failed",
869 __func__);
e9a1cd93
RZ
870 return 0;
871 }
872
873 nl_buf_len = (size_t)rv;
874 break;
d35f447d
RZ
875 case DPLANE_OP_NH_INSTALL:
876 case DPLANE_OP_NH_UPDATE:
0be6e7d7 877 rv = netlink_nexthop_msg_encode(RTM_NEWNEXTHOP, ctx, nl_buf,
45c12994 878 sizeof(nl_buf), true);
e9a1cd93 879 if (rv <= 0) {
0be6e7d7
JU
880 zlog_err("%s: netlink_nexthop_msg_encode failed",
881 __func__);
e9a1cd93
RZ
882 return 0;
883 }
884
885 nl_buf_len = (size_t)rv;
886 break;
887
d35f447d
RZ
888 case DPLANE_OP_LSP_INSTALL:
889 case DPLANE_OP_LSP_UPDATE:
890 case DPLANE_OP_LSP_DELETE:
b300c8bb
DE
891 rv = netlink_lsp_msg_encoder(ctx, nl_buf, sizeof(nl_buf));
892 if (rv <= 0) {
f9bf1ecc
DE
893 zlog_err("%s: netlink_lsp_msg_encoder failed",
894 __func__);
b300c8bb
DE
895 return 0;
896 }
897
898 nl_buf_len += (size_t)rv;
899 break;
900
901 /* Unhandled by FPM at this time. */
d35f447d
RZ
902 case DPLANE_OP_PW_INSTALL:
903 case DPLANE_OP_PW_UNINSTALL:
904 case DPLANE_OP_ADDR_INSTALL:
905 case DPLANE_OP_ADDR_UNINSTALL:
d35f447d
RZ
906 case DPLANE_OP_NEIGH_INSTALL:
907 case DPLANE_OP_NEIGH_UPDATE:
908 case DPLANE_OP_NEIGH_DELETE:
909 case DPLANE_OP_VTEP_ADD:
910 case DPLANE_OP_VTEP_DELETE:
911 case DPLANE_OP_SYS_ROUTE_ADD:
912 case DPLANE_OP_SYS_ROUTE_DELETE:
913 case DPLANE_OP_ROUTE_NOTIFY:
914 case DPLANE_OP_LSP_NOTIFY:
d4bcd88d
MS
915 case DPLANE_OP_RULE_ADD:
916 case DPLANE_OP_RULE_DELETE:
917 case DPLANE_OP_RULE_UPDATE:
918 case DPLANE_OP_NEIGH_DISCOVER:
919 case DPLANE_OP_BR_PORT_UPDATE:
920 case DPLANE_OP_IPTABLE_ADD:
921 case DPLANE_OP_IPTABLE_DELETE:
922 case DPLANE_OP_IPSET_ADD:
923 case DPLANE_OP_IPSET_DELETE:
924 case DPLANE_OP_IPSET_ENTRY_ADD:
925 case DPLANE_OP_IPSET_ENTRY_DELETE:
926 case DPLANE_OP_NEIGH_IP_INSTALL:
927 case DPLANE_OP_NEIGH_IP_DELETE:
928 case DPLANE_OP_NEIGH_TABLE_UPDATE:
929 case DPLANE_OP_GRE_SET:
930 case DPLANE_OP_INTF_ADDR_ADD:
931 case DPLANE_OP_INTF_ADDR_DEL:
728f2017 932 case DPLANE_OP_INTF_NETCONFIG:
5d414138
SW
933 case DPLANE_OP_INTF_INSTALL:
934 case DPLANE_OP_INTF_UPDATE:
935 case DPLANE_OP_INTF_DELETE:
c317d3f2
SY
936 case DPLANE_OP_TC_QDISC_INSTALL:
937 case DPLANE_OP_TC_QDISC_UNINSTALL:
938 case DPLANE_OP_TC_CLASS_ADD:
939 case DPLANE_OP_TC_CLASS_DELETE:
940 case DPLANE_OP_TC_CLASS_UPDATE:
941 case DPLANE_OP_TC_FILTER_ADD:
942 case DPLANE_OP_TC_FILTER_DELETE:
943 case DPLANE_OP_TC_FILTER_UPDATE:
d35f447d
RZ
944 case DPLANE_OP_NONE:
945 break;
946
d35f447d
RZ
947 }
948
949 /* Skip empty enqueues. */
950 if (nl_buf_len == 0)
951 return 0;
952
a179ba35
RZ
953 /* We must know if a message ever exceeds the 16-bit FPM length field. */
954 assert((nl_buf_len + FPM_HEADER_SIZE) <= UINT16_MAX);
955
956 /* Check if we have enough buffer space. */
957 if (STREAM_WRITEABLE(fnc->obuf) < (nl_buf_len + FPM_HEADER_SIZE)) {
c871e6c9
RZ
958 atomic_fetch_add_explicit(&fnc->counters.buffer_full, 1,
959 memory_order_relaxed);
e5e444d8
RZ
960
961 if (IS_ZEBRA_DEBUG_FPM)
962 zlog_debug(
963 "%s: buffer full: wants to write %zu but has %zu",
964 __func__, nl_buf_len + FPM_HEADER_SIZE,
965 STREAM_WRITEABLE(fnc->obuf));
966
a179ba35
RZ
967 return -1;
968 }
969
d35f447d 970 /*
a179ba35
RZ
971 * Fill in the FPM header information.
972 *
973 * See FPM_HEADER_SIZE definition for more information.
d35f447d
RZ
974 */
975 stream_putc(fnc->obuf, 1);
976 stream_putc(fnc->obuf, 1);
a179ba35 977 stream_putw(fnc->obuf, nl_buf_len + FPM_HEADER_SIZE);
d35f447d
RZ
978
979 /* Write current data. */
980 stream_write(fnc->obuf, nl_buf, (size_t)nl_buf_len);
981
ad4d1022 982 /* Account number of bytes waiting to be written. */
c871e6c9
RZ
983 atomic_fetch_add_explicit(&fnc->counters.obuf_bytes,
984 nl_buf_len + FPM_HEADER_SIZE,
985 memory_order_relaxed);
edfeff42
RZ
986 obytes = atomic_load_explicit(&fnc->counters.obuf_bytes,
987 memory_order_relaxed);
988 obytes_peak = atomic_load_explicit(&fnc->counters.obuf_peak,
989 memory_order_relaxed);
990 if (obytes_peak < obytes)
c871e6c9
RZ
991 atomic_store_explicit(&fnc->counters.obuf_peak, obytes,
992 memory_order_relaxed);
ad4d1022 993
d35f447d
RZ
994 /* Tell the thread to start writing. */
995 thread_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
996 &fnc->t_write);
997
998 return 0;
999}
1000
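/*
 * Resulting obuf layout after one successful enqueue (illustrative):
 *
 *   | 0x01 | 0x01 | length (2 bytes, network order) | netlink message(s) |
 *
 * where length = nl_buf_len + FPM_HEADER_SIZE, matching the stream_putc()/
 * stream_putw()/stream_write() sequence above.
 */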
f9bf1ecc
DE
1001/*
1002 * LSP walk/send functions
1003 */
1004struct fpm_lsp_arg {
1005 struct zebra_dplane_ctx *ctx;
1006 struct fpm_nl_ctx *fnc;
1007 bool complete;
1008};
1009
1010static int fpm_lsp_send_cb(struct hash_bucket *bucket, void *arg)
1011{
8f74a383 1012 struct zebra_lsp *lsp = bucket->data;
f9bf1ecc
DE
1013 struct fpm_lsp_arg *fla = arg;
1014
1015 /* Skip entries which have already been sent */
1016 if (CHECK_FLAG(lsp->flags, LSP_FLAG_FPM))
1017 return HASHWALK_CONTINUE;
1018
1019 dplane_ctx_reset(fla->ctx);
1020 dplane_ctx_lsp_init(fla->ctx, DPLANE_OP_LSP_INSTALL, lsp);
1021
1022 if (fpm_nl_enqueue(fla->fnc, fla->ctx) == -1) {
1023 fla->complete = false;
1024 return HASHWALK_ABORT;
1025 }
1026
1027 /* Mark entry as sent */
1028 SET_FLAG(lsp->flags, LSP_FLAG_FPM);
1029 return HASHWALK_CONTINUE;
1030}
1031
cc9f21da 1032static void fpm_lsp_send(struct thread *t)
f9bf1ecc
DE
1033{
1034 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1035 struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
1036 struct fpm_lsp_arg fla;
1037
1038 fla.fnc = fnc;
1039 fla.ctx = dplane_ctx_alloc();
1040 fla.complete = true;
1041
1042 hash_walk(zvrf->lsp_table, fpm_lsp_send_cb, &fla);
1043
1044 dplane_ctx_fini(&fla.ctx);
1045
1046 if (fla.complete) {
1047 WALK_FINISH(fnc, FNE_LSP_FINISHED);
1048
1049 /* Now move onto routes */
1f9193c1
RZ
1050 thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
1051 &fnc->t_nhgreset);
f9bf1ecc
DE
1052 } else {
1053 /* Didn't finish - reschedule LSP walk */
1054 thread_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
1055 &fnc->t_lspwalk);
1056 }
f9bf1ecc
DE
1057}
1058
981ca597
RZ
1059/*
1060 * Next hop walk/send functions.
1061 */
1062struct fpm_nhg_arg {
1063 struct zebra_dplane_ctx *ctx;
1064 struct fpm_nl_ctx *fnc;
1065 bool complete;
1066};
1067
1068static int fpm_nhg_send_cb(struct hash_bucket *bucket, void *arg)
1069{
1070 struct nhg_hash_entry *nhe = bucket->data;
1071 struct fpm_nhg_arg *fna = arg;
1072
1073 /* This entry was already sent, skip it. */
1074 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_FPM))
1075 return HASHWALK_CONTINUE;
1076
1077 /* Reset ctx to reuse allocated memory, take a snapshot and send it. */
1078 dplane_ctx_reset(fna->ctx);
1079 dplane_ctx_nexthop_init(fna->ctx, DPLANE_OP_NH_INSTALL, nhe);
1080 if (fpm_nl_enqueue(fna->fnc, fna->ctx) == -1) {
1081 /* Our buffers are full, lets give it some cycles. */
1082 fna->complete = false;
1083 return HASHWALK_ABORT;
1084 }
1085
1086 /* Mark group as sent, so it doesn't get sent again. */
1087 SET_FLAG(nhe->flags, NEXTHOP_GROUP_FPM);
1088
1089 return HASHWALK_CONTINUE;
1090}
1091
cc9f21da 1092static void fpm_nhg_send(struct thread *t)
981ca597
RZ
1093{
1094 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1095 struct fpm_nhg_arg fna;
1096
1097 fna.fnc = fnc;
1098 fna.ctx = dplane_ctx_alloc();
1099 fna.complete = true;
1100
1101 /* Send next hops. */
1f9193c1
RZ
1102 if (fnc->use_nhg)
1103 hash_walk(zrouter.nhgs_id, fpm_nhg_send_cb, &fna);
981ca597
RZ
1104
1105 /* `free()` allocated memory. */
1106 dplane_ctx_fini(&fna.ctx);
1107
1108 /* We are done sending next hops, lets install the routes now. */
55eb9d4d
RZ
1109 if (fna.complete) {
1110 WALK_FINISH(fnc, FNE_NHG_FINISHED);
e41e0f81
RZ
1111 thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
1112 &fnc->t_ribreset);
55eb9d4d 1113 } else /* Otherwise reschedule next hop group again. */
981ca597
RZ
1114 thread_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
1115 &fnc->t_nhgwalk);
981ca597
RZ
1116}
1117
018e77bc
RZ
1118/**
1119 * Send all installed RIB routes to the connected data plane.
1120 */
cc9f21da 1121static void fpm_rib_send(struct thread *t)
018e77bc
RZ
1122{
1123 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1124 rib_dest_t *dest;
1125 struct route_node *rn;
1126 struct route_table *rt;
1127 struct zebra_dplane_ctx *ctx;
1128 rib_tables_iter_t rt_iter;
1129
1130 /* Allocate temporary context for all transactions. */
1131 ctx = dplane_ctx_alloc();
1132
1133 rt_iter.state = RIB_TABLES_ITER_S_INIT;
1134 while ((rt = rib_tables_iter_next(&rt_iter))) {
1135 for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
1136 dest = rib_dest_from_rnode(rn);
1137 /* Skip bad route entries. */
a50404aa 1138 if (dest == NULL || dest->selected_fib == NULL)
018e77bc 1139 continue;
018e77bc
RZ
1140
1141 /* Check for already sent routes. */
a50404aa 1142 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM))
018e77bc 1143 continue;
018e77bc
RZ
1144
1145 /* Enqueue route install. */
1146 dplane_ctx_reset(ctx);
1147 dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_INSTALL, rn,
1148 dest->selected_fib);
1149 if (fpm_nl_enqueue(fnc, ctx) == -1) {
1150 /* Free the temporary allocated context. */
1151 dplane_ctx_fini(&ctx);
1152
018e77bc
RZ
1153 thread_add_timer(zrouter.master, fpm_rib_send,
1154 fnc, 1, &fnc->t_ribwalk);
cc9f21da 1155 return;
018e77bc
RZ
1156 }
1157
1158 /* Mark as sent. */
1159 SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1160 }
1161 }
1162
1163 /* Free the temporary allocated context. */
1164 dplane_ctx_fini(&ctx);
1165
1166 /* All RIB routes sent! */
55eb9d4d 1167 WALK_FINISH(fnc, FNE_RIB_FINISHED);
018e77bc 1168
e41e0f81
RZ
1169 /* Schedule next event: RMAC reset. */
1170 thread_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
1171 &fnc->t_rmacreset);
018e77bc
RZ
1172}
1173
bda10adf
RZ
1174/*
1175 * The next three functions will handle RMAC enqueue.
1176 */
1177struct fpm_rmac_arg {
1178 struct zebra_dplane_ctx *ctx;
1179 struct fpm_nl_ctx *fnc;
05843a27 1180 struct zebra_l3vni *zl3vni;
55eb9d4d 1181 bool complete;
bda10adf
RZ
1182};
1183
1ac88792 1184static void fpm_enqueue_rmac_table(struct hash_bucket *bucket, void *arg)
bda10adf
RZ
1185{
1186 struct fpm_rmac_arg *fra = arg;
3198b2b3 1187 struct zebra_mac *zrmac = bucket->data;
bda10adf
RZ
1188 struct zebra_if *zif = fra->zl3vni->vxlan_if->info;
1189 const struct zebra_l2info_vxlan *vxl = &zif->l2info.vxl;
1190 struct zebra_if *br_zif;
1191 vlanid_t vid;
1192 bool sticky;
1193
1194 /* Entry already sent. */
55eb9d4d 1195 if (CHECK_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT) || !fra->complete)
bda10adf
RZ
1196 return;
1197
1198 sticky = !!CHECK_FLAG(zrmac->flags,
1199 (ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW));
1200 br_zif = (struct zebra_if *)(zif->brslave_info.br_if->info);
1201 vid = IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif) ? vxl->access_vlan : 0;
1202
1203 dplane_ctx_reset(fra->ctx);
1204 dplane_ctx_set_op(fra->ctx, DPLANE_OP_MAC_INSTALL);
1205 dplane_mac_init(fra->ctx, fra->zl3vni->vxlan_if,
f2a0ba3a 1206 zif->brslave_info.br_if, vid,
f188e68e
AK
1207 &zrmac->macaddr, zrmac->fwd_info.r_vtep_ip, sticky,
1208 0 /*nhg*/, 0 /*update_flags*/);
bda10adf 1209 if (fpm_nl_enqueue(fra->fnc, fra->ctx) == -1) {
bda10adf
RZ
1210 thread_add_timer(zrouter.master, fpm_rmac_send,
1211 fra->fnc, 1, &fra->fnc->t_rmacwalk);
55eb9d4d 1212 fra->complete = false;
bda10adf
RZ
1213 }
1214}
1215
1ac88792 1216static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg)
bda10adf
RZ
1217{
1218 struct fpm_rmac_arg *fra = arg;
05843a27 1219 struct zebra_l3vni *zl3vni = bucket->data;
bda10adf
RZ
1220
1221 fra->zl3vni = zl3vni;
1222 hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, fra);
1223}
1224
cc9f21da 1225static void fpm_rmac_send(struct thread *t)
bda10adf
RZ
1226{
1227 struct fpm_rmac_arg fra;
1228
1229 fra.fnc = THREAD_ARG(t);
1230 fra.ctx = dplane_ctx_alloc();
55eb9d4d 1231 fra.complete = true;
bda10adf
RZ
1232 hash_iterate(zrouter.l3vni_table, fpm_enqueue_l3vni_table, &fra);
1233 dplane_ctx_fini(&fra.ctx);
1234
55eb9d4d
RZ
1235 /* RMAC walk completed. */
1236 if (fra.complete)
1237 WALK_FINISH(fra.fnc, FNE_RMAC_FINISHED);
bda10adf
RZ
1238}
1239
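/*
 * Replay sequence on (re)connect: fpm_lsp_reset() -> fpm_lsp_send() ->
 * fpm_nhg_reset() -> fpm_nhg_send() -> fpm_rib_reset() -> fpm_rib_send() ->
 * fpm_rmac_reset() -> fpm_rmac_send(). Each *_send() step reschedules
 * itself whenever fpm_nl_enqueue() reports a full output buffer.
 */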
981ca597
RZ
1240/*
1241 * Resets the next hop FPM flags so we send all next hops again.
1242 */
1243static void fpm_nhg_reset_cb(struct hash_bucket *bucket, void *arg)
1244{
1245 struct nhg_hash_entry *nhe = bucket->data;
1246
1247 /* Unset FPM installation flag so it gets installed again. */
1248 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_FPM);
1249}
1250
cc9f21da 1251static void fpm_nhg_reset(struct thread *t)
981ca597 1252{
55eb9d4d
RZ
1253 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1254
981ca597 1255 hash_iterate(zrouter.nhgs_id, fpm_nhg_reset_cb, NULL);
e41e0f81
RZ
1256
1257 /* Schedule next step: send next hop groups. */
1258 thread_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
981ca597
RZ
1259}
1260
f9bf1ecc
DE
1261/*
1262 * Resets the LSP FPM flag so we send all LSPs again.
1263 */
1264static void fpm_lsp_reset_cb(struct hash_bucket *bucket, void *arg)
1265{
8f74a383 1266 struct zebra_lsp *lsp = bucket->data;
f9bf1ecc
DE
1267
1268 UNSET_FLAG(lsp->flags, LSP_FLAG_FPM);
1269}
1270
cc9f21da 1271static void fpm_lsp_reset(struct thread *t)
f9bf1ecc
DE
1272{
1273 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1274 struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
1275
1276 hash_iterate(zvrf->lsp_table, fpm_lsp_reset_cb, NULL);
1277
1278 /* Schedule next step: send LSPs */
1279 thread_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
f9bf1ecc
DE
1280}
1281
018e77bc
RZ
1282/**
1283 * Resets the RIB FPM flags so we send all routes again.
1284 */
cc9f21da 1285static void fpm_rib_reset(struct thread *t)
018e77bc
RZ
1286{
1287 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1288 rib_dest_t *dest;
1289 struct route_node *rn;
1290 struct route_table *rt;
1291 rib_tables_iter_t rt_iter;
1292
018e77bc
RZ
1293 rt_iter.state = RIB_TABLES_ITER_S_INIT;
1294 while ((rt = rib_tables_iter_next(&rt_iter))) {
1295 for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
1296 dest = rib_dest_from_rnode(rn);
1297 /* Skip bad route entries. */
1298 if (dest == NULL)
1299 continue;
1300
1301 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1302 }
1303 }
1304
e41e0f81
RZ
1305 /* Schedule next step: send RIB routes. */
1306 thread_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
018e77bc
RZ
1307}
1308
bda10adf
RZ
1309/*
1310 * The next three functions will handle RMAC table reset.
1311 */
1ac88792 1312static void fpm_unset_rmac_table(struct hash_bucket *bucket, void *arg)
bda10adf 1313{
3198b2b3 1314 struct zebra_mac *zrmac = bucket->data;
bda10adf
RZ
1315
1316 UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT);
1317}
1318
1ac88792 1319static void fpm_unset_l3vni_table(struct hash_bucket *bucket, void *arg)
bda10adf 1320{
05843a27 1321 struct zebra_l3vni *zl3vni = bucket->data;
bda10adf
RZ
1322
1323 hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni);
1324}
1325
cc9f21da 1326static void fpm_rmac_reset(struct thread *t)
bda10adf 1327{
55eb9d4d
RZ
1328 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1329
bda10adf
RZ
1330 hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL);
1331
e41e0f81
RZ
1332 /* Schedule next event: send RMAC entries. */
1333 thread_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
1334 &fnc->t_rmacwalk);
bda10adf
RZ
1335}
1336
cc9f21da 1337static void fpm_process_queue(struct thread *t)
ba803a2f
RZ
1338{
1339 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1340 struct zebra_dplane_ctx *ctx;
3f2b998f 1341 bool no_bufs = false;
438dd3e7 1342 uint64_t processed_contexts = 0;
ba803a2f 1343
ba803a2f
RZ
1344 while (true) {
1345 /* No space available yet. */
3f2b998f
DE
1346 if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) {
1347 no_bufs = true;
ba803a2f 1348 break;
3f2b998f 1349 }
ba803a2f
RZ
1350
1351 /* Dequeue next item or quit processing. */
dc693fe0
DE
1352 frr_with_mutex (&fnc->ctxqueue_mutex) {
1353 ctx = dplane_ctx_dequeue(&fnc->ctxqueue);
1354 }
ba803a2f
RZ
1355 if (ctx == NULL)
1356 break;
1357
3a150188
DS
1358 /*
1359 * Intentionally ignoring the return value: the
1360 * STREAM_WRITEABLE check above already ensured there is
1361 * room in the output buffer, so the enqueue cannot fail
1362 * for lack of space.
1363 */
3b1caddd
RZ
1364 if (fnc->socket != -1)
1365 (void)fpm_nl_enqueue(fnc, ctx);
ba803a2f
RZ
1366
1367 /* Account the processed entries. */
438dd3e7 1368 processed_contexts++;
c871e6c9
RZ
1369 atomic_fetch_sub_explicit(&fnc->counters.ctxqueue_len, 1,
1370 memory_order_relaxed);
ba803a2f
RZ
1371
1372 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1373 dplane_provider_enqueue_out_ctx(fnc->prov, ctx);
1374 }
1375
438dd3e7
DE
1376 /* Update count of processed contexts */
1377 atomic_fetch_add_explicit(&fnc->counters.dplane_contexts,
1378 processed_contexts, memory_order_relaxed);
1379
3f2b998f
DE
1380 /* Re-schedule if we ran out of buffer space */
1381 if (no_bufs)
ba803a2f
RZ
1382 thread_add_timer(fnc->fthread->master, fpm_process_queue,
1383 fnc, 0, &fnc->t_dequeue);
1384
164d8e86
DE
1385 /*
1386 * Let the dataplane thread know if there are items in the
1387 * output queue to be processed. Otherwise they may sit
1388 * until the dataplane thread gets scheduled for new,
1389 * unrelated work.
1390 */
1391 if (dplane_provider_out_ctx_queue_len(fnc->prov) > 0)
1392 dplane_provider_work_ready();
ba803a2f
RZ
1393}
1394
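/*
 * Queueing model (summary): fpm_nl_process() runs in the data plane
 * provider context and only moves contexts into fnc->ctxqueue under
 * ctxqueue_mutex; fpm_process_queue() runs on the FPM pthread, drains the
 * queue into obuf via fpm_nl_enqueue() and returns the contexts to the
 * data plane marked ZEBRA_DPLANE_REQUEST_SUCCESS.
 */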
3bdd7fca
RZ
1395/**
1396 * Handles external events (e.g. from the CLI, the data plane or others).
1397 */
cc9f21da 1398static void fpm_process_event(struct thread *t)
3bdd7fca
RZ
1399{
1400 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
dc31de93 1401 enum fpm_nl_events event = THREAD_VAL(t);
3bdd7fca
RZ
1402
1403 switch (event) {
1404 case FNE_DISABLE:
e5e444d8 1405 zlog_info("%s: manual FPM disable event", __func__);
3bdd7fca 1406 fnc->disabled = true;
c871e6c9
RZ
1407 atomic_fetch_add_explicit(&fnc->counters.user_disables, 1,
1408 memory_order_relaxed);
3bdd7fca
RZ
1409
1410 /* Call reconnect to disable timers and clean up context. */
1411 fpm_reconnect(fnc);
1412 break;
1413
1414 case FNE_RECONNECT:
e5e444d8 1415 zlog_info("%s: manual FPM reconnect event", __func__);
3bdd7fca 1416 fnc->disabled = false;
c871e6c9
RZ
1417 atomic_fetch_add_explicit(&fnc->counters.user_configures, 1,
1418 memory_order_relaxed);
3bdd7fca
RZ
1419 fpm_reconnect(fnc);
1420 break;
1421
6cc059cd 1422 case FNE_RESET_COUNTERS:
e5e444d8 1423 zlog_info("%s: manual FPM counters reset event", __func__);
6cc059cd
RZ
1424 memset(&fnc->counters, 0, sizeof(fnc->counters));
1425 break;
1426
b55ab92a
RZ
1427 case FNE_TOGGLE_NHG:
1428 zlog_info("%s: toggle next hop groups support", __func__);
1429 fnc->use_nhg = !fnc->use_nhg;
1430 fpm_reconnect(fnc);
1431 break;
1432
a2032324
RZ
1433 case FNE_INTERNAL_RECONNECT:
1434 fpm_reconnect(fnc);
1435 break;
1436
55eb9d4d
RZ
1437 case FNE_NHG_FINISHED:
1438 if (IS_ZEBRA_DEBUG_FPM)
1439 zlog_debug("%s: next hop groups walk finished",
1440 __func__);
55eb9d4d
RZ
1441 break;
1442 case FNE_RIB_FINISHED:
1443 if (IS_ZEBRA_DEBUG_FPM)
1444 zlog_debug("%s: RIB walk finished", __func__);
55eb9d4d
RZ
1445 break;
1446 case FNE_RMAC_FINISHED:
1447 if (IS_ZEBRA_DEBUG_FPM)
1448 zlog_debug("%s: RMAC walk finished", __func__);
55eb9d4d 1449 break;
f9bf1ecc
DE
1450 case FNE_LSP_FINISHED:
1451 if (IS_ZEBRA_DEBUG_FPM)
1452 zlog_debug("%s: LSP walk finished", __func__);
1453 break;
3bdd7fca 1454 }
3bdd7fca
RZ
1455}
1456
d35f447d
RZ
1457/*
1458 * Data plane functions.
1459 */
1460static int fpm_nl_start(struct zebra_dplane_provider *prov)
1461{
1462 struct fpm_nl_ctx *fnc;
1463
1464 fnc = dplane_provider_get_data(prov);
1465 fnc->fthread = frr_pthread_new(NULL, prov_name, prov_name);
1466 assert(frr_pthread_run(fnc->fthread, NULL) == 0);
1467 fnc->ibuf = stream_new(NL_PKT_BUF_SIZE);
1468 fnc->obuf = stream_new(NL_PKT_BUF_SIZE * 128);
1469 pthread_mutex_init(&fnc->obuf_mutex, NULL);
1470 fnc->socket = -1;
3bdd7fca 1471 fnc->disabled = true;
ba803a2f
RZ
1472 fnc->prov = prov;
1473 TAILQ_INIT(&fnc->ctxqueue);
1474 pthread_mutex_init(&fnc->ctxqueue_mutex, NULL);
d35f447d 1475
b55ab92a
RZ
1476 /* Set default values. */
1477 fnc->use_nhg = true;
1478
d35f447d
RZ
1479 return 0;
1480}
1481
98a87504 1482static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
d35f447d 1483{
98a87504 1484 /* Disable all events and close socket. */
f9bf1ecc
DE
1485 THREAD_OFF(fnc->t_lspreset);
1486 THREAD_OFF(fnc->t_lspwalk);
981ca597
RZ
1487 THREAD_OFF(fnc->t_nhgreset);
1488 THREAD_OFF(fnc->t_nhgwalk);
98a87504
RZ
1489 THREAD_OFF(fnc->t_ribreset);
1490 THREAD_OFF(fnc->t_ribwalk);
1491 THREAD_OFF(fnc->t_rmacreset);
1492 THREAD_OFF(fnc->t_rmacwalk);
551fa8c3
DS
1493 THREAD_OFF(fnc->t_event);
1494 THREAD_OFF(fnc->t_nhg);
98a87504
RZ
1495 thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
1496 thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
1497 thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
d35f447d 1498
98a87504
RZ
1499 if (fnc->socket != -1) {
1500 close(fnc->socket);
1501 fnc->socket = -1;
1502 }
1503
1504 return 0;
1505}
1506
1507static int fpm_nl_finish_late(struct fpm_nl_ctx *fnc)
1508{
1509 /* Stop the running thread. */
1510 frr_pthread_stop(fnc->fthread, NULL);
1511
1512 /* Free all allocated resources. */
1513 pthread_mutex_destroy(&fnc->obuf_mutex);
1514 pthread_mutex_destroy(&fnc->ctxqueue_mutex);
d35f447d
RZ
1515 stream_free(fnc->ibuf);
1516 stream_free(fnc->obuf);
98a87504
RZ
1517 free(gfnc);
1518 gfnc = NULL;
d35f447d
RZ
1519
1520 return 0;
1521}
1522
98a87504
RZ
1523static int fpm_nl_finish(struct zebra_dplane_provider *prov, bool early)
1524{
1525 struct fpm_nl_ctx *fnc;
1526
1527 fnc = dplane_provider_get_data(prov);
1528 if (early)
1529 return fpm_nl_finish_early(fnc);
1530
1531 return fpm_nl_finish_late(fnc);
1532}
1533
d35f447d
RZ
1534static int fpm_nl_process(struct zebra_dplane_provider *prov)
1535{
1536 struct zebra_dplane_ctx *ctx;
1537 struct fpm_nl_ctx *fnc;
1538 int counter, limit;
bf2f7839 1539 uint64_t cur_queue, peak_queue = 0, stored_peak_queue;
d35f447d
RZ
1540
1541 fnc = dplane_provider_get_data(prov);
1542 limit = dplane_provider_get_work_limit(prov);
1543 for (counter = 0; counter < limit; counter++) {
1544 ctx = dplane_provider_dequeue_in_ctx(prov);
1545 if (ctx == NULL)
1546 break;
1547
1548 /*
1549 * Skip all notifications if not connected, we'll walk the RIB
1550 * anyway.
1551 */
6cc059cd 1552 if (fnc->socket != -1 && fnc->connecting == false) {
dc693fe0
DE
1553 /*
1554 * Update the number of queued contexts *before*
1555 * enqueueing, to ensure counter consistency.
1556 */
c871e6c9
RZ
1557 atomic_fetch_add_explicit(&fnc->counters.ctxqueue_len,
1558 1, memory_order_relaxed);
dc693fe0
DE
1559
1560 frr_with_mutex (&fnc->ctxqueue_mutex) {
1561 dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx);
1562 }
1563
c871e6c9
RZ
1564 cur_queue = atomic_load_explicit(
1565 &fnc->counters.ctxqueue_len,
1566 memory_order_relaxed);
edfeff42 1567 if (peak_queue < cur_queue)
bf2f7839 1568 peak_queue = cur_queue;
ba803a2f 1569 continue;
6cc059cd
RZ
1570 }
1571
d35f447d
RZ
1572 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1573 dplane_provider_enqueue_out_ctx(prov, ctx);
1574 }
1575
bf2f7839
DE
1576 /* Update peak queue length, if we just observed a new peak */
1577 stored_peak_queue = atomic_load_explicit(
1578 &fnc->counters.ctxqueue_len_peak, memory_order_relaxed);
1579 if (stored_peak_queue < peak_queue)
1580 atomic_store_explicit(&fnc->counters.ctxqueue_len_peak,
1581 peak_queue, memory_order_relaxed);
1582
c871e6c9
RZ
1583 if (atomic_load_explicit(&fnc->counters.ctxqueue_len,
1584 memory_order_relaxed)
1585 > 0)
ba803a2f
RZ
1586 thread_add_timer(fnc->fthread->master, fpm_process_queue,
1587 fnc, 0, &fnc->t_dequeue);
1588
b677907c
DE
1589 /* Ensure dataplane thread is rescheduled if we hit the work limit */
1590 if (counter >= limit)
1591 dplane_provider_work_ready();
1592
d35f447d
RZ
1593 return 0;
1594}
1595
1596static int fpm_nl_new(struct thread_master *tm)
1597{
1598 struct zebra_dplane_provider *prov = NULL;
d35f447d
RZ
1599 int rv;
1600
3bdd7fca 1601 gfnc = calloc(1, sizeof(*gfnc));
d35f447d
RZ
1602 rv = dplane_provider_register(prov_name, DPLANE_PRIO_POSTPROCESS,
1603 DPLANE_PROV_FLAG_THREADED, fpm_nl_start,
3bdd7fca 1604 fpm_nl_process, fpm_nl_finish, gfnc,
d35f447d
RZ
1605 &prov);
1606
1607 if (IS_ZEBRA_DEBUG_DPLANE)
1608 zlog_debug("%s register status: %d", prov_name, rv);
1609
612c2c15 1610 install_node(&fpm_node);
6cc059cd
RZ
1611 install_element(ENABLE_NODE, &fpm_show_counters_cmd);
1612 install_element(ENABLE_NODE, &fpm_show_counters_json_cmd);
1613 install_element(ENABLE_NODE, &fpm_reset_counters_cmd);
3bdd7fca
RZ
1614 install_element(CONFIG_NODE, &fpm_set_address_cmd);
1615 install_element(CONFIG_NODE, &no_fpm_set_address_cmd);
b55ab92a
RZ
1616 install_element(CONFIG_NODE, &fpm_use_nhg_cmd);
1617 install_element(CONFIG_NODE, &no_fpm_use_nhg_cmd);
3bdd7fca 1618
d35f447d
RZ
1619 return 0;
1620}
1621
1622static int fpm_nl_init(void)
1623{
1624 hook_register(frr_late_init, fpm_nl_new);
1625 return 0;
1626}
1627
1628FRR_MODULE_SETUP(
1629 .name = "dplane_fpm_nl",
1630 .version = "0.0.1",
1631 .description = "Data plane plugin for FPM using netlink.",
1632 .init = fpm_nl_init,
80413c20 1633);
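/*
 * Usage note (not part of the module itself): zebra must be started with
 * "-M dplane_fpm_nl" for fpm_nl_init() to run, after which the plugin is
 * configured with the "fpm address ..." commands registered above.
 */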