]> git.proxmox.com Git - mirror_frr.git/blame - zebra/dplane_fpm_nl.c
Merge pull request #12798 from donaldsharp/rib_match_multicast
[mirror_frr.git] / zebra / dplane_fpm_nl.c
CommitLineData
acddc0ed 1// SPDX-License-Identifier: GPL-2.0-or-later
d35f447d
RZ
2/*
3 * Zebra dataplane plugin for Forwarding Plane Manager (FPM) using netlink.
4 *
5 * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF")
6 * Rafael Zalamena
d35f447d
RZ
7 */
8
7309092b
DL
9#ifdef HAVE_CONFIG_H
10#include "config.h" /* Include this explicitly */
11#endif
12
d35f447d
RZ
13#include <arpa/inet.h>
14
15#include <sys/types.h>
16#include <sys/socket.h>
17
18#include <errno.h>
19#include <string.h>
20
d35f447d 21#include "lib/zebra.h"
6cc059cd 22#include "lib/json.h"
d35f447d 23#include "lib/libfrr.h"
c871e6c9 24#include "lib/frratomic.h"
3bdd7fca 25#include "lib/command.h"
d35f447d
RZ
26#include "lib/memory.h"
27#include "lib/network.h"
28#include "lib/ns.h"
29#include "lib/frr_pthread.h"
e5e444d8 30#include "zebra/debug.h"
bda10adf 31#include "zebra/interface.h"
d35f447d 32#include "zebra/zebra_dplane.h"
b300c8bb 33#include "zebra/zebra_mpls.h"
018e77bc 34#include "zebra/zebra_router.h"
8d30ff3b
SR
35#include "zebra/interface.h"
36#include "zebra/zebra_vxlan_private.h"
b2998086
PR
37#include "zebra/zebra_evpn.h"
38#include "zebra/zebra_evpn_mac.h"
d35f447d
RZ
39#include "zebra/kernel_netlink.h"
40#include "zebra/rt_netlink.h"
41#include "zebra/debug.h"
a0e11736 42#include "fpm/fpm.h"
d35f447d
RZ
43
44#define SOUTHBOUND_DEFAULT_ADDR INADDR_LOOPBACK
45#define SOUTHBOUND_DEFAULT_PORT 2620
46
a179ba35
RZ
47/**
48 * FPM header:
49 * {
50 * version: 1 byte (always 1),
51 * type: 1 byte (1 for netlink, 2 protobuf),
52 * len: 2 bytes (network order),
53 * }
54 *
55 * This header is used with any format to tell the users how many bytes to
56 * expect.
57 */
58#define FPM_HEADER_SIZE 4
59
d35f447d
RZ
60static const char *prov_name = "dplane_fpm_nl";
61
/*
 * Per-plugin (global) FPM/netlink context.
 *
 * Holds the connection to the external FPM server, the buffered I/O
 * streams, the dataplane context queue and all event/thread handles.
 * A single instance is allocated and stored in `gfnc`.
 */
struct fpm_nl_ctx {
	/* data plane connection. */
	int socket;		/* TCP socket to FPM server; -1 when closed. */
	bool disabled;		/* user disabled FPM ("no fpm address"). */
	bool connecting;	/* non-blocking connect() still in flight. */
	bool use_nhg;		/* emit netlink next hop group messages. */
	struct sockaddr_storage addr;	/* remote server (IPv4 or IPv6). */

	/* data plane buffers. */
	struct stream *ibuf;	/* input from server (FPM thread only). */
	struct stream *obuf;	/* output to server; shared, see mutex. */
	pthread_mutex_t obuf_mutex;

	/*
	 * data plane context queue:
	 * When a FPM server connection becomes a bottleneck, we must keep the
	 * data plane contexts until we get a chance to process them.
	 */
	struct dplane_ctx_list_head ctxqueue;
	pthread_mutex_t ctxqueue_mutex;

	/* data plane events. */
	struct zebra_dplane_provider *prov;
	struct frr_pthread *fthread;	/* dedicated FPM I/O pthread. */
	struct thread *t_connect;
	struct thread *t_read;
	struct thread *t_write;
	struct thread *t_event;
	struct thread *t_nhg;
	struct thread *t_dequeue;

	/* zebra events (scheduled on zrouter.master, not the FPM thread). */
	struct thread *t_lspreset;
	struct thread *t_lspwalk;
	struct thread *t_nhgreset;
	struct thread *t_nhgwalk;
	struct thread *t_ribreset;
	struct thread *t_ribwalk;
	struct thread *t_rmacreset;
	struct thread *t_rmacwalk;

	/* Statistic counters. */
	struct {
		/* Amount of bytes read into ibuf. */
		_Atomic uint32_t bytes_read;
		/* Amount of bytes written from obuf. */
		_Atomic uint32_t bytes_sent;
		/* Output buffer current usage. */
		_Atomic uint32_t obuf_bytes;
		/* Output buffer peak usage. */
		_Atomic uint32_t obuf_peak;

		/* Amount of connection closes. */
		_Atomic uint32_t connection_closes;
		/* Amount of connection errors. */
		_Atomic uint32_t connection_errors;

		/* Amount of user configurations: FNE_RECONNECT. */
		_Atomic uint32_t user_configures;
		/* Amount of user disable requests: FNE_DISABLE. */
		_Atomic uint32_t user_disables;

		/* Amount of data plane context processed. */
		_Atomic uint32_t dplane_contexts;
		/* Amount of data plane contexts enqueued. */
		_Atomic uint32_t ctxqueue_len;
		/* Peak amount of data plane contexts enqueued. */
		_Atomic uint32_t ctxqueue_len_peak;

		/* Amount of buffer full events. */
		_Atomic uint32_t buffer_full;
	} counters;
} *gfnc;
3bdd7fca
RZ
135
/* Events handled by fpm_process_event() on the FPM pthread. */
enum fpm_nl_events {
	/* Ask for FPM to reconnect the external server. */
	FNE_RECONNECT,
	/* Disable FPM. */
	FNE_DISABLE,
	/* Reset counters. */
	FNE_RESET_COUNTERS,
	/* Toggle next hop group feature. */
	FNE_TOGGLE_NHG,
	/* Reconnect request by our own code to avoid races. */
	FNE_INTERNAL_RECONNECT,

	/* LSP walk finished. */
	FNE_LSP_FINISHED,
	/* Next hop groups walk finished. */
	FNE_NHG_FINISHED,
	/* RIB walk finished. */
	FNE_RIB_FINISHED,
	/* RMAC walk finished. */
	FNE_RMAC_FINISHED,
};
157
a2032324
RZ
/*
 * Schedule a reconnect from within the plugin's own code paths.  Uses the
 * FNE_INTERNAL_RECONNECT event so the teardown happens on the FPM pthread,
 * avoiding races with handlers running there.
 */
#define FPM_RECONNECT(fnc)                                                     \
	thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc),     \
			 FNE_INTERNAL_RECONNECT, &(fnc)->t_event)

/* Notify the FPM event handler that a zebra table walk has completed. */
#define WALK_FINISH(fnc, ev)                                                   \
	thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc),     \
			 (ev), NULL)
165
018e77bc
RZ
166/*
167 * Prototypes.
168 */
cc9f21da 169static void fpm_process_event(struct thread *t);
018e77bc 170static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx);
cc9f21da
DS
171static void fpm_lsp_send(struct thread *t);
172static void fpm_lsp_reset(struct thread *t);
173static void fpm_nhg_send(struct thread *t);
174static void fpm_nhg_reset(struct thread *t);
175static void fpm_rib_send(struct thread *t);
176static void fpm_rib_reset(struct thread *t);
177static void fpm_rmac_send(struct thread *t);
178static void fpm_rmac_reset(struct thread *t);
018e77bc 179
3bdd7fca
RZ
180/*
181 * CLI.
182 */
6cc059cd
RZ
183#define FPM_STR "Forwarding Plane Manager configuration\n"
184
3bdd7fca
RZ
185DEFUN(fpm_set_address, fpm_set_address_cmd,
186 "fpm address <A.B.C.D|X:X::X:X> [port (1-65535)]",
6cc059cd 187 FPM_STR
3bdd7fca
RZ
188 "FPM remote listening server address\n"
189 "Remote IPv4 FPM server\n"
190 "Remote IPv6 FPM server\n"
191 "FPM remote listening server port\n"
192 "Remote FPM server port\n")
193{
194 struct sockaddr_in *sin;
195 struct sockaddr_in6 *sin6;
196 uint16_t port = 0;
197 uint8_t naddr[INET6_BUFSIZ];
198
199 if (argc == 5)
200 port = strtol(argv[4]->arg, NULL, 10);
201
202 /* Handle IPv4 addresses. */
203 if (inet_pton(AF_INET, argv[2]->arg, naddr) == 1) {
204 sin = (struct sockaddr_in *)&gfnc->addr;
205
206 memset(sin, 0, sizeof(*sin));
207 sin->sin_family = AF_INET;
208 sin->sin_port =
209 port ? htons(port) : htons(SOUTHBOUND_DEFAULT_PORT);
210#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
211 sin->sin_len = sizeof(*sin);
212#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
213 memcpy(&sin->sin_addr, naddr, sizeof(sin->sin_addr));
214
215 goto ask_reconnect;
216 }
217
218 /* Handle IPv6 addresses. */
219 if (inet_pton(AF_INET6, argv[2]->arg, naddr) != 1) {
220 vty_out(vty, "%% Invalid address: %s\n", argv[2]->arg);
221 return CMD_WARNING;
222 }
223
224 sin6 = (struct sockaddr_in6 *)&gfnc->addr;
225 memset(sin6, 0, sizeof(*sin6));
226 sin6->sin6_family = AF_INET6;
227 sin6->sin6_port = port ? htons(port) : htons(SOUTHBOUND_DEFAULT_PORT);
228#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
229 sin6->sin6_len = sizeof(*sin6);
230#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
231 memcpy(&sin6->sin6_addr, naddr, sizeof(sin6->sin6_addr));
232
233ask_reconnect:
234 thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
235 FNE_RECONNECT, &gfnc->t_event);
236 return CMD_SUCCESS;
237}
238
DEFUN(no_fpm_set_address, no_fpm_set_address_cmd,
      "no fpm address [<A.B.C.D|X:X::X:X> [port <1-65535>]]",
      NO_STR
      FPM_STR
      "FPM remote listening server address\n"
      "Remote IPv4 FPM server\n"
      "Remote IPv6 FPM server\n"
      "FPM remote listening server port\n"
      "Remote FPM server port\n")
{
	/* Hand off to the FPM pthread: disable and tear down the session. */
	thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
			 FNE_DISABLE, &gfnc->t_event);
	return CMD_SUCCESS;
}
253
b55ab92a
RZ
DEFUN(fpm_use_nhg, fpm_use_nhg_cmd,
      "fpm use-next-hop-groups",
      FPM_STR
      "Use netlink next hop groups feature.\n")
{
	/* Already enabled. */
	if (gfnc->use_nhg)
		return CMD_SUCCESS;

	/* Toggle is handled on the FPM pthread to avoid racing I/O. */
	thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
			 FNE_TOGGLE_NHG, &gfnc->t_nhg);

	return CMD_SUCCESS;
}
268
DEFUN(no_fpm_use_nhg, no_fpm_use_nhg_cmd,
      "no fpm use-next-hop-groups",
      NO_STR
      FPM_STR
      "Use netlink next hop groups feature.\n")
{
	/* Already disabled. */
	if (!gfnc->use_nhg)
		return CMD_SUCCESS;

	/* Toggle is handled on the FPM pthread to avoid racing I/O. */
	thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
			 FNE_TOGGLE_NHG, &gfnc->t_nhg);

	return CMD_SUCCESS;
}
284
6cc059cd
RZ
DEFUN(fpm_reset_counters, fpm_reset_counters_cmd,
      "clear fpm counters",
      CLEAR_STR
      FPM_STR
      "FPM statistic counters\n")
{
	/* Counters are owned by the FPM pthread; reset them there. */
	thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
			 FNE_RESET_COUNTERS, &gfnc->t_event);
	return CMD_SUCCESS;
}
295
DEFUN(fpm_show_counters, fpm_show_counters_cmd,
      "show fpm counters",
      SHOW_STR
      FPM_STR
      "FPM statistic counters\n")
{
	vty_out(vty, "%30s\n%30s\n", "FPM counters", "============");

	/*
	 * NOTE(review): counters are _Atomic and written by the FPM pthread;
	 * reads here are plain loads, so values are a best-effort snapshot.
	 */
#define SHOW_COUNTER(label, counter) \
	vty_out(vty, "%28s: %u\n", (label), (counter))

	SHOW_COUNTER("Input bytes", gfnc->counters.bytes_read);
	SHOW_COUNTER("Output bytes", gfnc->counters.bytes_sent);
	SHOW_COUNTER("Output buffer current size", gfnc->counters.obuf_bytes);
	SHOW_COUNTER("Output buffer peak size", gfnc->counters.obuf_peak);
	SHOW_COUNTER("Connection closes", gfnc->counters.connection_closes);
	SHOW_COUNTER("Connection errors", gfnc->counters.connection_errors);
	SHOW_COUNTER("Data plane items processed",
		     gfnc->counters.dplane_contexts);
	SHOW_COUNTER("Data plane items enqueued",
		     gfnc->counters.ctxqueue_len);
	SHOW_COUNTER("Data plane items queue peak",
		     gfnc->counters.ctxqueue_len_peak);
	SHOW_COUNTER("Buffer full hits", gfnc->counters.buffer_full);
	SHOW_COUNTER("User FPM configurations", gfnc->counters.user_configures);
	SHOW_COUNTER("User FPM disable requests", gfnc->counters.user_disables);

#undef SHOW_COUNTER

	return CMD_SUCCESS;
}
327
DEFUN(fpm_show_counters_json, fpm_show_counters_json_cmd,
      "show fpm counters json",
      SHOW_STR
      FPM_STR
      "FPM statistic counters\n"
      JSON_STR)
{
	struct json_object *jo;

	/* vty_json() below prints and frees `jo`. */
	jo = json_object_new_object();
	json_object_int_add(jo, "bytes-read", gfnc->counters.bytes_read);
	json_object_int_add(jo, "bytes-sent", gfnc->counters.bytes_sent);
	json_object_int_add(jo, "obuf-bytes", gfnc->counters.obuf_bytes);
	json_object_int_add(jo, "obuf-bytes-peak", gfnc->counters.obuf_peak);
	json_object_int_add(jo, "connection-closes",
			    gfnc->counters.connection_closes);
	json_object_int_add(jo, "connection-errors",
			    gfnc->counters.connection_errors);
	json_object_int_add(jo, "data-plane-contexts",
			    gfnc->counters.dplane_contexts);
	json_object_int_add(jo, "data-plane-contexts-queue",
			    gfnc->counters.ctxqueue_len);
	json_object_int_add(jo, "data-plane-contexts-queue-peak",
			    gfnc->counters.ctxqueue_len_peak);
	json_object_int_add(jo, "buffer-full-hits", gfnc->counters.buffer_full);
	json_object_int_add(jo, "user-configures",
			    gfnc->counters.user_configures);
	json_object_int_add(jo, "user-disables", gfnc->counters.user_disables);
	vty_json(vty, jo);

	return CMD_SUCCESS;
}
360
3bdd7fca
RZ
/*
 * Emit the running FPM configuration lines ("fpm address ..." and
 * "no fpm use-next-hop-groups").  Returns non-zero when anything was
 * written, per cmd_node config_write convention.
 */
static int fpm_write_config(struct vty *vty)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	int written = 0;

	if (gfnc->disabled)
		return written;

	switch (gfnc->addr.ss_family) {
	case AF_INET:
		written = 1;
		sin = (struct sockaddr_in *)&gfnc->addr;
		vty_out(vty, "fpm address %pI4", &sin->sin_addr);
		/* Only print the port when it differs from the default. */
		if (sin->sin_port != htons(SOUTHBOUND_DEFAULT_PORT))
			vty_out(vty, " port %d", ntohs(sin->sin_port));

		vty_out(vty, "\n");
		break;
	case AF_INET6:
		written = 1;
		sin6 = (struct sockaddr_in6 *)&gfnc->addr;
		vty_out(vty, "fpm address %pI6", &sin6->sin6_addr);
		if (sin6->sin6_port != htons(SOUTHBOUND_DEFAULT_PORT))
			vty_out(vty, " port %d", ntohs(sin6->sin6_port));

		vty_out(vty, "\n");
		break;

	default:
		/* No address configured: nothing to write. */
		break;
	}

	/* use-next-hop-groups defaults to on; only "no" form is saved. */
	if (!gfnc->use_nhg) {
		vty_out(vty, "no fpm use-next-hop-groups\n");
		written = 1;
	}

	return written;
}
401
/* CLI node that owns the FPM configuration output above. */
static struct cmd_node fpm_node = {
	.name = "fpm",
	.node = FPM_NODE,
	.prompt = "",
	.config_write = fpm_write_config,
};
408
d35f447d
RZ
409/*
410 * FPM functions.
411 */
cc9f21da 412static void fpm_connect(struct thread *t);
d35f447d
RZ
413
/*
 * Tear down the current FPM session and (unless disabled) schedule a new
 * connection attempt in 3 seconds.  Runs on the FPM pthread; the zebra-side
 * walk threads are cancelled asynchronously first so they stop producing
 * data for a connection that is going away.
 */
static void fpm_reconnect(struct fpm_nl_ctx *fnc)
{
	/* Cancel all zebra threads first. */
	thread_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
	thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);

	/*
	 * Grab the lock to empty the streams (data plane might try to
	 * enqueue updates while we are closing).
	 */
	frr_mutex_lock_autounlock(&fnc->obuf_mutex);

	/* Avoid calling close on `-1`. */
	if (fnc->socket != -1) {
		close(fnc->socket);
		fnc->socket = -1;
	}

	stream_reset(fnc->ibuf);
	stream_reset(fnc->obuf);
	THREAD_OFF(fnc->t_read);
	THREAD_OFF(fnc->t_write);

	/* FPM is disabled, don't attempt to connect. */
	if (fnc->disabled)
		return;

	thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
			 &fnc->t_connect);
}
450
cc9f21da 451static void fpm_read(struct thread *t)
d35f447d
RZ
452{
453 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
a0e11736 454 fpm_msg_hdr_t fpm;
d35f447d 455 ssize_t rv;
a0e11736
DS
456 char buf[65535];
457 struct nlmsghdr *hdr;
458 struct zebra_dplane_ctx *ctx;
459 size_t available_bytes;
460 size_t hdr_available_bytes;
d35f447d
RZ
461
462 /* Let's ignore the input at the moment. */
463 rv = stream_read_try(fnc->ibuf, fnc->socket,
464 STREAM_WRITEABLE(fnc->ibuf));
465 if (rv == 0) {
c871e6c9
RZ
466 atomic_fetch_add_explicit(&fnc->counters.connection_closes, 1,
467 memory_order_relaxed);
e5e444d8
RZ
468
469 if (IS_ZEBRA_DEBUG_FPM)
470 zlog_debug("%s: connection closed", __func__);
471
a2032324 472 FPM_RECONNECT(fnc);
cc9f21da 473 return;
d35f447d
RZ
474 }
475 if (rv == -1) {
c871e6c9
RZ
476 atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1,
477 memory_order_relaxed);
e5e444d8
RZ
478 zlog_warn("%s: connection failure: %s", __func__,
479 strerror(errno));
a2032324 480 FPM_RECONNECT(fnc);
cc9f21da 481 return;
d35f447d 482 }
7d83e139
DS
483
484 /* Schedule the next read */
485 thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
486 &fnc->t_read);
487
488 /* We've got an interruption. */
489 if (rv == -2)
490 return;
491
d35f447d 492
6cc059cd 493 /* Account all bytes read. */
c871e6c9
RZ
494 atomic_fetch_add_explicit(&fnc->counters.bytes_read, rv,
495 memory_order_relaxed);
a0e11736
DS
496
497 available_bytes = STREAM_READABLE(fnc->ibuf);
498 while (available_bytes) {
499 if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
500 stream_pulldown(fnc->ibuf);
501 return;
502 }
503
504 fpm.version = stream_getc(fnc->ibuf);
505 fpm.msg_type = stream_getc(fnc->ibuf);
506 fpm.msg_len = stream_getw(fnc->ibuf);
507
508 if (fpm.version != FPM_PROTO_VERSION &&
509 fpm.msg_type != FPM_MSG_TYPE_NETLINK) {
510 stream_reset(fnc->ibuf);
511 zlog_warn(
512 "%s: Received version/msg_type %u/%u, expected 1/1",
513 __func__, fpm.version, fpm.msg_type);
514
515 FPM_RECONNECT(fnc);
516 return;
517 }
518
519 /*
520 * If the passed in length doesn't even fill in the header
521 * something is wrong and reset.
522 */
523 if (fpm.msg_len < FPM_MSG_HDR_LEN) {
524 zlog_warn(
525 "%s: Received message length: %u that does not even fill the FPM header",
526 __func__, fpm.msg_len);
527 FPM_RECONNECT(fnc);
528 return;
529 }
530
531 /*
532 * If we have not received the whole payload, reset the stream
533 * back to the beginning of the header and move it to the
534 * top.
535 */
536 if (fpm.msg_len > available_bytes) {
537 stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
538 stream_pulldown(fnc->ibuf);
539 return;
540 }
541
542 available_bytes -= FPM_MSG_HDR_LEN;
543
544 /*
545 * Place the data from the stream into a buffer
546 */
547 hdr = (struct nlmsghdr *)buf;
548 stream_get(buf, fnc->ibuf, fpm.msg_len - FPM_MSG_HDR_LEN);
549 hdr_available_bytes = fpm.msg_len - FPM_MSG_HDR_LEN;
550 available_bytes -= hdr_available_bytes;
551
552 /* Sanity check: must be at least header size. */
553 if (hdr->nlmsg_len < sizeof(*hdr)) {
554 zlog_warn(
555 "%s: [seq=%u] invalid message length %u (< %zu)",
556 __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
557 sizeof(*hdr));
558 continue;
559 }
560 if (hdr->nlmsg_len > fpm.msg_len) {
561 zlog_warn(
562 "%s: Received a inner header length of %u that is greater than the fpm total length of %u",
563 __func__, hdr->nlmsg_len, fpm.msg_len);
564 FPM_RECONNECT(fnc);
565 }
566 /* Not enough bytes available. */
567 if (hdr->nlmsg_len > hdr_available_bytes) {
568 zlog_warn(
569 "%s: [seq=%u] invalid message length %u (> %zu)",
570 __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
571 available_bytes);
572 continue;
573 }
574
575 if (!(hdr->nlmsg_flags & NLM_F_REQUEST)) {
576 if (IS_ZEBRA_DEBUG_FPM)
577 zlog_debug(
578 "%s: [seq=%u] not a request, skipping",
579 __func__, hdr->nlmsg_seq);
580
581 /*
582 * This request is a bust, go to the next one
583 */
584 continue;
585 }
586
587 switch (hdr->nlmsg_type) {
588 case RTM_NEWROUTE:
589 ctx = dplane_ctx_alloc();
590 dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_NOTIFY);
591 if (netlink_route_change_read_unicast_internal(
592 hdr, 0, false, ctx) != 1) {
593 dplane_ctx_fini(&ctx);
594 stream_pulldown(fnc->ibuf);
c0275ab1
DS
595 /*
596 * Let's continue to read other messages
597 * Even if we ignore this one.
598 */
a0e11736
DS
599 }
600 break;
601 default:
602 if (IS_ZEBRA_DEBUG_FPM)
603 zlog_debug(
604 "%s: Received message type %u which is not currently handled",
605 __func__, hdr->nlmsg_type);
606 break;
607 }
608 }
609
610 stream_reset(fnc->ibuf);
d35f447d
RZ
611}
612
/*
 * Write handler for the FPM socket (runs on the FPM pthread).
 *
 * On the first invocation after a non-blocking connect() it checks
 * SO_ERROR to learn whether the connect succeeded; on success it kicks
 * off the LSP reset walk and arms the read handler.  It then drains as
 * much of `obuf` as the socket accepts, rescheduling itself while data
 * remains.
 */
static void fpm_write(struct thread *t)
{
	struct fpm_nl_ctx *fnc = THREAD_ARG(t);
	socklen_t statuslen;
	ssize_t bwritten;
	int rv, status;
	size_t btotal;

	if (fnc->connecting == true) {
		status = 0;
		statuslen = sizeof(status);

		rv = getsockopt(fnc->socket, SOL_SOCKET, SO_ERROR, &status,
				&statuslen);
		if (rv == -1 || status != 0) {
			/* rv != -1 means getsockopt worked: the pending
			 * connect itself failed with `status`. */
			if (rv != -1)
				zlog_warn("%s: connection failed: %s", __func__,
					  strerror(status));
			else
				zlog_warn("%s: SO_ERROR failed: %s", __func__,
					  strerror(status));

			atomic_fetch_add_explicit(
				&fnc->counters.connection_errors, 1,
				memory_order_relaxed);

			FPM_RECONNECT(fnc);
			return;
		}

		fnc->connecting = false;

		/*
		 * Starting with LSPs walk all FPM objects, marking them
		 * as unsent and then replaying them.
		 */
		thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
				 &fnc->t_lspreset);

		/* Permit receiving messages now. */
		thread_add_read(fnc->fthread->master, fpm_read, fnc,
				fnc->socket, &fnc->t_read);
	}

	frr_mutex_lock_autounlock(&fnc->obuf_mutex);

	while (true) {
		/* Stream is empty: reset pointers and return. */
		if (STREAM_READABLE(fnc->obuf) == 0) {
			stream_reset(fnc->obuf);
			break;
		}

		/* Try to write all at once. */
		btotal = stream_get_endp(fnc->obuf) -
			stream_get_getp(fnc->obuf);
		bwritten = write(fnc->socket, stream_pnt(fnc->obuf), btotal);
		if (bwritten == 0) {
			atomic_fetch_add_explicit(
				&fnc->counters.connection_closes, 1,
				memory_order_relaxed);

			if (IS_ZEBRA_DEBUG_FPM)
				zlog_debug("%s: connection closed", __func__);
			break;
		}
		if (bwritten == -1) {
			/* Attempt to continue if blocked by a signal. */
			if (errno == EINTR)
				continue;
			/* Receiver is probably slow, lets give it some time. */
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				break;

			atomic_fetch_add_explicit(
				&fnc->counters.connection_errors, 1,
				memory_order_relaxed);
			zlog_warn("%s: connection failure: %s", __func__,
				  strerror(errno));

			FPM_RECONNECT(fnc);
			return;
		}

		/* Account all bytes sent. */
		atomic_fetch_add_explicit(&fnc->counters.bytes_sent, bwritten,
					  memory_order_relaxed);

		/* Account number of bytes free. */
		atomic_fetch_sub_explicit(&fnc->counters.obuf_bytes, bwritten,
					  memory_order_relaxed);

		stream_forward_getp(fnc->obuf, (size_t)bwritten);
	}

	/* Stream is not empty yet, we must schedule more writes. */
	if (STREAM_READABLE(fnc->obuf)) {
		stream_pulldown(fnc->obuf);
		thread_add_write(fnc->fthread->master, fpm_write, fnc,
				 fnc->socket, &fnc->t_write);
		return;
	}
}
716
cc9f21da 717static void fpm_connect(struct thread *t)
d35f447d
RZ
718{
719 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
3bdd7fca
RZ
720 struct sockaddr_in *sin = (struct sockaddr_in *)&fnc->addr;
721 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&fnc->addr;
722 socklen_t slen;
d35f447d
RZ
723 int rv, sock;
724 char addrstr[INET6_ADDRSTRLEN];
725
3bdd7fca 726 sock = socket(fnc->addr.ss_family, SOCK_STREAM, 0);
d35f447d 727 if (sock == -1) {
6cc059cd 728 zlog_err("%s: fpm socket failed: %s", __func__,
d35f447d
RZ
729 strerror(errno));
730 thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
731 &fnc->t_connect);
cc9f21da 732 return;
d35f447d
RZ
733 }
734
735 set_nonblocking(sock);
736
3bdd7fca
RZ
737 if (fnc->addr.ss_family == AF_INET) {
738 inet_ntop(AF_INET, &sin->sin_addr, addrstr, sizeof(addrstr));
739 slen = sizeof(*sin);
740 } else {
741 inet_ntop(AF_INET6, &sin6->sin6_addr, addrstr, sizeof(addrstr));
742 slen = sizeof(*sin6);
743 }
d35f447d 744
e5e444d8
RZ
745 if (IS_ZEBRA_DEBUG_FPM)
746 zlog_debug("%s: attempting to connect to %s:%d", __func__,
747 addrstr, ntohs(sin->sin_port));
d35f447d 748
3bdd7fca 749 rv = connect(sock, (struct sockaddr *)&fnc->addr, slen);
d35f447d 750 if (rv == -1 && errno != EINPROGRESS) {
c871e6c9
RZ
751 atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1,
752 memory_order_relaxed);
d35f447d
RZ
753 close(sock);
754 zlog_warn("%s: fpm connection failed: %s", __func__,
755 strerror(errno));
756 thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
757 &fnc->t_connect);
cc9f21da 758 return;
d35f447d
RZ
759 }
760
761 fnc->connecting = (errno == EINPROGRESS);
762 fnc->socket = sock;
e1afb97f
RZ
763 if (!fnc->connecting)
764 thread_add_read(fnc->fthread->master, fpm_read, fnc, sock,
765 &fnc->t_read);
d35f447d
RZ
766 thread_add_write(fnc->fthread->master, fpm_write, fnc, sock,
767 &fnc->t_write);
768
f9bf1ecc
DE
769 /*
770 * Starting with LSPs walk all FPM objects, marking them
771 * as unsent and then replaying them.
f584de52
RZ
772 *
773 * If we are not connected, then delay the objects reset/send.
f9bf1ecc 774 */
f584de52
RZ
775 if (!fnc->connecting)
776 thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
777 &fnc->t_lspreset);
d35f447d
RZ
778}
779
/**
 * Encode data plane operation context into netlink and enqueue it in the FPM
 * output buffer.
 *
 * Runs with `obuf_mutex` held (auto-unlocked on any return path), so it may
 * be called from the dataplane thread while the FPM thread drains `obuf`.
 *
 * @param fnc the netlink FPM context.
 * @param ctx the data plane operation context data.
 * @return 0 on success or -1 on not enough space.
 */
static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
{
	uint8_t nl_buf[NL_PKT_BUF_SIZE];
	size_t nl_buf_len;
	ssize_t rv;
	uint64_t obytes, obytes_peak;
	enum dplane_op_e op = dplane_ctx_get_op(ctx);

	/*
	 * If we were configured to not use next hop groups, then quit as soon
	 * as possible.
	 */
	if ((!fnc->use_nhg)
	    && (op == DPLANE_OP_NH_DELETE || op == DPLANE_OP_NH_INSTALL
		|| op == DPLANE_OP_NH_UPDATE))
		return 0;

	nl_buf_len = 0;

	frr_mutex_lock_autounlock(&fnc->obuf_mutex);

	switch (op) {
	case DPLANE_OP_ROUTE_UPDATE:
	case DPLANE_OP_ROUTE_DELETE:
		/* An UPDATE is encoded as DELETE followed by INSTALL. */
		rv = netlink_route_multipath_msg_encode(RTM_DELROUTE, ctx,
							nl_buf, sizeof(nl_buf),
							true, fnc->use_nhg);
		if (rv <= 0) {
			zlog_err(
				"%s: netlink_route_multipath_msg_encode failed",
				__func__);
			return 0;
		}

		nl_buf_len = (size_t)rv;

		/* UPDATE operations need a INSTALL, otherwise just quit. */
		if (op == DPLANE_OP_ROUTE_DELETE)
			break;

		/* FALL THROUGH */
	case DPLANE_OP_ROUTE_INSTALL:
		rv = netlink_route_multipath_msg_encode(
			RTM_NEWROUTE, ctx, &nl_buf[nl_buf_len],
			sizeof(nl_buf) - nl_buf_len, true, fnc->use_nhg);
		if (rv <= 0) {
			zlog_err(
				"%s: netlink_route_multipath_msg_encode failed",
				__func__);
			return 0;
		}

		nl_buf_len += (size_t)rv;
		break;

	case DPLANE_OP_MAC_INSTALL:
	case DPLANE_OP_MAC_DELETE:
		rv = netlink_macfdb_update_ctx(ctx, nl_buf, sizeof(nl_buf));
		if (rv <= 0) {
			zlog_err("%s: netlink_macfdb_update_ctx failed",
				 __func__);
			return 0;
		}

		nl_buf_len = (size_t)rv;
		break;

	case DPLANE_OP_NH_DELETE:
		rv = netlink_nexthop_msg_encode(RTM_DELNEXTHOP, ctx, nl_buf,
						sizeof(nl_buf), true);
		if (rv <= 0) {
			zlog_err("%s: netlink_nexthop_msg_encode failed",
				 __func__);
			return 0;
		}

		nl_buf_len = (size_t)rv;
		break;
	case DPLANE_OP_NH_INSTALL:
	case DPLANE_OP_NH_UPDATE:
		rv = netlink_nexthop_msg_encode(RTM_NEWNEXTHOP, ctx, nl_buf,
						sizeof(nl_buf), true);
		if (rv <= 0) {
			zlog_err("%s: netlink_nexthop_msg_encode failed",
				 __func__);
			return 0;
		}

		nl_buf_len = (size_t)rv;
		break;

	case DPLANE_OP_LSP_INSTALL:
	case DPLANE_OP_LSP_UPDATE:
	case DPLANE_OP_LSP_DELETE:
		rv = netlink_lsp_msg_encoder(ctx, nl_buf, sizeof(nl_buf));
		if (rv <= 0) {
			zlog_err("%s: netlink_lsp_msg_encoder failed",
				 __func__);
			return 0;
		}

		nl_buf_len += (size_t)rv;
		break;

	/* Un-handled by FPM at this time. */
	case DPLANE_OP_PW_INSTALL:
	case DPLANE_OP_PW_UNINSTALL:
	case DPLANE_OP_ADDR_INSTALL:
	case DPLANE_OP_ADDR_UNINSTALL:
	case DPLANE_OP_NEIGH_INSTALL:
	case DPLANE_OP_NEIGH_UPDATE:
	case DPLANE_OP_NEIGH_DELETE:
	case DPLANE_OP_VTEP_ADD:
	case DPLANE_OP_VTEP_DELETE:
	case DPLANE_OP_SYS_ROUTE_ADD:
	case DPLANE_OP_SYS_ROUTE_DELETE:
	case DPLANE_OP_ROUTE_NOTIFY:
	case DPLANE_OP_LSP_NOTIFY:
	case DPLANE_OP_RULE_ADD:
	case DPLANE_OP_RULE_DELETE:
	case DPLANE_OP_RULE_UPDATE:
	case DPLANE_OP_NEIGH_DISCOVER:
	case DPLANE_OP_BR_PORT_UPDATE:
	case DPLANE_OP_IPTABLE_ADD:
	case DPLANE_OP_IPTABLE_DELETE:
	case DPLANE_OP_IPSET_ADD:
	case DPLANE_OP_IPSET_DELETE:
	case DPLANE_OP_IPSET_ENTRY_ADD:
	case DPLANE_OP_IPSET_ENTRY_DELETE:
	case DPLANE_OP_NEIGH_IP_INSTALL:
	case DPLANE_OP_NEIGH_IP_DELETE:
	case DPLANE_OP_NEIGH_TABLE_UPDATE:
	case DPLANE_OP_GRE_SET:
	case DPLANE_OP_INTF_ADDR_ADD:
	case DPLANE_OP_INTF_ADDR_DEL:
	case DPLANE_OP_INTF_NETCONFIG:
	case DPLANE_OP_INTF_INSTALL:
	case DPLANE_OP_INTF_UPDATE:
	case DPLANE_OP_INTF_DELETE:
	case DPLANE_OP_TC_QDISC_INSTALL:
	case DPLANE_OP_TC_QDISC_UNINSTALL:
	case DPLANE_OP_TC_CLASS_ADD:
	case DPLANE_OP_TC_CLASS_DELETE:
	case DPLANE_OP_TC_CLASS_UPDATE:
	case DPLANE_OP_TC_FILTER_ADD:
	case DPLANE_OP_TC_FILTER_DELETE:
	case DPLANE_OP_TC_FILTER_UPDATE:
	case DPLANE_OP_NONE:
		break;

	}

	/* Skip empty enqueues. */
	if (nl_buf_len == 0)
		return 0;

	/* We must know if someday a message goes beyond 65KiB. */
	assert((nl_buf_len + FPM_HEADER_SIZE) <= UINT16_MAX);

	/* Check if we have enough buffer space. */
	if (STREAM_WRITEABLE(fnc->obuf) < (nl_buf_len + FPM_HEADER_SIZE)) {
		atomic_fetch_add_explicit(&fnc->counters.buffer_full, 1,
					  memory_order_relaxed);

		if (IS_ZEBRA_DEBUG_FPM)
			zlog_debug(
				"%s: buffer full: wants to write %zu but has %zu",
				__func__, nl_buf_len + FPM_HEADER_SIZE,
				STREAM_WRITEABLE(fnc->obuf));

		return -1;
	}

	/*
	 * Fill in the FPM header information.
	 *
	 * See FPM_HEADER_SIZE definition for more information.
	 */
	stream_putc(fnc->obuf, 1);
	stream_putc(fnc->obuf, 1);
	stream_putw(fnc->obuf, nl_buf_len + FPM_HEADER_SIZE);

	/* Write current data. */
	stream_write(fnc->obuf, nl_buf, (size_t)nl_buf_len);

	/* Account number of bytes waiting to be written. */
	atomic_fetch_add_explicit(&fnc->counters.obuf_bytes,
				  nl_buf_len + FPM_HEADER_SIZE,
				  memory_order_relaxed);
	obytes = atomic_load_explicit(&fnc->counters.obuf_bytes,
				      memory_order_relaxed);
	obytes_peak = atomic_load_explicit(&fnc->counters.obuf_peak,
					   memory_order_relaxed);
	if (obytes_peak < obytes)
		atomic_store_explicit(&fnc->counters.obuf_peak, obytes,
				      memory_order_relaxed);

	/* Tell the thread to start writing. */
	thread_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
			 &fnc->t_write);

	return 0;
}
991
/*
 * LSP walk/send functions
 */
struct fpm_lsp_arg {
	struct zebra_dplane_ctx *ctx;	/* reusable encode context. */
	struct fpm_nl_ctx *fnc;		/* plugin context. */
	bool complete;			/* false when walk aborted (obuf full). */
};
1000
1001static int fpm_lsp_send_cb(struct hash_bucket *bucket, void *arg)
1002{
8f74a383 1003 struct zebra_lsp *lsp = bucket->data;
f9bf1ecc
DE
1004 struct fpm_lsp_arg *fla = arg;
1005
1006 /* Skip entries which have already been sent */
1007 if (CHECK_FLAG(lsp->flags, LSP_FLAG_FPM))
1008 return HASHWALK_CONTINUE;
1009
1010 dplane_ctx_reset(fla->ctx);
1011 dplane_ctx_lsp_init(fla->ctx, DPLANE_OP_LSP_INSTALL, lsp);
1012
1013 if (fpm_nl_enqueue(fla->fnc, fla->ctx) == -1) {
1014 fla->complete = false;
1015 return HASHWALK_ABORT;
1016 }
1017
1018 /* Mark entry as sent */
1019 SET_FLAG(lsp->flags, LSP_FLAG_FPM);
1020 return HASHWALK_CONTINUE;
1021}
1022
cc9f21da 1023static void fpm_lsp_send(struct thread *t)
f9bf1ecc
DE
1024{
1025 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1026 struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
1027 struct fpm_lsp_arg fla;
1028
1029 fla.fnc = fnc;
1030 fla.ctx = dplane_ctx_alloc();
1031 fla.complete = true;
1032
1033 hash_walk(zvrf->lsp_table, fpm_lsp_send_cb, &fla);
1034
1035 dplane_ctx_fini(&fla.ctx);
1036
1037 if (fla.complete) {
1038 WALK_FINISH(fnc, FNE_LSP_FINISHED);
1039
1040 /* Now move onto routes */
1f9193c1
RZ
1041 thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
1042 &fnc->t_nhgreset);
f9bf1ecc
DE
1043 } else {
1044 /* Didn't finish - reschedule LSP walk */
1045 thread_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
1046 &fnc->t_lspwalk);
1047 }
f9bf1ecc
DE
1048}
1049
/*
 * Next hop walk/send functions.
 */
struct fpm_nhg_arg {
	/* Reusable dataplane context used to snapshot each next hop group. */
	struct zebra_dplane_ctx *ctx;
	/* FPM connection the next hop groups are written to. */
	struct fpm_nl_ctx *fnc;
	/* Cleared when the output buffer fills and the walk aborts. */
	bool complete;
};
1058
1059static int fpm_nhg_send_cb(struct hash_bucket *bucket, void *arg)
1060{
1061 struct nhg_hash_entry *nhe = bucket->data;
1062 struct fpm_nhg_arg *fna = arg;
1063
1064 /* This entry was already sent, skip it. */
1065 if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_FPM))
1066 return HASHWALK_CONTINUE;
1067
1068 /* Reset ctx to reuse allocated memory, take a snapshot and send it. */
1069 dplane_ctx_reset(fna->ctx);
1070 dplane_ctx_nexthop_init(fna->ctx, DPLANE_OP_NH_INSTALL, nhe);
1071 if (fpm_nl_enqueue(fna->fnc, fna->ctx) == -1) {
1072 /* Our buffers are full, lets give it some cycles. */
1073 fna->complete = false;
1074 return HASHWALK_ABORT;
1075 }
1076
1077 /* Mark group as sent, so it doesn't get sent again. */
1078 SET_FLAG(nhe->flags, NEXTHOP_GROUP_FPM);
1079
1080 return HASHWALK_CONTINUE;
1081}
1082
cc9f21da 1083static void fpm_nhg_send(struct thread *t)
981ca597
RZ
1084{
1085 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1086 struct fpm_nhg_arg fna;
1087
1088 fna.fnc = fnc;
1089 fna.ctx = dplane_ctx_alloc();
1090 fna.complete = true;
1091
1092 /* Send next hops. */
1f9193c1
RZ
1093 if (fnc->use_nhg)
1094 hash_walk(zrouter.nhgs_id, fpm_nhg_send_cb, &fna);
981ca597
RZ
1095
1096 /* `free()` allocated memory. */
1097 dplane_ctx_fini(&fna.ctx);
1098
1099 /* We are done sending next hops, lets install the routes now. */
55eb9d4d
RZ
1100 if (fna.complete) {
1101 WALK_FINISH(fnc, FNE_NHG_FINISHED);
e41e0f81
RZ
1102 thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
1103 &fnc->t_ribreset);
55eb9d4d 1104 } else /* Otherwise reschedule next hop group again. */
981ca597
RZ
1105 thread_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
1106 &fnc->t_nhgwalk);
981ca597
RZ
1107}
1108
/**
 * Send all RIB installed routes to the connected data plane.
 *
 * Walks every route table. If the output buffer fills mid-walk, the
 * temporary context is freed and the walk is rescheduled; entries already
 * marked RIB_DEST_UPDATE_FPM are skipped on the next pass.
 */
static void fpm_rib_send(struct thread *t)
{
	struct fpm_nl_ctx *fnc = THREAD_ARG(t);
	rib_dest_t *dest;
	struct route_node *rn;
	struct route_table *rt;
	struct zebra_dplane_ctx *ctx;
	rib_tables_iter_t rt_iter;

	/* Allocate temporary context for all transactions. */
	ctx = dplane_ctx_alloc();

	rt_iter.state = RIB_TABLES_ITER_S_INIT;
	while ((rt = rib_tables_iter_next(&rt_iter))) {
		for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
			dest = rib_dest_from_rnode(rn);
			/* Skip bad route entries. */
			if (dest == NULL || dest->selected_fib == NULL)
				continue;

			/* Check for already sent routes. */
			if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM))
				continue;

			/* Enqueue route install. */
			dplane_ctx_reset(ctx);
			dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_INSTALL, rn,
					      dest->selected_fib);
			if (fpm_nl_enqueue(fnc, ctx) == -1) {
				/* Free the temporary allocated context. */
				dplane_ctx_fini(&ctx);

				/* Buffer full: retry in one second. */
				thread_add_timer(zrouter.master, fpm_rib_send,
						 fnc, 1, &fnc->t_ribwalk);
				return;
			}

			/* Mark as sent. */
			SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
		}
	}

	/* Free the temporary allocated context. */
	dplane_ctx_fini(&ctx);

	/* All RIB routes sent! */
	WALK_FINISH(fnc, FNE_RIB_FINISHED);

	/* Schedule next event: RMAC reset. */
	thread_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
			 &fnc->t_rmacreset);
}
1164
/*
 * The next three functions will handle RMAC enqueue.
 */
struct fpm_rmac_arg {
	/* Reusable dataplane context used to snapshot each RMAC. */
	struct zebra_dplane_ctx *ctx;
	/* FPM connection the MAC entries are written to. */
	struct fpm_nl_ctx *fnc;
	/* L3 VNI currently being walked (set by fpm_enqueue_l3vni_table). */
	struct zebra_l3vni *zl3vni;
	/* Cleared when the output buffer fills and the walk stops. */
	bool complete;
};
1174
1ac88792 1175static void fpm_enqueue_rmac_table(struct hash_bucket *bucket, void *arg)
bda10adf
RZ
1176{
1177 struct fpm_rmac_arg *fra = arg;
3198b2b3 1178 struct zebra_mac *zrmac = bucket->data;
bda10adf 1179 struct zebra_if *zif = fra->zl3vni->vxlan_if->info;
8d30ff3b 1180 struct zebra_vxlan_vni *vni;
bda10adf
RZ
1181 struct zebra_if *br_zif;
1182 vlanid_t vid;
1183 bool sticky;
1184
1185 /* Entry already sent. */
55eb9d4d 1186 if (CHECK_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT) || !fra->complete)
bda10adf
RZ
1187 return;
1188
1189 sticky = !!CHECK_FLAG(zrmac->flags,
1190 (ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW));
1191 br_zif = (struct zebra_if *)(zif->brslave_info.br_if->info);
8d30ff3b
SR
1192 vni = zebra_vxlan_if_vni_find(zif, fra->zl3vni->vni);
1193 vid = IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif) ? vni->access_vlan : 0;
bda10adf
RZ
1194
1195 dplane_ctx_reset(fra->ctx);
1196 dplane_ctx_set_op(fra->ctx, DPLANE_OP_MAC_INSTALL);
1197 dplane_mac_init(fra->ctx, fra->zl3vni->vxlan_if,
b95ce8fa
SR
1198 zif->brslave_info.br_if, vid, &zrmac->macaddr, vni->vni,
1199 zrmac->fwd_info.r_vtep_ip, sticky, 0 /*nhg*/,
1200 0 /*update_flags*/);
bda10adf 1201 if (fpm_nl_enqueue(fra->fnc, fra->ctx) == -1) {
bda10adf
RZ
1202 thread_add_timer(zrouter.master, fpm_rmac_send,
1203 fra->fnc, 1, &fra->fnc->t_rmacwalk);
55eb9d4d 1204 fra->complete = false;
bda10adf
RZ
1205 }
1206}
1207
1ac88792 1208static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg)
bda10adf
RZ
1209{
1210 struct fpm_rmac_arg *fra = arg;
05843a27 1211 struct zebra_l3vni *zl3vni = bucket->data;
bda10adf
RZ
1212
1213 fra->zl3vni = zl3vni;
1214 hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni);
1215}
1216
cc9f21da 1217static void fpm_rmac_send(struct thread *t)
bda10adf
RZ
1218{
1219 struct fpm_rmac_arg fra;
1220
1221 fra.fnc = THREAD_ARG(t);
1222 fra.ctx = dplane_ctx_alloc();
55eb9d4d 1223 fra.complete = true;
bda10adf
RZ
1224 hash_iterate(zrouter.l3vni_table, fpm_enqueue_l3vni_table, &fra);
1225 dplane_ctx_fini(&fra.ctx);
1226
55eb9d4d
RZ
1227 /* RMAC walk completed. */
1228 if (fra.complete)
1229 WALK_FINISH(fra.fnc, FNE_RMAC_FINISHED);
bda10adf
RZ
1230}
1231
981ca597
RZ
1232/*
1233 * Resets the next hop FPM flags so we send all next hops again.
1234 */
1235static void fpm_nhg_reset_cb(struct hash_bucket *bucket, void *arg)
1236{
1237 struct nhg_hash_entry *nhe = bucket->data;
1238
1239 /* Unset FPM installation flag so it gets installed again. */
1240 UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_FPM);
1241}
1242
cc9f21da 1243static void fpm_nhg_reset(struct thread *t)
981ca597 1244{
55eb9d4d
RZ
1245 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1246
981ca597 1247 hash_iterate(zrouter.nhgs_id, fpm_nhg_reset_cb, NULL);
e41e0f81
RZ
1248
1249 /* Schedule next step: send next hop groups. */
1250 thread_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
981ca597
RZ
1251}
1252
f9bf1ecc
DE
1253/*
1254 * Resets the LSP FPM flag so we send all LSPs again.
1255 */
1256static void fpm_lsp_reset_cb(struct hash_bucket *bucket, void *arg)
1257{
8f74a383 1258 struct zebra_lsp *lsp = bucket->data;
f9bf1ecc
DE
1259
1260 UNSET_FLAG(lsp->flags, LSP_FLAG_FPM);
1261}
1262
cc9f21da 1263static void fpm_lsp_reset(struct thread *t)
f9bf1ecc
DE
1264{
1265 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1266 struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
1267
1268 hash_iterate(zvrf->lsp_table, fpm_lsp_reset_cb, NULL);
1269
1270 /* Schedule next step: send LSPs */
1271 thread_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
f9bf1ecc
DE
1272}
1273
018e77bc
RZ
1274/**
1275 * Resets the RIB FPM flags so we send all routes again.
1276 */
cc9f21da 1277static void fpm_rib_reset(struct thread *t)
018e77bc
RZ
1278{
1279 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1280 rib_dest_t *dest;
1281 struct route_node *rn;
1282 struct route_table *rt;
1283 rib_tables_iter_t rt_iter;
1284
018e77bc
RZ
1285 rt_iter.state = RIB_TABLES_ITER_S_INIT;
1286 while ((rt = rib_tables_iter_next(&rt_iter))) {
1287 for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
1288 dest = rib_dest_from_rnode(rn);
1289 /* Skip bad route entries. */
1290 if (dest == NULL)
1291 continue;
1292
1293 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1294 }
1295 }
1296
e41e0f81
RZ
1297 /* Schedule next step: send RIB routes. */
1298 thread_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
018e77bc
RZ
1299}
1300
bda10adf
RZ
1301/*
1302 * The next three function will handle RMAC table reset.
1303 */
1ac88792 1304static void fpm_unset_rmac_table(struct hash_bucket *bucket, void *arg)
bda10adf 1305{
3198b2b3 1306 struct zebra_mac *zrmac = bucket->data;
bda10adf
RZ
1307
1308 UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT);
1309}
1310
1ac88792 1311static void fpm_unset_l3vni_table(struct hash_bucket *bucket, void *arg)
bda10adf 1312{
05843a27 1313 struct zebra_l3vni *zl3vni = bucket->data;
bda10adf
RZ
1314
1315 hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni);
1316}
1317
cc9f21da 1318static void fpm_rmac_reset(struct thread *t)
bda10adf 1319{
55eb9d4d
RZ
1320 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1321
bda10adf
RZ
1322 hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL);
1323
e41e0f81
RZ
1324 /* Schedule next event: send RMAC entries. */
1325 thread_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
1326 &fnc->t_rmacwalk);
bda10adf
RZ
1327}
1328
cc9f21da 1329static void fpm_process_queue(struct thread *t)
ba803a2f
RZ
1330{
1331 struct fpm_nl_ctx *fnc = THREAD_ARG(t);
1332 struct zebra_dplane_ctx *ctx;
3f2b998f 1333 bool no_bufs = false;
438dd3e7 1334 uint64_t processed_contexts = 0;
ba803a2f 1335
ba803a2f
RZ
1336 while (true) {
1337 /* No space available yet. */
3f2b998f
DE
1338 if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) {
1339 no_bufs = true;
ba803a2f 1340 break;
3f2b998f 1341 }
ba803a2f
RZ
1342
1343 /* Dequeue next item or quit processing. */
dc693fe0
DE
1344 frr_with_mutex (&fnc->ctxqueue_mutex) {
1345 ctx = dplane_ctx_dequeue(&fnc->ctxqueue);
1346 }
ba803a2f
RZ
1347 if (ctx == NULL)
1348 break;
1349
3a150188
DS
1350 /*
1351 * Intentionally ignoring the return value
1352 * as that we are ensuring that we can write to
1353 * the output data in the STREAM_WRITEABLE
1354 * check above, so we can ignore the return
1355 */
3b1caddd
RZ
1356 if (fnc->socket != -1)
1357 (void)fpm_nl_enqueue(fnc, ctx);
ba803a2f
RZ
1358
1359 /* Account the processed entries. */
438dd3e7 1360 processed_contexts++;
c871e6c9
RZ
1361 atomic_fetch_sub_explicit(&fnc->counters.ctxqueue_len, 1,
1362 memory_order_relaxed);
ba803a2f
RZ
1363
1364 dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
1365 dplane_provider_enqueue_out_ctx(fnc->prov, ctx);
1366 }
1367
438dd3e7
DE
1368 /* Update count of processed contexts */
1369 atomic_fetch_add_explicit(&fnc->counters.dplane_contexts,
1370 processed_contexts, memory_order_relaxed);
1371
3f2b998f
DE
1372 /* Re-schedule if we ran out of buffer space */
1373 if (no_bufs)
ba803a2f
RZ
1374 thread_add_timer(fnc->fthread->master, fpm_process_queue,
1375 fnc, 0, &fnc->t_dequeue);
1376
164d8e86
DE
1377 /*
1378 * Let the dataplane thread know if there are items in the
1379 * output queue to be processed. Otherwise they may sit
1380 * until the dataplane thread gets scheduled for new,
1381 * unrelated work.
1382 */
1383 if (dplane_provider_out_ctx_queue_len(fnc->prov) > 0)
1384 dplane_provider_work_ready();
ba803a2f
RZ
1385}
1386
/**
 * Handles external (e.g. CLI, data plane or others) events.
 */
static void fpm_process_event(struct thread *t)
{
	struct fpm_nl_ctx *fnc = THREAD_ARG(t);
	enum fpm_nl_events event = THREAD_VAL(t);

	switch (event) {
	case FNE_DISABLE:
		zlog_info("%s: manual FPM disable event", __func__);
		fnc->disabled = true;
		atomic_fetch_add_explicit(&fnc->counters.user_disables, 1,
					  memory_order_relaxed);

		/* Call reconnect to disable timers and clean up context. */
		fpm_reconnect(fnc);
		break;

	case FNE_RECONNECT:
		zlog_info("%s: manual FPM reconnect event", __func__);
		fnc->disabled = false;
		atomic_fetch_add_explicit(&fnc->counters.user_configures, 1,
					  memory_order_relaxed);
		fpm_reconnect(fnc);
		break;

	case FNE_RESET_COUNTERS:
		zlog_info("%s: manual FPM counters reset event", __func__);
		/* NOTE(review): memset over the counters struct is not an
		 * atomic store; concurrent readers in other threads may
		 * observe torn/intermediate values — confirm acceptable. */
		memset(&fnc->counters, 0, sizeof(fnc->counters));
		break;

	case FNE_TOGGLE_NHG:
		zlog_info("%s: toggle next hop groups support", __func__);
		fnc->use_nhg = !fnc->use_nhg;
		/* Reconnect so the peer sees a consistent stream. */
		fpm_reconnect(fnc);
		break;

	case FNE_INTERNAL_RECONNECT:
		fpm_reconnect(fnc);
		break;

	case FNE_NHG_FINISHED:
		if (IS_ZEBRA_DEBUG_FPM)
			zlog_debug("%s: next hop groups walk finished",
				   __func__);
		break;
	case FNE_RIB_FINISHED:
		if (IS_ZEBRA_DEBUG_FPM)
			zlog_debug("%s: RIB walk finished", __func__);
		break;
	case FNE_RMAC_FINISHED:
		if (IS_ZEBRA_DEBUG_FPM)
			zlog_debug("%s: RMAC walk finished", __func__);
		break;
	case FNE_LSP_FINISHED:
		if (IS_ZEBRA_DEBUG_FPM)
			zlog_debug("%s: LSP walk finished", __func__);
		break;
	}
}
1448
d35f447d
RZ
1449/*
1450 * Data plane functions.
1451 */
1452static int fpm_nl_start(struct zebra_dplane_provider *prov)
1453{
1454 struct fpm_nl_ctx *fnc;
1455
1456 fnc = dplane_provider_get_data(prov);
1457 fnc->fthread = frr_pthread_new(NULL, prov_name, prov_name);
1458 assert(frr_pthread_run(fnc->fthread, NULL) == 0);
1459 fnc->ibuf = stream_new(NL_PKT_BUF_SIZE);
1460 fnc->obuf = stream_new(NL_PKT_BUF_SIZE * 128);
1461 pthread_mutex_init(&fnc->obuf_mutex, NULL);
1462 fnc->socket = -1;
3bdd7fca 1463 fnc->disabled = true;
ba803a2f 1464 fnc->prov = prov;
ac96497c 1465 dplane_ctx_q_init(&fnc->ctxqueue);
ba803a2f 1466 pthread_mutex_init(&fnc->ctxqueue_mutex, NULL);
d35f447d 1467
b55ab92a
RZ
1468 /* Set default values. */
1469 fnc->use_nhg = true;
1470
d35f447d
RZ
1471 return 0;
1472}
1473
98a87504 1474static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
d35f447d 1475{
98a87504 1476 /* Disable all events and close socket. */
f9bf1ecc
DE
1477 THREAD_OFF(fnc->t_lspreset);
1478 THREAD_OFF(fnc->t_lspwalk);
981ca597
RZ
1479 THREAD_OFF(fnc->t_nhgreset);
1480 THREAD_OFF(fnc->t_nhgwalk);
98a87504
RZ
1481 THREAD_OFF(fnc->t_ribreset);
1482 THREAD_OFF(fnc->t_ribwalk);
1483 THREAD_OFF(fnc->t_rmacreset);
1484 THREAD_OFF(fnc->t_rmacwalk);
551fa8c3
DS
1485 THREAD_OFF(fnc->t_event);
1486 THREAD_OFF(fnc->t_nhg);
98a87504
RZ
1487 thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
1488 thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
1489 thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
d35f447d 1490
98a87504
RZ
1491 if (fnc->socket != -1) {
1492 close(fnc->socket);
1493 fnc->socket = -1;
1494 }
1495
1496 return 0;
1497}
1498
/*
 * Second shutdown stage: stop the FPM pthread and release every resource
 * allocated by fpm_nl_start()/fpm_nl_new().
 */
static int fpm_nl_finish_late(struct fpm_nl_ctx *fnc)
{
	/* Stop the running thread. */
	frr_pthread_stop(fnc->fthread, NULL);

	/* Free all allocated resources. */
	pthread_mutex_destroy(&fnc->obuf_mutex);
	pthread_mutex_destroy(&fnc->ctxqueue_mutex);
	stream_free(fnc->ibuf);
	stream_free(fnc->obuf);
	/* fnc == gfnc here: the context was calloc()'d in fpm_nl_new(). */
	free(gfnc);
	gfnc = NULL;

	return 0;
}
1514
98a87504
RZ
1515static int fpm_nl_finish(struct zebra_dplane_provider *prov, bool early)
1516{
1517 struct fpm_nl_ctx *fnc;
1518
1519 fnc = dplane_provider_get_data(prov);
1520 if (early)
1521 return fpm_nl_finish_early(fnc);
1522
1523 return fpm_nl_finish_late(fnc);
1524}
1525
/*
 * Provider work callback (runs in the data plane thread): move incoming
 * contexts onto the FPM context queue when connected, or complete them
 * immediately when not; then wake the FPM pthread to drain the queue.
 */
static int fpm_nl_process(struct zebra_dplane_provider *prov)
{
	struct zebra_dplane_ctx *ctx;
	struct fpm_nl_ctx *fnc;
	int counter, limit;
	uint64_t cur_queue, peak_queue = 0, stored_peak_queue;

	fnc = dplane_provider_get_data(prov);
	limit = dplane_provider_get_work_limit(prov);
	for (counter = 0; counter < limit; counter++) {
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/*
		 * Skip all notifications if not connected, we'll walk the RIB
		 * anyway.
		 */
		if (fnc->socket != -1 && fnc->connecting == false) {
			/*
			 * Update the number of queued contexts *before*
			 * enqueueing, to ensure counter consistency.
			 */
			atomic_fetch_add_explicit(&fnc->counters.ctxqueue_len,
						  1, memory_order_relaxed);

			frr_with_mutex (&fnc->ctxqueue_mutex) {
				dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx);
			}

			cur_queue = atomic_load_explicit(
				&fnc->counters.ctxqueue_len,
				memory_order_relaxed);
			if (peak_queue < cur_queue)
				peak_queue = cur_queue;
			continue;
		}

		/* Not connected: complete the context right away. */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	/* Update peak queue length, if we just observed a new peak */
	stored_peak_queue = atomic_load_explicit(
		&fnc->counters.ctxqueue_len_peak, memory_order_relaxed);
	if (stored_peak_queue < peak_queue)
		atomic_store_explicit(&fnc->counters.ctxqueue_len_peak,
				      peak_queue, memory_order_relaxed);

	/* Wake the FPM pthread to drain whatever got queued. */
	if (atomic_load_explicit(&fnc->counters.ctxqueue_len,
				 memory_order_relaxed)
	    > 0)
		thread_add_timer(fnc->fthread->master, fpm_process_queue,
				 fnc, 0, &fnc->t_dequeue);

	/* Ensure dataplane thread is rescheduled if we hit the work limit */
	if (counter >= limit)
		dplane_provider_work_ready();

	return 0;
}
1587
/*
 * Module bootstrap (frr_late_init hook): allocate the global FPM context,
 * register the threaded data plane provider and install the CLI commands.
 */
static int fpm_nl_new(struct thread_master *tm)
{
	struct zebra_dplane_provider *prov = NULL;
	int rv;

	/* NOTE(review): calloc() result is not checked; on allocation
	 * failure the provider callbacks would dereference NULL. */
	gfnc = calloc(1, sizeof(*gfnc));
	rv = dplane_provider_register(prov_name, DPLANE_PRIO_POSTPROCESS,
				      DPLANE_PROV_FLAG_THREADED, fpm_nl_start,
				      fpm_nl_process, fpm_nl_finish, gfnc,
				      &prov);

	if (IS_ZEBRA_DEBUG_DPLANE)
		zlog_debug("%s register status: %d", prov_name, rv);

	/* CLI: show/reset counters and the fpm configuration nodes. */
	install_node(&fpm_node);
	install_element(ENABLE_NODE, &fpm_show_counters_cmd);
	install_element(ENABLE_NODE, &fpm_show_counters_json_cmd);
	install_element(ENABLE_NODE, &fpm_reset_counters_cmd);
	install_element(CONFIG_NODE, &fpm_set_address_cmd);
	install_element(CONFIG_NODE, &no_fpm_set_address_cmd);
	install_element(CONFIG_NODE, &fpm_use_nhg_cmd);
	install_element(CONFIG_NODE, &no_fpm_use_nhg_cmd);

	return 0;
}
1613
/* Module entry point: defer real setup until FRR's late-init hook fires. */
static int fpm_nl_init(void)
{
	hook_register(frr_late_init, fpm_nl_new);
	return 0;
}
1619
/* FRR loadable-module descriptor for the netlink FPM data plane plugin. */
FRR_MODULE_SETUP(
	.name = "dplane_fpm_nl",
	.version = "0.0.1",
	.description = "Data plane plugin for FPM using netlink.",
	.init = fpm_nl_init,
);