]> git.proxmox.com Git - mirror_frr.git/blame - zebra/zebra_fpm.c
*: Rename thread.[ch] to event.[ch]
[mirror_frr.git] / zebra / zebra_fpm.c
CommitLineData
acddc0ed 1// SPDX-License-Identifier: GPL-2.0-or-later
5adc2528
AS
2/*
3 * Main implementation file for interface to Forwarding Plane Manager.
4 *
5 * Copyright (C) 2012 by Open Source Routing.
6 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
5adc2528
AS
7 */
8
9#include <zebra.h>
10
11#include "log.h"
4f8ea50c 12#include "libfrr.h"
5adc2528 13#include "stream.h"
cb37cb33 14#include "event.h"
5adc2528
AS
15#include "network.h"
16#include "command.h"
09781197 17#include "lib/version.h"
e5218ec8 18#include "jhash.h"
5adc2528
AS
19
20#include "zebra/rib.h"
7c551956
DS
21#include "zebra/zserv.h"
22#include "zebra/zebra_ns.h"
23#include "zebra/zebra_vrf.h"
67aeb554 24#include "zebra/zebra_errors.h"
5adc2528
AS
25
26#include "fpm/fpm.h"
5adc2528 27#include "zebra_fpm_private.h"
e5218ec8 28#include "zebra/zebra_router.h"
a780a738 29#include "zebra_vxlan_private.h"
e5218ec8
AD
30
31DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
5adc2528
AS
32
33/*
34 * Interval at which we attempt to connect to the FPM.
35 */
36#define ZFPM_CONNECT_RETRY_IVL 5
37
38/*
39 * Sizes of outgoing and incoming stream buffers for writing/reading
40 * FPM messages.
41 */
42#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
43#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
44
45/*
46 * The maximum number of times the FPM socket write callback can call
47 * 'write' before it yields.
48 */
49#define ZFPM_MAX_WRITES_PER_RUN 10
50
51/*
52 * Interval over which we collect statistics.
53 */
54#define ZFPM_STATS_IVL_SECS 10
fbe748e5
AD
55#define FPM_MAX_MAC_MSG_LEN 512
56
1ac88792 57static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args);
5adc2528
AS
58
/*
 * Structure that holds state for iterating over all route_node
 * structures that are candidates for being communicated to the FPM.
 */
struct zfpm_rnodes_iter {
	/* Iterator over all RIB tables. */
	rib_tables_iter_t tables_iter;
	/* Iterator within the table currently being walked. */
	route_table_iter_t iter;
};
5adc2528
AS
67
/*
 * Statistics.
 *
 * NOTE: every field must be an 'unsigned long' counter --
 * zfpm_stats_compose() treats this struct as a flat array of
 * unsigned longs when summing two stats blocks.
 */
struct zfpm_stats {
	unsigned long connect_calls;
	unsigned long connect_no_sock;

	unsigned long read_cb_calls;

	unsigned long write_cb_calls;
	unsigned long write_calls;
	unsigned long partial_writes;
	unsigned long max_writes_hit;
	unsigned long t_write_yields;

	/* Deletes skipped because the route was never sent to the FPM. */
	unsigned long nop_deletes_skipped;
	unsigned long route_adds;
	unsigned long route_dels;

	unsigned long updates_triggered;
	unsigned long redundant_triggers;

	unsigned long dests_del_after_update;

	/* Counters for the connection-down cleanup thread. */
	unsigned long t_conn_down_starts;
	unsigned long t_conn_down_dests_processed;
	unsigned long t_conn_down_yields;
	unsigned long t_conn_down_finishes;

	/* Counters for the connection-up (full dump) thread. */
	unsigned long t_conn_up_starts;
	unsigned long t_conn_up_dests_processed;
	unsigned long t_conn_up_yields;
	unsigned long t_conn_up_aborts;
	unsigned long t_conn_up_finishes;
};
5adc2528
AS
103
/*
 * States for the FPM state machine.
 *
 * Transitions are made via zfpm_set_state() with a reason string.
 */
enum zfpm_state {

	/*
	 * In this state we are not yet ready to connect to the FPM. This
	 * can happen when this module is disabled, or if we're cleaning up
	 * after a connection has gone down.
	 */
	ZFPM_STATE_IDLE,

	/*
	 * Ready to talk to the FPM and periodically trying to connect to
	 * it.
	 */
	ZFPM_STATE_ACTIVE,

	/*
	 * In the middle of bringing up a TCP connection. Specifically,
	 * waiting for a connect() call to complete asynchronously.
	 */
	ZFPM_STATE_CONNECTING,

	/*
	 * TCP connection to the FPM is up.
	 */
	ZFPM_STATE_ESTABLISHED

};
5adc2528 134
fb0aa886
AS
/*
 * Message format to be used to communicate with the FPM.
 *
 * Selects which encoder zfpm_encode_route()/zfpm_encode_mac() use;
 * NETLINK and PROTOBUF support depend on compile-time HAVE_* macros.
 */
enum zfpm_msg_format {
	ZFPM_MSG_FORMAT_NONE,
	ZFPM_MSG_FORMAT_NETLINK,
	ZFPM_MSG_FORMAT_PROTOBUF,
};
143
5adc2528
AS
/*
 * Globals.
 *
 * All module state lives in a single instance of this structure
 * (zfpm_g below).
 */
struct zfpm_glob {

	/*
	 * True if the FPM module has been enabled.
	 */
	int enabled;

	/*
	 * Message format to be used to communicate with the fpm.
	 */
	enum zfpm_msg_format message_format;

	/* Thread master on which all FPM I/O and timers are scheduled. */
	struct thread_master *master;

	/* Current state of the FPM connection state machine. */
	enum zfpm_state state;

	/* Address of the FPM server to connect to. */
	in_addr_t fpm_server;
	/*
	 * Port on which the FPM is running.
	 */
	int fpm_port;

	/*
	 * List of rib_dest_t structures to be processed
	 */
	TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;

	/*
	 * List of fpm_mac_info structures to be processed
	 */
	TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;

	/*
	 * Hash table of fpm_mac_info_t entries
	 *
	 * While adding fpm_mac_info_t for a MAC to the mac_q,
	 * it is possible that another fpm_mac_info_t node for this MAC
	 * is already present in the queue.
	 * This is possible in the case of consecutive add->delete operations.
	 * To avoid such duplicate insertions in the mac_q,
	 * define a hash table for fpm_mac_info_t which can be looked up
	 * to see if an fpm_mac_info_t node for a MAC is already present
	 * in the mac_q.
	 */
	struct hash *fpm_mac_info_table;

	/*
	 * Stream socket to the FPM.  -1 when no connection is up.
	 */
	int sock;

	/*
	 * Buffers for messages to/from the FPM.
	 */
	struct stream *obuf;
	struct stream *ibuf;

	/*
	 * Threads for I/O.
	 */
	struct thread *t_connect;
	struct thread *t_write;
	struct thread *t_read;

	/*
	 * Thread to clean up after the TCP connection to the FPM goes down
	 * and the state that belongs to it.
	 */
	struct thread *t_conn_down;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_down_state;

	/*
	 * Thread to take actions once the TCP conn to the FPM comes up, and
	 * the state that belongs to it.
	 */
	struct thread *t_conn_up;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_up_state;

	/* Number of connect() attempts and when the last one was made. */
	unsigned long connect_calls;
	time_t last_connect_call_time;

	/*
	 * Stats from the start of the current statistics interval up to
	 * now. These are the counters we typically update in the code.
	 */
	struct zfpm_stats stats;

	/*
	 * Statistics that were gathered in the last collection interval.
	 */
	struct zfpm_stats last_ivl_stats;

	/*
	 * Cumulative stats from the last clear to the start of the current
	 * statistics interval.
	 */
	struct zfpm_stats cumulative_stats;

	/*
	 * Stats interval timer.
	 */
	struct thread *t_stats;

	/*
	 * If non-zero, the last time when statistics were cleared.
	 */
	time_t last_stats_clear_time;

	/*
	 * Flag to track the MAC dump status to FPM
	 * (set once the RMAC table has been enqueued after connect).
	 */
	bool fpm_mac_dump_done;
};
5adc2528 266
768e40bd
DS
267static struct zfpm_glob zfpm_glob_space;
268static struct zfpm_glob *zfpm_g = &zfpm_glob_space;
5adc2528 269
d62a17ae 270static int zfpm_trigger_update(struct route_node *rn, const char *reason);
4f8ea50c 271
cc9f21da
DS
272static void zfpm_read_cb(struct thread *thread);
273static void zfpm_write_cb(struct thread *thread);
5adc2528 274
1d6a3ee8 275static void zfpm_set_state(enum zfpm_state state, const char *reason);
d62a17ae 276static void zfpm_start_connect_timer(const char *reason);
277static void zfpm_start_stats_timer(void);
a780a738 278static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
5adc2528 279
316d2d52
NK
280static const char ipv4_ll_buf[16] = "169.254.0.1";
281union g_addr ipv4ll_gateway;
282
5adc2528
AS
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper around thread_should_yield() so callers in this file
 * go through a single place.
 */
static inline int zfpm_thread_should_yield(struct thread *t)
{
	int yield_now = thread_should_yield(t);

	return yield_now;
}
290
291/*
292 * zfpm_state_to_str
293 */
1d6a3ee8 294static const char *zfpm_state_to_str(enum zfpm_state state)
5adc2528 295{
d62a17ae 296 switch (state) {
5adc2528 297
d62a17ae 298 case ZFPM_STATE_IDLE:
299 return "idle";
5adc2528 300
d62a17ae 301 case ZFPM_STATE_ACTIVE:
302 return "active";
5adc2528 303
d62a17ae 304 case ZFPM_STATE_CONNECTING:
305 return "connecting";
5adc2528 306
d62a17ae 307 case ZFPM_STATE_ESTABLISHED:
308 return "established";
5adc2528 309
d62a17ae 310 default:
311 return "unknown";
312 }
5adc2528
AS
313}
314
5adc2528
AS
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given time.
 */
static time_t zfpm_get_elapsed_time(time_t reference)
{
	time_t now = monotime(NULL);

	/* The clock is monotonic; going backwards indicates a bug. */
	if (now < reference) {
		assert(0);
		return 0;
	}

	return now - reference;
}
333
5adc2528
AS
/*
 * zfpm_rnodes_iter_init
 *
 * Prepare 'iter' to walk every route_node in every RIB table.
 */
static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	rib_tables_iter_init(&iter->tables_iter);

	/*
	 * This is a hack, but it makes implementing 'next' easier by
	 * ensuring that route_table_iter_next() will return NULL the first
	 * time we call it: the inner iterator starts out exhausted, so the
	 * 'next' logic immediately advances to the first real table.
	 */
	route_table_iter_init(&iter->iter, NULL);
	route_table_iter_cleanup(&iter->iter);
}
350
351/*
352 * zfpm_rnodes_iter_next
353 */
332cba05
DS
354static inline struct route_node *
355zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
5adc2528 356{
d62a17ae 357 struct route_node *rn;
358 struct route_table *table;
5adc2528 359
d62a17ae 360 while (1) {
361 rn = route_table_iter_next(&iter->iter);
362 if (rn)
363 return rn;
5adc2528 364
d62a17ae 365 /*
366 * We've made our way through this table, go to the next one.
367 */
368 route_table_iter_cleanup(&iter->iter);
5adc2528 369
c6bbea17 370 table = rib_tables_iter_next(&iter->tables_iter);
5adc2528 371
d62a17ae 372 if (!table)
373 return NULL;
5adc2528 374
d62a17ae 375 route_table_iter_init(&iter->iter, table);
376 }
5adc2528 377
d62a17ae 378 return NULL;
5adc2528
AS
379}
380
/*
 * zfpm_rnodes_iter_pause
 *
 * Pause the walk (used before yielding); it can be resumed later by
 * simply calling zfpm_rnodes_iter_next() again.
 */
static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_pause(&iter->iter);
}
388
/*
 * zfpm_rnodes_iter_cleanup
 *
 * Release both the inner (route-table) and outer (tables) iterators.
 */
static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_cleanup(&iter->iter);
	rib_tables_iter_cleanup(&iter->tables_iter);
}
397
/*
 * zfpm_stats_init
 *
 * Initialize a statistics block (all counters zeroed).
 */
static inline void zfpm_stats_init(struct zfpm_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
}
407
/*
 * zfpm_stats_reset
 *
 * Reset all counters to zero; alias of zfpm_stats_init() kept for
 * readability at call sites.
 */
static inline void zfpm_stats_reset(struct zfpm_stats *stats)
{
	zfpm_stats_init(stats);
}
415
/*
 * zfpm_stats_copy
 *
 * Copy all counters from 'src' into 'dest'.
 */
static inline void zfpm_stats_copy(const struct zfpm_stats *src,
				   struct zfpm_stats *dest)
{
	memcpy(dest, src, sizeof(*dest));
}
424
425/*
426 * zfpm_stats_compose
427 *
428 * Total up the statistics in two stats structures ('s1 and 's2') and
429 * return the result in the third argument, 'result'. Note that the
430 * pointer 'result' may be the same as 's1' or 's2'.
431 *
432 * For simplicity, the implementation below assumes that the stats
433 * structure is composed entirely of counters. This can easily be
434 * changed when necessary.
435 */
eeaf257b
DS
436static void zfpm_stats_compose(const struct zfpm_stats *s1,
437 const struct zfpm_stats *s2,
438 struct zfpm_stats *result)
5adc2528 439{
d62a17ae 440 const unsigned long *p1, *p2;
441 unsigned long *result_p;
442 int i, num_counters;
5adc2528 443
d62a17ae 444 p1 = (const unsigned long *)s1;
445 p2 = (const unsigned long *)s2;
446 result_p = (unsigned long *)result;
5adc2528 447
eeaf257b 448 num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
5adc2528 449
d62a17ae 450 for (i = 0; i < num_counters; i++) {
451 result_p[i] = p1[i] + p2[i];
452 }
5adc2528
AS
453}
454
/*
 * zfpm_read_on
 *
 * Schedule the read callback on the FPM socket.  Must only be called
 * when no read is already pending and the socket is valid.
 */
static inline void zfpm_read_on(void)
{
	assert(!zfpm_g->t_read);
	assert(zfpm_g->sock >= 0);

	thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
			&zfpm_g->t_read);
}
466
/*
 * zfpm_write_on
 *
 * Schedule the write callback on the FPM socket.  Must only be called
 * when no write is already pending and the socket is valid.
 */
static inline void zfpm_write_on(void)
{
	assert(!zfpm_g->t_write);
	assert(zfpm_g->sock >= 0);

	thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
			 &zfpm_g->t_write);
}
478
/*
 * zfpm_read_off
 *
 * Cancel any pending read on the FPM socket (no-op if none pending).
 */
static inline void zfpm_read_off(void)
{
	THREAD_OFF(zfpm_g->t_read);
}
486
/*
 * zfpm_write_off
 *
 * Cancel any pending write on the FPM socket (no-op if none pending).
 */
static inline void zfpm_write_off(void)
{
	THREAD_OFF(zfpm_g->t_write);
}
494
f0c459f0
DS
/*
 * zfpm_connect_off
 *
 * Cancel any pending connect-retry timer (no-op if none pending).
 */
static inline void zfpm_connect_off(void)
{
	THREAD_OFF(zfpm_g->t_connect);
}
499
5adc2528
AS
/*
 * zfpm_conn_up_thread_cb
 *
 * Callback for actions to be taken when the connection to the FPM
 * comes up.  Enqueues the RMAC table once, then walks every route_node
 * and triggers an FPM update for each dest, yielding periodically by
 * rescheduling itself with a zero-delay timer.
 */
static void zfpm_conn_up_thread_cb(struct thread *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;

	iter = &zfpm_g->t_conn_up_state.iter;

	/* Abort the dump if the connection dropped while we were yielded. */
	if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
		zfpm_debug(
			"Connection not up anymore, conn_up thread aborting");
		zfpm_g->stats.t_conn_up_aborts++;
		goto done;
	}

	if (!zfpm_g->fpm_mac_dump_done) {
		/* Enqueue FPM updates for all the RMAC entries */
		hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table,
			     NULL);
		/* mark dump done so that its not repeated after yield */
		zfpm_g->fpm_mac_dump_done = true;
	}

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			zfpm_g->stats.t_conn_up_dests_processed++;
			zfpm_trigger_update(rnode, NULL);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		/* Pause the walk and reschedule ourselves to resume it. */
		zfpm_g->stats.t_conn_up_yields++;
		zfpm_rnodes_iter_pause(iter);
		thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
				      NULL, 0, &zfpm_g->t_conn_up);
		return;
	}

	zfpm_g->stats.t_conn_up_finishes++;

done:
	zfpm_rnodes_iter_cleanup(iter);
}
555
/*
 * zfpm_connection_up
 *
 * Called when the connection to the FPM comes up: arms the I/O
 * callbacks, moves the state machine to ESTABLISHED and kicks off the
 * thread that pushes the existing routes/MACs to the FPM.
 */
static void zfpm_connection_up(const char *detail)
{
	assert(zfpm_g->sock >= 0);
	zfpm_read_on();
	zfpm_write_on();
	zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);

	/*
	 * Start thread to push existing routes to the FPM.
	 */
	THREAD_OFF(zfpm_g->t_conn_up);

	zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
	/* Force a fresh RMAC dump on this new connection. */
	zfpm_g->fpm_mac_dump_done = false;

	zfpm_debug("Starting conn_up thread");

	thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
			      &zfpm_g->t_conn_up);
	zfpm_g->stats.t_conn_up_starts++;
}
582
583/*
584 * zfpm_connect_check
585 *
586 * Check if an asynchronous connect() to the FPM is complete.
587 */
d62a17ae 588static void zfpm_connect_check(void)
5adc2528 589{
d62a17ae 590 int status;
591 socklen_t slen;
592 int ret;
593
594 zfpm_read_off();
595 zfpm_write_off();
596
597 slen = sizeof(status);
598 ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
599 &slen);
600
601 if (ret >= 0 && status == 0) {
602 zfpm_connection_up("async connect complete");
603 return;
604 }
605
606 /*
607 * getsockopt() failed or indicated an error on the socket.
608 */
609 close(zfpm_g->sock);
610 zfpm_g->sock = -1;
611
612 zfpm_start_connect_timer("getsockopt() after async connect failed");
613 return;
5adc2528
AS
614}
615
/*
 * zfpm_conn_down_thread_cb
 *
 * Callback that is invoked to clean up state after the TCP connection
 * to the FPM goes down: frees all queued MAC updates, then walks every
 * route_node clearing the FPM flags and dequeuing pending dests,
 * yielding periodically.  When done, restarts the connect timer.
 */
static void zfpm_conn_down_thread_cb(struct thread *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;
	struct fpm_mac_info_t *mac = NULL;

	assert(zfpm_g->state == ZFPM_STATE_IDLE);

	/*
	 * Delink and free all fpm_mac_info_t nodes
	 * in the mac_q and fpm_mac_info_hash
	 */
	while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
		zfpm_mac_info_del(mac);

	zfpm_g->t_conn_down = NULL;

	iter = &zfpm_g->t_conn_down_state.iter;

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			/* Drop the dest from the pending-update queue. */
			if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
				TAILQ_REMOVE(&zfpm_g->dest_q, dest,
					     fpm_q_entries);
			}

			UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);

			zfpm_g->stats.t_conn_down_dests_processed++;

			/*
			 * Check if the dest should be deleted.
			 */
			rib_gc_dest(rnode);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		/* Pause the walk and reschedule ourselves to resume it. */
		zfpm_g->stats.t_conn_down_yields++;
		zfpm_rnodes_iter_pause(iter);
		zfpm_g->t_conn_down = NULL;
		thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
				      NULL, 0, &zfpm_g->t_conn_down);
		return;
	}

	zfpm_g->stats.t_conn_down_finishes++;
	zfpm_rnodes_iter_cleanup(iter);

	/*
	 * Start the process of connecting to the FPM again.
	 */
	zfpm_start_connect_timer("cleanup complete");
}
684
/*
 * zfpm_connection_down
 *
 * Called when the connection to the FPM has gone down: cancels I/O,
 * resets the buffers, closes the socket, schedules the cleanup thread
 * and moves the state machine back to IDLE.
 */
static void zfpm_connection_down(const char *detail)
{
	if (!detail)
		detail = "unknown";

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);

	zlog_info("connection to the FPM has gone down: %s", detail);

	zfpm_read_off();
	zfpm_write_off();

	/* Discard any partially read/written messages. */
	stream_reset(zfpm_g->ibuf);
	stream_reset(zfpm_g->obuf);

	if (zfpm_g->sock >= 0) {
		close(zfpm_g->sock);
		zfpm_g->sock = -1;
	}

	/*
	 * Start thread to clean up state after the connection goes down.
	 */
	assert(!zfpm_g->t_conn_down);
	zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
	zfpm_g->t_conn_down = NULL;
	thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
			      &zfpm_g->t_conn_down);
	zfpm_g->stats.t_conn_down_starts++;

	zfpm_set_state(ZFPM_STATE_IDLE, detail);
}
722
/*
 * zfpm_read_cb
 *
 * Read callback on the FPM socket.  Accumulates a message header and
 * then the full message body in zfpm_g->ibuf across invocations
 * (short reads re-arm the callback and resume where they left off).
 * Received messages are currently discarded.
 */
static void zfpm_read_cb(struct thread *thread)
{
	size_t already;
	struct stream *ibuf;
	uint16_t msg_len;
	fpm_msg_hdr_t *hdr;

	zfpm_g->stats.read_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	ibuf = zfpm_g->ibuf;

	/* 'already' = bytes of the current message read so far. */
	already = stream_get_endp(ibuf);
	if (already < FPM_MSG_HDR_LEN) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock,
					FPM_MSG_HDR_LEN - already);
		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "closed socket in read(%d): %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("closed socket in read");
			return;
		}

		/* Short read: keep the partial header, try again later. */
		if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
			goto done;

		already = FPM_MSG_HDR_LEN;
	}

	stream_set_getp(ibuf, 0);

	hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);

	if (!fpm_msg_hdr_ok(hdr)) {
		zfpm_connection_down("invalid message header");
		return;
	}

	msg_len = fpm_msg_len(hdr);

	/*
	 * Read out the rest of the packet.
	 */
	if (already < msg_len) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);

		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "failed to read message(%d) %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("failed to read message");
			return;
		}

		/* Short read: keep the partial message, try again later. */
		if (nbyte != (ssize_t)(msg_len - already))
			goto done;
	}

	/*
	 * Just throw it away for now.
	 */
	stream_reset(ibuf);

done:
	zfpm_read_on();
}
817
21d814eb
AD
818static bool zfpm_updates_pending(void)
819{
820 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
821 return true;
822
823 return false;
824}
825
5adc2528
AS
826/*
827 * zfpm_writes_pending
828 *
2951a7a4 829 * Returns true if we may have something to write to the FPM.
5adc2528 830 */
d62a17ae 831static int zfpm_writes_pending(void)
5adc2528
AS
832{
833
d62a17ae 834 /*
835 * Check if there is any data in the outbound buffer that has not
836 * been written to the socket yet.
837 */
838 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
839 return 1;
5adc2528 840
d62a17ae 841 /*
21d814eb 842 * Check if there are any updates scheduled on the outbound queues.
d62a17ae 843 */
21d814eb 844 if (zfpm_updates_pending())
d62a17ae 845 return 1;
5adc2528 846
d62a17ae 847 return 0;
5adc2528
AS
848}
849
/*
 * zfpm_encode_route
 *
 * Encode a message to the FPM with information about the given route.
 *
 * Returns the number of bytes written to the buffer. 0 or a negative
 * value indicates an error.
 */
static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
				    char *in_buf, size_t in_buf_len,
				    fpm_msg_type_e *msg_type)
{
	size_t len;
#ifdef HAVE_NETLINK
	int cmd;
#endif
	len = 0;

	*msg_type = FPM_MSG_TYPE_NONE;

	switch (zfpm_g->message_format) {

	case ZFPM_MSG_FORMAT_PROTOBUF:
#ifdef HAVE_PROTOBUF
		len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
						 in_buf_len);
		*msg_type = FPM_MSG_TYPE_PROTOBUF;
#endif
		break;

	case ZFPM_MSG_FORMAT_NETLINK:
#ifdef HAVE_NETLINK
		*msg_type = FPM_MSG_TYPE_NETLINK;
		/* A NULL 're' means the route is being deleted. */
		cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
		len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
						in_buf_len);
		assert(fpm_msg_align(len) == len);
#endif /* HAVE_NETLINK */
		break;

	case ZFPM_MSG_FORMAT_NONE:
		break;
	}

	return len;
}
896
/*
 * zfpm_route_for_update
 *
 * Returns the re that is to be sent to the FPM for a given dest.
 */
struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
{
	/* The currently selected FIB entry; NULL is treated as a delete
	 * by the caller (see zfpm_build_route_updates). */
	return dest->selected_fib;
}
906
/*
 * Define an enum for return codes for queue processing functions
 *
 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
 * Stop processing all the queues and empty the buffer by writing its content
 * to the socket.
 *
 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
 * empty or we have processed enough updates from this queue.
 * So, move on to the next queue.
 */
enum {
	FPM_WRITE_STOP = 0,
	FPM_GOTO_NEXT_Q = 1
};

/* Max updates drained from one queue per pass before yielding to the
 * other queues. */
#define FPM_QUEUE_PROCESS_LIMIT 10000
924
/*
 * zfpm_build_route_updates
 *
 * Process the dest_q queue and write FPM messages to the outbound buffer.
 *
 * Returns FPM_WRITE_STOP when the output buffer is full, or
 * FPM_GOTO_NEXT_Q when the queue is drained / the per-pass limit is hit.
 */
static int zfpm_build_route_updates(void)
{
	struct stream *s;
	rib_dest_t *dest;
	unsigned char *buf, *data, *buf_end;
	size_t msg_len;
	size_t data_len;
	fpm_msg_hdr_t *hdr;
	struct route_entry *re;
	int is_add, write_msg;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->dest_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/*
		 * Make sure there is enough space to write another message.
		 */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		dest = TAILQ_FIRST(&zfpm_g->dest_q);
		if (!dest)
			return FPM_GOTO_NEXT_Q;

		assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));

		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);

		/* NULL 're' means this update is a deletion. */
		re = zfpm_route_for_update(dest);
		is_add = re ? 1 : 0;

		write_msg = 1;

		/*
		 * If this is a route deletion, and we have not sent the
		 * route to the FPM previously, skip it.
		 */
		if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
			write_msg = 0;
			zfpm_g->stats.nop_deletes_skipped++;
		}

		if (write_msg) {
			data_len = zfpm_encode_route(dest, re, (char *)data,
						     buf_end - data, &msg_type);

			if (data_len) {
				hdr->msg_type = msg_type;
				msg_len = fpm_data_len_to_msg_len(data_len);
				hdr->msg_len = htons(msg_len);
				stream_forward_endp(s, msg_len);

				if (is_add)
					zfpm_g->stats.route_adds++;
				else
					zfpm_g->stats.route_dels++;
			} else {
				zlog_err("%s: Encoding Prefix: %pRN No valid nexthops",
					 __func__, dest->rnode);
			}
		}

		/*
		 * Remove the dest from the queue, and reset the flag.
		 */
		UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
		TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);

		/* Track whether the FPM currently knows about this dest. */
		if (is_add) {
			SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		} else {
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		}

		/*
		 * Delete the destination if necessary.
		 */
		if (rib_gc_dest(dest->rnode))
			zfpm_g->stats.dests_del_after_update++;

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (true);
}
1033
/*
 * zfpm_encode_mac
 *
 * Encode a message to FPM with information about the given MAC.
 *
 * Returns the number of bytes written to the buffer.
 */
static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
				  size_t in_buf_len, fpm_msg_type_e *msg_type)
{
	size_t len = 0;

	*msg_type = FPM_MSG_TYPE_NONE;

	switch (zfpm_g->message_format) {

	case ZFPM_MSG_FORMAT_NONE:
		break;
	case ZFPM_MSG_FORMAT_NETLINK:
#ifdef HAVE_NETLINK
		len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
		assert(fpm_msg_align(len) == len);
		*msg_type = FPM_MSG_TYPE_NETLINK;
#endif /* HAVE_NETLINK */
		break;
	case ZFPM_MSG_FORMAT_PROTOBUF:
		/* MAC updates are not supported in protobuf format here. */
		break;
	}
	return len;
}
1064
/*
 * zfpm_build_mac_updates
 *
 * Process the mac_q queue and write FPM MAC messages to the outbound
 * buffer.  Returns FPM_WRITE_STOP when the buffer is full, or
 * FPM_GOTO_NEXT_Q when the queue is drained / the per-pass limit is hit.
 */
static int zfpm_build_mac_updates(void)
{
	struct stream *s;
	struct fpm_mac_info_t *mac;
	unsigned char *buf, *data, *buf_end;
	fpm_msg_hdr_t *hdr;
	size_t data_len, msg_len;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->mac_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/* Make sure there is enough space to write another message. */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		mac = TAILQ_FIRST(&zfpm_g->mac_q);
		if (!mac)
			return FPM_GOTO_NEXT_Q;

		/* Check for no-op */
		if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
			zfpm_g->stats.nop_deletes_skipped++;
			zfpm_mac_info_del(mac);
			continue;
		}

		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);
		data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
					   &msg_type);
		assert(data_len);

		hdr->msg_type = msg_type;
		msg_len = fpm_data_len_to_msg_len(data_len);
		hdr->msg_len = htons(msg_len);
		stream_forward_endp(s, msg_len);

		/* Remove the MAC from the queue, and delete it. */
		zfpm_mac_info_del(mac);

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (1);
}
1126
21d814eb
AD
1127/*
1128 * zfpm_build_updates
1129 *
1130 * Process the outgoing queues and write messages to the outbound
1131 * buffer.
1132 */
1133static void zfpm_build_updates(void)
1134{
1135 struct stream *s;
1136
1137 s = zfpm_g->obuf;
1138 assert(stream_empty(s));
1139
1140 do {
1141 /*
1142 * Stop processing the queues if zfpm_g->obuf is full
1143 * or we do not have more updates to process
1144 */
1145 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1146 break;
1147 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1148 break;
1149 } while (zfpm_updates_pending());
1150}
1151
5adc2528
AS
/*
 * zfpm_write_cb
 *
 * Write callback on the FPM socket: refills the outbound buffer from
 * the queues when empty, writes it out, and handles partial writes,
 * per-run write limits and thread yields.  Re-arms itself while work
 * remains pending.
 */
static void zfpm_write_cb(struct thread *thread)
{
	struct stream *s;
	int num_writes;

	zfpm_g->stats.write_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	num_writes = 0;

	do {
		int bytes_to_write, bytes_written;

		s = zfpm_g->obuf;

		/*
		 * If the stream is empty, try fill it up with data.
		 */
		if (stream_empty(s)) {
			zfpm_build_updates();
		}

		bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
		if (!bytes_to_write)
			break;

		bytes_written =
			write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
		zfpm_g->stats.write_calls++;
		num_writes++;

		if (bytes_written < 0) {
			/* EAGAIN/EINTR etc.: retry on the next callback. */
			if (ERRNO_IO_RETRY(errno))
				break;

			zfpm_connection_down("failed to write to socket");
			return;
		}

		if (bytes_written != bytes_to_write) {

			/*
			 * Partial write: advance the read pointer past what
			 * was sent and retry the remainder later.
			 */
			stream_forward_getp(s, bytes_written);
			zfpm_g->stats.partial_writes++;
			break;
		}

		/*
		 * We've written out the entire contents of the stream.
		 */
		stream_reset(s);

		if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
			zfpm_g->stats.max_writes_hit++;
			break;
		}

		if (zfpm_thread_should_yield(thread)) {
			zfpm_g->stats.t_write_yields++;
			break;
		}
	} while (1);

	if (zfpm_writes_pending())
		zfpm_write_on();
}
1233
/*
 * zfpm_connect_cb
 *
 * Connect-timer callback: attempt a non-blocking TCP connect to the
 * FPM server (loopback unless 'fpm connection ip' was configured).
 * On immediate success the connection is brought up; on EINPROGRESS
 * we move to CONNECTING and wait for the read/write callbacks; on any
 * other failure the retry timer is restarted.
 */
static void zfpm_connect_cb(struct thread *t)
{
	int sock, ret;
	struct sockaddr_in serv;

	assert(zfpm_g->state == ZFPM_STATE_ACTIVE);

	sock = socket(AF_INET, SOCK_STREAM, 0);
	if (sock < 0) {
		zlog_err("Failed to create socket for connect(): %s",
			 strerror(errno));
		zfpm_g->stats.connect_no_sock++;
		return;
	}

	/* Non-blocking so connect() can complete asynchronously. */
	set_nonblocking(sock);

	/* Make server socket. */
	memset(&serv, 0, sizeof(serv));
	serv.sin_family = AF_INET;
	serv.sin_port = htons(zfpm_g->fpm_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
	serv.sin_len = sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
	if (!zfpm_g->fpm_server)
		serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	else
		/* fpm_server is already in network byte order (inet_addr). */
		serv.sin_addr.s_addr = (zfpm_g->fpm_server);

	/*
	 * Connect to the FPM.
	 */
	zfpm_g->connect_calls++;
	zfpm_g->stats.connect_calls++;
	zfpm_g->last_connect_call_time = monotime(NULL);

	ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
	if (ret >= 0) {
		zfpm_g->sock = sock;
		zfpm_connection_up("connect succeeded");
		return;
	}

	if (errno == EINPROGRESS) {
		/* Async connect under way; completion is detected by the
		 * read/write callbacks via zfpm_connect_check(). */
		zfpm_g->sock = sock;
		zfpm_read_on();
		zfpm_write_on();
		zfpm_set_state(ZFPM_STATE_CONNECTING,
			       "async connect in progress");
		return;
	}

	zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
	close(sock);

	/*
	 * Restart timer for retrying connection.
	 */
	zfpm_start_connect_timer("connect() failed");
}
1297
/*
 * zfpm_set_state
 *
 * Move state machine into the given state.
 *
 * @param state  target state
 * @param reason human-readable cause, used for debug logging only
 *               (substituted with "Unknown" when NULL)
 *
 * The switch below asserts the legal predecessor states and the
 * resources (socket, read/write/connect threads) each state requires,
 * so an illegal transition aborts the daemon rather than corrupting
 * the connection bookkeeping.
 */
static void zfpm_set_state(enum zfpm_state state, const char *reason)
{
	enum zfpm_state cur_state = zfpm_g->state;

	if (!reason)
		reason = "Unknown";

	/* No-op transitions are silently ignored. */
	if (state == cur_state)
		return;

	zfpm_debug("beginning state transition %s -> %s. Reason: %s",
		   zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
		   reason);

	switch (state) {

	case ZFPM_STATE_IDLE:
		assert(cur_state == ZFPM_STATE_ESTABLISHED);
		break;

	case ZFPM_STATE_ACTIVE:
		assert(cur_state == ZFPM_STATE_IDLE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->t_connect);
		break;

	case ZFPM_STATE_CONNECTING:
		assert(zfpm_g->sock);
		assert(cur_state == ZFPM_STATE_ACTIVE);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;

	case ZFPM_STATE_ESTABLISHED:
		assert(cur_state == ZFPM_STATE_ACTIVE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->sock);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;
	}

	zfpm_g->state = state;
}
1347
1348/*
1349 * zfpm_calc_connect_delay
1350 *
1351 * Returns the number of seconds after which we should attempt to
1352 * reconnect to the FPM.
1353 */
d62a17ae 1354static long zfpm_calc_connect_delay(void)
5adc2528 1355{
d62a17ae 1356 time_t elapsed;
5adc2528 1357
d62a17ae 1358 /*
1359 * Return 0 if this is our first attempt to connect.
1360 */
1361 if (zfpm_g->connect_calls == 0) {
1362 return 0;
1363 }
5adc2528 1364
d62a17ae 1365 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
5adc2528 1366
d62a17ae 1367 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1368 return 0;
1369 }
5adc2528 1370
d62a17ae 1371 return ZFPM_CONNECT_RETRY_IVL - elapsed;
5adc2528
AS
1372}
1373
/*
 * zfpm_start_connect_timer
 *
 * Schedule zfpm_connect_cb() after the computed back-off delay and
 * transition the state machine to ACTIVE.  Must only be called when
 * no connect timer is armed and the socket is closed.
 *
 * @param reason free-form cause, forwarded to zfpm_set_state() for
 *               debug logging
 */
static void zfpm_start_connect_timer(const char *reason)
{
	long delay_secs;

	assert(!zfpm_g->t_connect);
	assert(zfpm_g->sock < 0);

	assert(zfpm_g->state == ZFPM_STATE_IDLE
	       || zfpm_g->state == ZFPM_STATE_ACTIVE
	       || zfpm_g->state == ZFPM_STATE_CONNECTING);

	delay_secs = zfpm_calc_connect_delay();
	zfpm_debug("scheduling connect in %ld seconds", delay_secs);

	/* Arm the timer first: ZFPM_STATE_ACTIVE asserts t_connect is set. */
	thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
			 &zfpm_g->t_connect);
	zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
}
1395
/*
 * zfpm_is_enabled
 *
 * Returns true (non-zero) if the zebra FPM module has been enabled.
 * Note this returns the raw 'enabled' flag, not a normalized 0/1.
 */
static inline int zfpm_is_enabled(void)
{
	return zfpm_g->enabled;
}
1405
1406/*
1407 * zfpm_conn_is_up
1408 *
2951a7a4 1409 * Returns true if the connection to the FPM is up.
5adc2528 1410 */
d62a17ae 1411static inline int zfpm_conn_is_up(void)
5adc2528 1412{
d62a17ae 1413 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1414 return 0;
5adc2528 1415
d62a17ae 1416 assert(zfpm_g->sock >= 0);
5adc2528 1417
d62a17ae 1418 return 1;
5adc2528
AS
1419}
1420
/*
 * zfpm_trigger_update
 *
 * The zebra code invokes this function (via the rib_update hook) to
 * indicate that we should send an update to the FPM about the given
 * route_node.  The destination is flagged and appended to the FPM
 * destination queue exactly once; duplicate triggers are counted and
 * ignored.
 *
 * @param rn     route node whose destination changed
 * @param reason optional cause, used for debug logging only
 * @return 0 always (hook convention)
 */
static int zfpm_trigger_update(struct route_node *rn, const char *reason)
{
	rib_dest_t *dest;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	dest = rib_dest_from_rnode(rn);

	/* Already queued for the FPM: nothing to do. */
	if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
		zfpm_g->stats.redundant_triggers++;
		return 0;
	}

	if (reason) {
		zfpm_debug("%pFX triggering update to FPM - Reason: %s", &rn->p,
			   reason);
	}

	SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
	TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
	zfpm_g->stats.updates_triggered++;

	/*
	 * Make sure that writes are enabled.
	 */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1463
/*
 * zfpm_trigger_remove
 *
 * The zebra code invokes this function (via the rib_shutdown hook) to
 * indicate that the given route_node should be removed from the FPM
 * update queue, e.g. because the destination is going away during
 * shutdown.  No message is sent to the FPM for it.
 */

static int zfpm_trigger_remove(struct route_node *rn)
{
	rib_dest_t *dest;

	if (!zfpm_conn_is_up())
		return 0;

	dest = rib_dest_from_rnode(rn);
	/* Not queued: nothing to unlink. */
	if (!CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM))
		return 0;

	zfpm_debug("%pRN Removing from update queue shutting down", rn);

	UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
	TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);

	return 0;
}
1489
e5218ec8
AD
1490/*
1491 * Generate Key for FPM MAC info hash entry
e5218ec8
AD
1492 */
1493static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1494{
1495 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1496 uint32_t mac_key;
1497
1498 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1499
1500 return jhash_2words(mac_key, fpm_mac->vni, 0);
1501}
1502
1503/*
1504 * Compare function for FPM MAC info hash lookup
1505 */
1506static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1507{
1508 const struct fpm_mac_info_t *fpm_mac1 = p1;
1509 const struct fpm_mac_info_t *fpm_mac2 = p2;
1510
1511 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1512 != 0)
1513 return false;
e5218ec8
AD
1514 if (fpm_mac1->vni != fpm_mac2->vni)
1515 return false;
1516
1517 return true;
1518}
1519
/*
 * Lookup FPM MAC info hash entry.
 *
 * @param key entry with macaddr and vni filled in
 * @return the queued fpm_mac_info_t for (MAC, VNI), or NULL
 */
static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
{
	return hash_lookup(zfpm_g->fpm_mac_info_table, key);
}
1527
/*
 * Callback to allocate fpm_mac_info_t structure.
 *
 * Invoked by hash_get() when no entry exists for the key; copies only
 * the hash key fields (MAC address and VNI) — the caller fills in the
 * remaining fields (vtep, flags, ifindexes) afterwards.
 */
static void *zfpm_mac_info_alloc(void *p)
{
	const struct fpm_mac_info_t *key = p;
	struct fpm_mac_info_t *fpm_mac;

	fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));

	/* NOTE(review): copies ETH_ALEN (6) bytes into macaddr — assumes
	 * macaddr is exactly the 6-octet ethernet address; confirm. */
	memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
	fpm_mac->vni = key->vni;

	return (void *)fpm_mac;
}
1543
/*
 * Delink and free fpm_mac_info_t.
 *
 * Removes the entry from both the hash table and the pending-MAC
 * queue before freeing it; order matters since both structures
 * reference the same allocation.
 */
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
{
	hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
	TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
	XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
}
1553
a780a738
AD
/*
 * zfpm_trigger_rmac_update
 *
 * Zebra code invokes this function (via the zebra_rmac_update hook)
 * to indicate that we should send an update to FPM for given MAC
 * entry.
 *
 * This function checks if we already have enqueued an update for this
 * RMAC.  If yes, update the same fpm_mac_info_t (possibly cancelling
 * a queued "add" with a "delete").  Else, create and enqueue an
 * update.
 *
 * @param rmac   the remote MAC entry that changed
 * @param zl3vni L3 VNI the MAC belongs to (supplies VNI and interfaces)
 * @param delete true for a delete operation, false for add/update
 * @param reason optional cause, for debug logging only
 * @return 0 always (hook convention)
 */
static int zfpm_trigger_rmac_update(struct zebra_mac *rmac,
				    struct zebra_l3vni *zl3vni, bool delete,
				    const char *reason)
{
	struct fpm_mac_info_t *fpm_mac, key;
	struct interface *vxlan_if, *svi_if;
	bool mac_found = false;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	if (reason) {
		zfpm_debug("triggering update to FPM - Reason: %s - %pEA",
			   reason, &rmac->macaddr);
	}

	vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
	svi_if = zl3vni_map_to_svi_if(zl3vni);

	/* Build the (MAC, VNI) lookup key. */
	memset(&key, 0, sizeof(key));

	memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
	key.vni = zl3vni->vni;

	/* Check if this MAC is already present in the queue. */
	fpm_mac = zfpm_mac_info_lookup(&key);

	if (fpm_mac) {
		mac_found = true;

		/*
		 * If the enqueued op is "add" and current op is "delete",
		 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
		 * While processing FPM queue, we will silently delete this
		 * MAC entry without sending any update for this MAC.
		 */
		if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
		    delete == 1) {
			SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
			UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
			return 0;
		}
	} else
		/* Not queued yet: create a new entry in the hash table. */
		fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
				   zfpm_mac_info_alloc);

	/* Refresh the payload fields with the latest MAC state. */
	fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
	fpm_mac->zebra_flags = rmac->flags;
	fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
	fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;

	SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
	if (delete)
		SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
	else
		UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);

	/* Only enqueue freshly-created entries; existing ones are already
	 * on the queue. */
	if (!mac_found)
		TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);

	zfpm_g->stats.updates_triggered++;

	/* If writes are already enabled, return. */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1636
fbe748e5
AD
1637/*
1638 * This function is called when the FPM connections is established.
1639 * Iterate over all the RMAC entries for the given L3VNI
1640 * and enqueue the RMAC for FPM processing.
1641 */
1ac88792 1642static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
fbe748e5
AD
1643 void *args)
1644{
3198b2b3 1645 struct zebra_mac *zrmac = (struct zebra_mac *)bucket->data;
05843a27 1646 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)args;
fbe748e5
AD
1647
1648 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1649}
1650
1651/*
1652 * This function is called when the FPM connections is established.
1653 * This function iterates over all the L3VNIs to trigger
1654 * FPM updates for RMACs currently available.
1655 */
1ac88792 1656static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
fbe748e5 1657{
05843a27 1658 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)bucket->data;
fbe748e5
AD
1659
1660 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1661 (void *)zl3vni);
1662}
1663
/*
 * zfpm_stats_timer_cb
 *
 * Periodic (ZFPM_STATS_IVL_SECS) timer: snapshots the interval stats
 * for display, folds them into the cumulative totals, resets the
 * interval counters and re-arms itself.
 */
static void zfpm_stats_timer_cb(struct thread *t)
{
	/* Timer has fired; clear the handle before re-arming below. */
	zfpm_g->t_stats = NULL;

	/*
	 * Remember the stats collected in the last interval for display
	 * purposes.
	 */
	zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);

	/*
	 * Add the current set of stats into the cumulative statistics.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &zfpm_g->cumulative_stats);

	/*
	 * Start collecting stats afresh over the next interval.
	 */
	zfpm_stats_reset(&zfpm_g->stats);

	zfpm_start_stats_timer();
}
1690
1691/*
1692 * zfpm_stop_stats_timer
1693 */
d62a17ae 1694static void zfpm_stop_stats_timer(void)
5adc2528 1695{
d62a17ae 1696 if (!zfpm_g->t_stats)
1697 return;
5adc2528 1698
d62a17ae 1699 zfpm_debug("Stopping existing stats timer");
146bcb9b 1700 THREAD_OFF(zfpm_g->t_stats);
5adc2528
AS
1701}
1702
/*
 * zfpm_start_stats_timer
 *
 * Arm the periodic stats timer; must not already be running.
 */
void zfpm_start_stats_timer(void)
{
	assert(!zfpm_g->t_stats);

	thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
			 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
1713
/*
 * Helper macro for zfpm_show_stats() below.
 *
 * Prints one counter line: name, cumulative total, and the value from
 * the last completed interval.  Relies on 'vty' and 'total_stats'
 * being in scope at the expansion site.
 */
#define ZFPM_SHOW_STAT(counter)                                                \
	do {                                                                   \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                  \
			total_stats.counter, zfpm_g->last_ivl_stats.counter);  \
	} while (0)
1722
/*
 * zfpm_show_stats
 *
 * Render all FPM counters to the given vty: the cumulative total and
 * the value from the last ZFPM_STATS_IVL_SECS interval, followed by
 * the time since the stats were last cleared (if ever).
 */
static void zfpm_show_stats(struct vty *vty)
{
	struct zfpm_stats total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* Zero means the stats were never cleared. */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1772
/*
 * zfpm_clear_stats
 *
 * Reset all three stats sets (current interval, last interval,
 * cumulative), restart the collection interval, and record the clear
 * time for later display.  No-op with a message if FPM is disabled.
 */
static void zfpm_clear_stats(struct vty *vty)
{
	if (!zfpm_is_enabled()) {
		vty_out(vty, "The FPM module is not enabled...\n");
		return;
	}

	zfpm_stats_reset(&zfpm_g->stats);
	zfpm_stats_reset(&zfpm_g->last_ivl_stats);
	zfpm_stats_reset(&zfpm_g->cumulative_stats);

	/* Restart the interval so the next snapshot starts fresh. */
	zfpm_stop_stats_timer();
	zfpm_start_stats_timer();

	zfpm_g->last_stats_clear_time = monotime(NULL);

	vty_out(vty, "Cleared FPM stats\n");
}
1794
/*
 * show_zebra_fpm_stats
 *
 * CLI handler for "show zebra fpm stats".
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
1809
/*
 * clear_zebra_fpm_stats
 *
 * CLI handler for "clear zebra fpm stats".
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1824
/*
 * update fpm connection information
 *
 * CLI handler for "fpm connection ip A.B.C.D port (1-65535)": stores
 * the configured server address (network byte order) and port.  Takes
 * effect on the next connect attempt.
 */
DEFUN (fpm_remote_ip,
       fpm_remote_ip_cmd,
       "fpm connection ip A.B.C.D port (1-65535)",
       "Forwarding Path Manager\n"
       "Configure FPM connection\n"
       "Connect to IPv4 address\n"
       "Connect to IPv4 address\n"
       "TCP port number\n"
       "TCP port number\n")
{

	in_addr_t fpm_server;
	uint32_t port_no;

	/* NOTE(review): inet_addr() also returns INADDR_NONE for the valid
	 * broadcast address 255.255.255.255 — presumably acceptable here. */
	fpm_server = inet_addr(argv[3]->arg);
	if (fpm_server == INADDR_NONE)
		return CMD_ERR_INCOMPLETE;

	port_no = atoi(argv[5]->arg);
	if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
		return CMD_ERR_INCOMPLETE;

	zfpm_g->fpm_server = fpm_server;
	zfpm_g->fpm_port = port_no;


	return CMD_SUCCESS;
}
1856
/*
 * CLI handler for "no fpm connection ip ... port ...": only resets the
 * configuration back to the defaults when the given address/port match
 * what is currently configured.
 */
DEFUN (no_fpm_remote_ip,
       no_fpm_remote_ip_cmd,
       "no fpm connection ip A.B.C.D port (1-65535)",
       NO_STR
       "Forwarding Path Manager\n"
       "Remove configured FPM connection\n"
       "Connect to IPv4 address\n"
       "Connect to IPv4 address\n"
       "TCP port number\n"
       "TCP port number\n")
{
	if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
	    || zfpm_g->fpm_port != atoi(argv[6]->arg))
		return CMD_ERR_NO_MATCH;

	zfpm_g->fpm_server = FPM_DEFAULT_IP;
	zfpm_g->fpm_port = FPM_DEFAULT_PORT;

	return CMD_SUCCESS;
}
711ff0ba 1877
/*
 * zfpm_init_message_format
 *
 * Select the wire format used to talk to the FPM based on the module
 * load argument and compile-time availability.
 *
 * @param format "netlink", "protobuf", or NULL to auto-select
 *               (netlink preferred, then protobuf)
 *
 * Leaves zfpm_g->message_format as ZFPM_MSG_FORMAT_NONE when the
 * requested (or any) format is unavailable, which later disables the
 * FPM interface in zfpm_init().
 */
static inline void zfpm_init_message_format(const char *format)
{
	int have_netlink, have_protobuf;

#ifdef HAVE_NETLINK
	have_netlink = 1;
#else
	have_netlink = 0;
#endif

#ifdef HAVE_PROTOBUF
	have_protobuf = 1;
#else
	have_protobuf = 0;
#endif

	zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;

	/* No explicit request: pick the best available format. */
	if (!format) {
		if (have_netlink) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		} else if (have_protobuf) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		}
		return;
	}

	if (!strcmp("netlink", format)) {
		if (!have_netlink) {
			flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
				 "FPM netlink message format is not available");
			return;
		}
		zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		return;
	}

	if (!strcmp("protobuf", format)) {
		if (!have_protobuf) {
			flog_err(
				EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
				"FPM protobuf message format is not available");
			return;
		}
		/* Protobuf still works but is deprecated upstream. */
		flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
			  "FPM protobuf message format is deprecated and scheduled to be removed. Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
		zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		return;
	}

	flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
		  format);
}
1934
/**
 * fpm_remote_srv_write
 *
 * Config-write callback for the zebra node: emits the "fpm connection
 * ip ... port ..." line, but only when the configured server or port
 * differs from the defaults (i.e. the user actually configured it).
 *
 * Returns ZERO on success.
 */

static int fpm_remote_srv_write(struct vty *vty)
{
	struct in_addr in;

	in.s_addr = zfpm_g->fpm_server;

	if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
	     && zfpm_g->fpm_server != INADDR_ANY)
	    || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
		vty_out(vty, "fpm connection ip %pI4 port %d\n", &in,
			zfpm_g->fpm_port);

	return 0;
}
1957
1958
/* Forward declaration: defined above, referenced by the node below. */
static int fpm_remote_srv_write(struct vty *vty);
/* Zebra node: hooks fpm_remote_srv_write() into "write config". */
static struct cmd_node zebra_node = {
	.name = "zebra",
	.node = ZEBRA_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "",
	.config_write = fpm_remote_srv_write,
};
4f8ea50c
DL
1968
1969
5adc2528
AS
/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module, run from the
 * frr_late_init hook.  Sets up queues, the MAC info hash table, CLI
 * nodes, stream buffers, the message format ('netlink' or 'protobuf',
 * taken from the module load args), and starts the stats and connect
 * timers.
 *
 * @param[in] master thread master to schedule timers/IO on
 *
 * Returns 0 (hook convention).
 */
static int zfpm_init(struct thread_master *master)
{
	int enable = 1;
	uint16_t port = 0;
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t entries */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->sock = -1;
	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	memset(&ipv4ll_gateway, 0, sizeof(ipv4ll_gateway));
	if (inet_pton(AF_INET, ipv4_ll_buf, &ipv4ll_gateway.ipv4) != 1)
		zlog_warn("inet_pton failed for %s", ipv4_ll_buf);

	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to defaults when nothing was configured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
5adc2528 2039
/*
 * zfpm_fini
 *
 * Early-shutdown hook: disable all IO and timers and detach the
 * rib_update hook so no further route triggers reach the FPM module.
 */
static int zfpm_fini(void)
{
	zfpm_write_off();
	zfpm_read_off();
	zfpm_connect_off();

	zfpm_stop_stats_timer();

	hook_unregister(rib_update, zfpm_trigger_update);
	return 0;
}
2051
/*
 * Module entry point: register the hooks that drive the FPM module —
 * route updates/removals, RMAC updates, late init and early shutdown.
 */
static int zebra_fpm_module_init(void)
{
	hook_register(rib_update, zfpm_trigger_update);
	hook_register(rib_shutdown, zfpm_trigger_remove);
	hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
	hook_register(frr_late_init, zfpm_init);
	hook_register(frr_early_fini, zfpm_fini);
	return 0;
}
4f8ea50c 2061
/* Register this file as a loadable FRR module. */
FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
		 .description = "zebra FPM (Forwarding Plane Manager) module",
		 .init = zebra_fpm_module_init,
);