]> git.proxmox.com Git - mirror_frr.git/blame - zebra/zebra_fpm.c
*: require semicolon after FRR_DAEMON_INFO & co.
[mirror_frr.git] / zebra / zebra_fpm.c
CommitLineData
5adc2528
AS
1/*
2 * Main implementation file for interface to Forwarding Plane Manager.
3 *
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
6 *
7 * This file is part of GNU Zebra.
8 *
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
13 *
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
896014f4
DL
19 * You should have received a copy of the GNU General Public License along
20 * with this program; see the file COPYING; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
5adc2528
AS
22 */
23
24#include <zebra.h>
25
26#include "log.h"
4f8ea50c 27#include "libfrr.h"
5adc2528
AS
28#include "stream.h"
29#include "thread.h"
30#include "network.h"
31#include "command.h"
4f8ea50c 32#include "version.h"
e5218ec8 33#include "jhash.h"
5adc2528
AS
34
35#include "zebra/rib.h"
7c551956
DS
36#include "zebra/zserv.h"
37#include "zebra/zebra_ns.h"
38#include "zebra/zebra_vrf.h"
67aeb554 39#include "zebra/zebra_errors.h"
e5218ec8 40#include "zebra/zebra_memory.h"
5adc2528
AS
41
42#include "fpm/fpm.h"
5adc2528 43#include "zebra_fpm_private.h"
e5218ec8 44#include "zebra/zebra_router.h"
a780a738 45#include "zebra_vxlan_private.h"
e5218ec8
AD
46
47DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
5adc2528
AS
48
49/*
50 * Interval at which we attempt to connect to the FPM.
51 */
52#define ZFPM_CONNECT_RETRY_IVL 5
53
54/*
55 * Sizes of outgoing and incoming stream buffers for writing/reading
56 * FPM messages.
57 */
58#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
59#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
60
61/*
62 * The maximum number of times the FPM socket write callback can call
63 * 'write' before it yields.
64 */
65#define ZFPM_MAX_WRITES_PER_RUN 10
66
67/*
68 * Interval over which we collect statistics.
69 */
70#define ZFPM_STATS_IVL_SECS 10
fbe748e5
AD
71#define FPM_MAX_MAC_MSG_LEN 512
72
1ac88792 73static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args);
5adc2528
AS
74
75/*
76 * Structure that holds state for iterating over all route_node
77 * structures that are candidates for being communicated to the FPM.
78 */
332cba05 79struct zfpm_rnodes_iter {
d62a17ae 80 rib_tables_iter_t tables_iter;
81 route_table_iter_t iter;
332cba05 82};
5adc2528
AS
83
/*
 * Statistics.
 *
 * NOTE: zfpm_stats_compose() assumes this struct is composed entirely
 * of 'unsigned long' counters; keep it that way when adding fields.
 */
struct zfpm_stats {
	unsigned long connect_calls;
	unsigned long connect_no_sock;

	unsigned long read_cb_calls;

	unsigned long write_cb_calls;
	unsigned long write_calls;
	unsigned long partial_writes;
	unsigned long max_writes_hit;
	unsigned long t_write_yields;

	unsigned long nop_deletes_skipped;
	unsigned long route_adds;
	unsigned long route_dels;

	unsigned long updates_triggered;
	unsigned long redundant_triggers;

	unsigned long dests_del_after_update;

	unsigned long t_conn_down_starts;
	unsigned long t_conn_down_dests_processed;
	unsigned long t_conn_down_yields;
	unsigned long t_conn_down_finishes;

	unsigned long t_conn_up_starts;
	unsigned long t_conn_up_dests_processed;
	unsigned long t_conn_up_yields;
	unsigned long t_conn_up_aborts;
	unsigned long t_conn_up_finishes;
};
5adc2528
AS
119
/*
 * States for the FPM state machine.
 */
enum zfpm_state {

	/*
	 * Not yet ready to connect to the FPM. This can happen when the
	 * module is disabled, or while cleaning up after a connection has
	 * gone down.
	 */
	ZFPM_STATE_IDLE,

	/*
	 * Ready to talk to the FPM; periodically trying to connect.
	 */
	ZFPM_STATE_ACTIVE,

	/*
	 * In the middle of bringing up a TCP connection: waiting for an
	 * asynchronous connect() to complete.
	 */
	ZFPM_STATE_CONNECTING,

	/*
	 * TCP connection to the FPM is up.
	 */
	ZFPM_STATE_ESTABLISHED

};
5adc2528 150
fb0aa886
AS
/*
 * Wire format used for messages exchanged with the FPM.
 */
enum zfpm_msg_format {
	ZFPM_MSG_FORMAT_NONE,
	ZFPM_MSG_FORMAT_NETLINK,
	ZFPM_MSG_FORMAT_PROTOBUF,
};
159
5adc2528
AS
160/*
161 * Globals.
162 */
768e40bd 163struct zfpm_glob {
d62a17ae 164
165 /*
166 * True if the FPM module has been enabled.
167 */
168 int enabled;
169
170 /*
171 * Message format to be used to communicate with the fpm.
172 */
a78c2b98 173 enum zfpm_msg_format message_format;
d62a17ae 174
175 struct thread_master *master;
176
1d6a3ee8 177 enum zfpm_state state;
d62a17ae 178
179 in_addr_t fpm_server;
180 /*
181 * Port on which the FPM is running.
182 */
183 int fpm_port;
184
185 /*
186 * List of rib_dest_t structures to be processed
187 */
188 TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;
189
e5218ec8
AD
190 /*
191 * List of fpm_mac_info structures to be processed
192 */
193 TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;
194
195 /*
196 * Hash table of fpm_mac_info_t entries
197 *
198 * While adding fpm_mac_info_t for a MAC to the mac_q,
199 * it is possible that another fpm_mac_info_t node for the this MAC
200 * is already present in the queue.
201 * This is possible in the case of consecutive add->delete operations.
202 * To avoid such duplicate insertions in the mac_q,
203 * define a hash table for fpm_mac_info_t which can be looked up
204 * to see if an fpm_mac_info_t node for a MAC is already present
205 * in the mac_q.
206 */
207 struct hash *fpm_mac_info_table;
208
d62a17ae 209 /*
210 * Stream socket to the FPM.
211 */
212 int sock;
213
214 /*
215 * Buffers for messages to/from the FPM.
216 */
217 struct stream *obuf;
218 struct stream *ibuf;
219
220 /*
221 * Threads for I/O.
222 */
223 struct thread *t_connect;
224 struct thread *t_write;
225 struct thread *t_read;
226
227 /*
228 * Thread to clean up after the TCP connection to the FPM goes down
229 * and the state that belongs to it.
230 */
231 struct thread *t_conn_down;
232
233 struct {
332cba05 234 struct zfpm_rnodes_iter iter;
d62a17ae 235 } t_conn_down_state;
236
237 /*
238 * Thread to take actions once the TCP conn to the FPM comes up, and
239 * the state that belongs to it.
240 */
241 struct thread *t_conn_up;
242
243 struct {
332cba05 244 struct zfpm_rnodes_iter iter;
d62a17ae 245 } t_conn_up_state;
246
247 unsigned long connect_calls;
248 time_t last_connect_call_time;
249
250 /*
251 * Stats from the start of the current statistics interval up to
252 * now. These are the counters we typically update in the code.
253 */
eeaf257b 254 struct zfpm_stats stats;
d62a17ae 255
256 /*
257 * Statistics that were gathered in the last collection interval.
258 */
eeaf257b 259 struct zfpm_stats last_ivl_stats;
d62a17ae 260
261 /*
262 * Cumulative stats from the last clear to the start of the current
263 * statistics interval.
264 */
eeaf257b 265 struct zfpm_stats cumulative_stats;
d62a17ae 266
267 /*
268 * Stats interval timer.
269 */
270 struct thread *t_stats;
271
272 /*
273 * If non-zero, the last time when statistics were cleared.
274 */
275 time_t last_stats_clear_time;
e840edca
KK
276
277 /*
278 * Flag to track the MAC dump status to FPM
279 */
280 bool fpm_mac_dump_done;
768e40bd 281};
5adc2528 282
768e40bd
DS
283static struct zfpm_glob zfpm_glob_space;
284static struct zfpm_glob *zfpm_g = &zfpm_glob_space;
5adc2528 285
d62a17ae 286static int zfpm_trigger_update(struct route_node *rn, const char *reason);
4f8ea50c 287
d62a17ae 288static int zfpm_read_cb(struct thread *thread);
289static int zfpm_write_cb(struct thread *thread);
5adc2528 290
1d6a3ee8 291static void zfpm_set_state(enum zfpm_state state, const char *reason);
d62a17ae 292static void zfpm_start_connect_timer(const char *reason);
293static void zfpm_start_stats_timer(void);
a780a738 294static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
5adc2528
AS
295
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper around thread_should_yield() so call sites read cleanly.
 */
static inline int zfpm_thread_should_yield(struct thread *t)
{
	return thread_should_yield(t);
}
303
304/*
305 * zfpm_state_to_str
306 */
1d6a3ee8 307static const char *zfpm_state_to_str(enum zfpm_state state)
5adc2528 308{
d62a17ae 309 switch (state) {
5adc2528 310
d62a17ae 311 case ZFPM_STATE_IDLE:
312 return "idle";
5adc2528 313
d62a17ae 314 case ZFPM_STATE_ACTIVE:
315 return "active";
5adc2528 316
d62a17ae 317 case ZFPM_STATE_CONNECTING:
318 return "connecting";
5adc2528 319
d62a17ae 320 case ZFPM_STATE_ESTABLISHED:
321 return "established";
5adc2528 322
d62a17ae 323 default:
324 return "unknown";
325 }
5adc2528
AS
326}
327
5adc2528
AS
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given monotonic
 * reference time. A reference in the future is treated as a bug
 * (assert) and yields 0 in non-assert builds.
 */
static time_t zfpm_get_elapsed_time(time_t reference)
{
	time_t now = monotime(NULL);

	if (now < reference) {
		assert(0);
		return 0;
	}

	return now - reference;
}
346
5adc2528
AS
347/*
348 * zfpm_rnodes_iter_init
349 */
332cba05 350static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
5adc2528 351{
d62a17ae 352 memset(iter, 0, sizeof(*iter));
353 rib_tables_iter_init(&iter->tables_iter);
354
355 /*
356 * This is a hack, but it makes implementing 'next' easier by
357 * ensuring that route_table_iter_next() will return NULL the first
358 * time we call it.
359 */
360 route_table_iter_init(&iter->iter, NULL);
361 route_table_iter_cleanup(&iter->iter);
5adc2528
AS
362}
363
364/*
365 * zfpm_rnodes_iter_next
366 */
332cba05
DS
367static inline struct route_node *
368zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
5adc2528 369{
d62a17ae 370 struct route_node *rn;
371 struct route_table *table;
5adc2528 372
d62a17ae 373 while (1) {
374 rn = route_table_iter_next(&iter->iter);
375 if (rn)
376 return rn;
5adc2528 377
d62a17ae 378 /*
379 * We've made our way through this table, go to the next one.
380 */
381 route_table_iter_cleanup(&iter->iter);
5adc2528 382
c6bbea17 383 table = rib_tables_iter_next(&iter->tables_iter);
5adc2528 384
d62a17ae 385 if (!table)
386 return NULL;
5adc2528 387
d62a17ae 388 route_table_iter_init(&iter->iter, table);
389 }
5adc2528 390
d62a17ae 391 return NULL;
5adc2528
AS
392}
393
394/*
395 * zfpm_rnodes_iter_pause
396 */
332cba05 397static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
5adc2528 398{
d62a17ae 399 route_table_iter_pause(&iter->iter);
5adc2528
AS
400}
401
402/*
403 * zfpm_rnodes_iter_cleanup
404 */
332cba05 405static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
5adc2528 406{
d62a17ae 407 route_table_iter_cleanup(&iter->iter);
408 rib_tables_iter_cleanup(&iter->tables_iter);
5adc2528
AS
409}
410
411/*
412 * zfpm_stats_init
413 *
414 * Initialize a statistics block.
415 */
eeaf257b 416static inline void zfpm_stats_init(struct zfpm_stats *stats)
5adc2528 417{
d62a17ae 418 memset(stats, 0, sizeof(*stats));
5adc2528
AS
419}
420
/*
 * zfpm_stats_reset
 *
 * Resetting is the same operation as initializing: zero everything.
 */
static inline void zfpm_stats_reset(struct zfpm_stats *stats)
{
	zfpm_stats_init(stats);
}
428
429/*
430 * zfpm_stats_copy
431 */
eeaf257b
DS
432static inline void zfpm_stats_copy(const struct zfpm_stats *src,
433 struct zfpm_stats *dest)
5adc2528 434{
d62a17ae 435 memcpy(dest, src, sizeof(*dest));
5adc2528
AS
436}
437
438/*
439 * zfpm_stats_compose
440 *
441 * Total up the statistics in two stats structures ('s1 and 's2') and
442 * return the result in the third argument, 'result'. Note that the
443 * pointer 'result' may be the same as 's1' or 's2'.
444 *
445 * For simplicity, the implementation below assumes that the stats
446 * structure is composed entirely of counters. This can easily be
447 * changed when necessary.
448 */
eeaf257b
DS
449static void zfpm_stats_compose(const struct zfpm_stats *s1,
450 const struct zfpm_stats *s2,
451 struct zfpm_stats *result)
5adc2528 452{
d62a17ae 453 const unsigned long *p1, *p2;
454 unsigned long *result_p;
455 int i, num_counters;
5adc2528 456
d62a17ae 457 p1 = (const unsigned long *)s1;
458 p2 = (const unsigned long *)s2;
459 result_p = (unsigned long *)result;
5adc2528 460
eeaf257b 461 num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
5adc2528 462
d62a17ae 463 for (i = 0; i < num_counters; i++) {
464 result_p[i] = p1[i] + p2[i];
465 }
5adc2528
AS
466}
467
468/*
469 * zfpm_read_on
470 */
d62a17ae 471static inline void zfpm_read_on(void)
5adc2528 472{
d62a17ae 473 assert(!zfpm_g->t_read);
474 assert(zfpm_g->sock >= 0);
5adc2528 475
d62a17ae 476 thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
477 &zfpm_g->t_read);
5adc2528
AS
478}
479
480/*
481 * zfpm_write_on
482 */
d62a17ae 483static inline void zfpm_write_on(void)
5adc2528 484{
d62a17ae 485 assert(!zfpm_g->t_write);
486 assert(zfpm_g->sock >= 0);
5adc2528 487
d62a17ae 488 thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
489 &zfpm_g->t_write);
5adc2528
AS
490}
491
492/*
493 * zfpm_read_off
494 */
d62a17ae 495static inline void zfpm_read_off(void)
5adc2528 496{
50478845 497 thread_cancel(&zfpm_g->t_read);
5adc2528
AS
498}
499
500/*
501 * zfpm_write_off
502 */
d62a17ae 503static inline void zfpm_write_off(void)
5adc2528 504{
50478845 505 thread_cancel(&zfpm_g->t_write);
5adc2528
AS
506}
507
f0c459f0
DS
508static inline void zfpm_connect_off(void)
509{
50478845 510 thread_cancel(&zfpm_g->t_connect);
f0c459f0
DS
511}
512
5adc2528
AS
513/*
514 * zfpm_conn_up_thread_cb
515 *
516 * Callback for actions to be taken when the connection to the FPM
517 * comes up.
518 */
d62a17ae 519static int zfpm_conn_up_thread_cb(struct thread *thread)
5adc2528 520{
d62a17ae 521 struct route_node *rnode;
332cba05 522 struct zfpm_rnodes_iter *iter;
d62a17ae 523 rib_dest_t *dest;
5adc2528 524
d62a17ae 525 iter = &zfpm_g->t_conn_up_state.iter;
5adc2528 526
d62a17ae 527 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
528 zfpm_debug(
529 "Connection not up anymore, conn_up thread aborting");
530 zfpm_g->stats.t_conn_up_aborts++;
531 goto done;
532 }
5adc2528 533
e840edca
KK
534 if (!zfpm_g->fpm_mac_dump_done) {
535 /* Enqueue FPM updates for all the RMAC entries */
536 hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table,
537 NULL);
538 /* mark dump done so that its not repeated after yield */
539 zfpm_g->fpm_mac_dump_done = true;
540 }
fbe748e5 541
d62a17ae 542 while ((rnode = zfpm_rnodes_iter_next(iter))) {
543 dest = rib_dest_from_rnode(rnode);
544
545 if (dest) {
546 zfpm_g->stats.t_conn_up_dests_processed++;
547 zfpm_trigger_update(rnode, NULL);
548 }
549
550 /*
551 * Yield if need be.
552 */
553 if (!zfpm_thread_should_yield(thread))
554 continue;
555
556 zfpm_g->stats.t_conn_up_yields++;
557 zfpm_rnodes_iter_pause(iter);
d62a17ae 558 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
559 NULL, 0, &zfpm_g->t_conn_up);
560 return 0;
5adc2528
AS
561 }
562
d62a17ae 563 zfpm_g->stats.t_conn_up_finishes++;
564
565done:
566 zfpm_rnodes_iter_cleanup(iter);
567 return 0;
5adc2528
AS
568}
569
570/*
571 * zfpm_connection_up
572 *
573 * Called when the connection to the FPM comes up.
574 */
d62a17ae 575static void zfpm_connection_up(const char *detail)
5adc2528 576{
d62a17ae 577 assert(zfpm_g->sock >= 0);
578 zfpm_read_on();
579 zfpm_write_on();
580 zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);
581
582 /*
583 * Start thread to push existing routes to the FPM.
584 */
ef1dbba8 585 thread_cancel(&zfpm_g->t_conn_up);
d62a17ae 586
587 zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
e840edca 588 zfpm_g->fpm_mac_dump_done = false;
d62a17ae 589
590 zfpm_debug("Starting conn_up thread");
ef1dbba8 591
d62a17ae 592 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
593 &zfpm_g->t_conn_up);
594 zfpm_g->stats.t_conn_up_starts++;
5adc2528
AS
595}
596
597/*
598 * zfpm_connect_check
599 *
600 * Check if an asynchronous connect() to the FPM is complete.
601 */
d62a17ae 602static void zfpm_connect_check(void)
5adc2528 603{
d62a17ae 604 int status;
605 socklen_t slen;
606 int ret;
607
608 zfpm_read_off();
609 zfpm_write_off();
610
611 slen = sizeof(status);
612 ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
613 &slen);
614
615 if (ret >= 0 && status == 0) {
616 zfpm_connection_up("async connect complete");
617 return;
618 }
619
620 /*
621 * getsockopt() failed or indicated an error on the socket.
622 */
623 close(zfpm_g->sock);
624 zfpm_g->sock = -1;
625
626 zfpm_start_connect_timer("getsockopt() after async connect failed");
627 return;
5adc2528
AS
628}
629
630/*
631 * zfpm_conn_down_thread_cb
632 *
633 * Callback that is invoked to clean up state after the TCP connection
634 * to the FPM goes down.
635 */
d62a17ae 636static int zfpm_conn_down_thread_cb(struct thread *thread)
5adc2528 637{
d62a17ae 638 struct route_node *rnode;
332cba05 639 struct zfpm_rnodes_iter *iter;
d62a17ae 640 rib_dest_t *dest;
a780a738 641 struct fpm_mac_info_t *mac = NULL;
5adc2528 642
d62a17ae 643 assert(zfpm_g->state == ZFPM_STATE_IDLE);
5adc2528 644
a780a738
AD
645 /*
646 * Delink and free all fpm_mac_info_t nodes
647 * in the mac_q and fpm_mac_info_hash
648 */
649 while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
650 zfpm_mac_info_del(mac);
651
d62a17ae 652 zfpm_g->t_conn_down = NULL;
5adc2528 653
d62a17ae 654 iter = &zfpm_g->t_conn_down_state.iter;
5adc2528 655
d62a17ae 656 while ((rnode = zfpm_rnodes_iter_next(iter))) {
657 dest = rib_dest_from_rnode(rnode);
5adc2528 658
d62a17ae 659 if (dest) {
660 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
661 TAILQ_REMOVE(&zfpm_g->dest_q, dest,
662 fpm_q_entries);
663 }
664
665 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
666 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
5adc2528 667
d62a17ae 668 zfpm_g->stats.t_conn_down_dests_processed++;
5adc2528 669
d62a17ae 670 /*
671 * Check if the dest should be deleted.
672 */
673 rib_gc_dest(rnode);
674 }
5adc2528 675
d62a17ae 676 /*
677 * Yield if need be.
678 */
679 if (!zfpm_thread_should_yield(thread))
680 continue;
681
682 zfpm_g->stats.t_conn_down_yields++;
683 zfpm_rnodes_iter_pause(iter);
684 zfpm_g->t_conn_down = NULL;
685 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
686 NULL, 0, &zfpm_g->t_conn_down);
687 return 0;
5adc2528
AS
688 }
689
d62a17ae 690 zfpm_g->stats.t_conn_down_finishes++;
691 zfpm_rnodes_iter_cleanup(iter);
692
693 /*
694 * Start the process of connecting to the FPM again.
695 */
696 zfpm_start_connect_timer("cleanup complete");
697 return 0;
5adc2528
AS
698}
699
700/*
701 * zfpm_connection_down
702 *
703 * Called when the connection to the FPM has gone down.
704 */
d62a17ae 705static void zfpm_connection_down(const char *detail)
5adc2528 706{
d62a17ae 707 if (!detail)
708 detail = "unknown";
5adc2528 709
d62a17ae 710 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
5adc2528 711
d62a17ae 712 zlog_info("connection to the FPM has gone down: %s", detail);
5adc2528 713
d62a17ae 714 zfpm_read_off();
715 zfpm_write_off();
5adc2528 716
d62a17ae 717 stream_reset(zfpm_g->ibuf);
718 stream_reset(zfpm_g->obuf);
5adc2528 719
d62a17ae 720 if (zfpm_g->sock >= 0) {
721 close(zfpm_g->sock);
722 zfpm_g->sock = -1;
723 }
5adc2528 724
d62a17ae 725 /*
726 * Start thread to clean up state after the connection goes down.
727 */
728 assert(!zfpm_g->t_conn_down);
d62a17ae 729 zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
730 zfpm_g->t_conn_down = NULL;
731 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
732 &zfpm_g->t_conn_down);
733 zfpm_g->stats.t_conn_down_starts++;
734
735 zfpm_set_state(ZFPM_STATE_IDLE, detail);
5adc2528
AS
736}
737
738/*
739 * zfpm_read_cb
740 */
d62a17ae 741static int zfpm_read_cb(struct thread *thread)
5adc2528 742{
d62a17ae 743 size_t already;
744 struct stream *ibuf;
745 uint16_t msg_len;
746 fpm_msg_hdr_t *hdr;
747
748 zfpm_g->stats.read_cb_calls++;
d62a17ae 749
750 /*
751 * Check if async connect is now done.
752 */
753 if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
754 zfpm_connect_check();
755 return 0;
5adc2528
AS
756 }
757
d62a17ae 758 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
759 assert(zfpm_g->sock >= 0);
5adc2528 760
d62a17ae 761 ibuf = zfpm_g->ibuf;
5adc2528 762
d62a17ae 763 already = stream_get_endp(ibuf);
764 if (already < FPM_MSG_HDR_LEN) {
765 ssize_t nbyte;
5adc2528 766
d62a17ae 767 nbyte = stream_read_try(ibuf, zfpm_g->sock,
768 FPM_MSG_HDR_LEN - already);
769 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
770 if (nbyte == -1) {
771 char buffer[1024];
772
772270f3
QY
773 snprintf(buffer, sizeof(buffer),
774 "closed socket in read(%d): %s", errno,
775 safe_strerror(errno));
677f704d 776 zfpm_connection_down(buffer);
996c9314 777 } else
677f704d 778 zfpm_connection_down("closed socket in read");
d62a17ae 779 return 0;
780 }
5adc2528 781
d62a17ae 782 if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
783 goto done;
5adc2528 784
d62a17ae 785 already = FPM_MSG_HDR_LEN;
786 }
5adc2528 787
d62a17ae 788 stream_set_getp(ibuf, 0);
5adc2528 789
d62a17ae 790 hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);
5adc2528 791
d62a17ae 792 if (!fpm_msg_hdr_ok(hdr)) {
793 zfpm_connection_down("invalid message header");
794 return 0;
5adc2528
AS
795 }
796
d62a17ae 797 msg_len = fpm_msg_len(hdr);
5adc2528 798
d62a17ae 799 /*
800 * Read out the rest of the packet.
801 */
802 if (already < msg_len) {
803 ssize_t nbyte;
5adc2528 804
d62a17ae 805 nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);
5adc2528 806
d62a17ae 807 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
808 if (nbyte == -1) {
809 char buffer[1024];
810
772270f3
QY
811 snprintf(buffer, sizeof(buffer),
812 "failed to read message(%d) %s", errno,
813 safe_strerror(errno));
677f704d 814 zfpm_connection_down(buffer);
996c9314 815 } else
677f704d 816 zfpm_connection_down("failed to read message");
d62a17ae 817 return 0;
818 }
819
820 if (nbyte != (ssize_t)(msg_len - already))
821 goto done;
822 }
823
d62a17ae 824 /*
825 * Just throw it away for now.
826 */
827 stream_reset(ibuf);
828
829done:
830 zfpm_read_on();
831 return 0;
5adc2528
AS
832}
833
21d814eb
AD
834static bool zfpm_updates_pending(void)
835{
836 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
837 return true;
838
839 return false;
840}
841
5adc2528
AS
842/*
843 * zfpm_writes_pending
844 *
2951a7a4 845 * Returns true if we may have something to write to the FPM.
5adc2528 846 */
d62a17ae 847static int zfpm_writes_pending(void)
5adc2528
AS
848{
849
d62a17ae 850 /*
851 * Check if there is any data in the outbound buffer that has not
852 * been written to the socket yet.
853 */
854 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
855 return 1;
5adc2528 856
d62a17ae 857 /*
21d814eb 858 * Check if there are any updates scheduled on the outbound queues.
d62a17ae 859 */
21d814eb 860 if (zfpm_updates_pending())
d62a17ae 861 return 1;
5adc2528 862
d62a17ae 863 return 0;
5adc2528
AS
864}
865
866/*
867 * zfpm_encode_route
868 *
869 * Encode a message to the FPM with information about the given route.
870 *
871 * Returns the number of bytes written to the buffer. 0 or a negative
872 * value indicates an error.
873 */
d62a17ae 874static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
875 char *in_buf, size_t in_buf_len,
876 fpm_msg_type_e *msg_type)
5adc2528 877{
d62a17ae 878 size_t len;
9bf75362 879#ifdef HAVE_NETLINK
d62a17ae 880 int cmd;
9bf75362 881#endif
d62a17ae 882 len = 0;
5adc2528 883
d62a17ae 884 *msg_type = FPM_MSG_TYPE_NONE;
5adc2528 885
d62a17ae 886 switch (zfpm_g->message_format) {
5adc2528 887
d62a17ae 888 case ZFPM_MSG_FORMAT_PROTOBUF:
fb0aa886 889#ifdef HAVE_PROTOBUF
d62a17ae 890 len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
891 in_buf_len);
892 *msg_type = FPM_MSG_TYPE_PROTOBUF;
fb0aa886 893#endif
d62a17ae 894 break;
5adc2528 895
d62a17ae 896 case ZFPM_MSG_FORMAT_NETLINK:
fb0aa886 897#ifdef HAVE_NETLINK
d62a17ae 898 *msg_type = FPM_MSG_TYPE_NETLINK;
899 cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
900 len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
901 in_buf_len);
902 assert(fpm_msg_align(len) == len);
903 *msg_type = FPM_MSG_TYPE_NETLINK;
5adc2528 904#endif /* HAVE_NETLINK */
d62a17ae 905 break;
fb0aa886 906
d62a17ae 907 default:
908 break;
909 }
fb0aa886 910
d62a17ae 911 return len;
5adc2528
AS
912}
913
914/*
915 * zfpm_route_for_update
916 *
f0f77c9a 917 * Returns the re that is to be sent to the FPM for a given dest.
5adc2528 918 */
d62a17ae 919struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
5adc2528 920{
5f7a4718 921 return dest->selected_fib;
5adc2528
AS
922}
923
/*
 * Return codes for the queue-processing functions.
 *
 * FPM_WRITE_STOP: the write buffer is full. Stop processing all the
 * queues and empty the buffer by writing its content to the socket.
 *
 * FPM_GOTO_NEXT_Q: this queue is either empty or we have processed
 * enough updates from it; move on to the next queue.
 */
enum {
	FPM_WRITE_STOP = 0,
	FPM_GOTO_NEXT_Q = 1
};

/* Max updates drained from one queue before yielding to the next. */
#define FPM_QUEUE_PROCESS_LIMIT 10000
941
942/*
943 * zfpm_build_route_updates
944 *
945 * Process the dest_q queue and write FPM messages to the outbound buffer.
946 */
947static int zfpm_build_route_updates(void)
5adc2528 948{
d62a17ae 949 struct stream *s;
950 rib_dest_t *dest;
951 unsigned char *buf, *data, *buf_end;
952 size_t msg_len;
953 size_t data_len;
954 fpm_msg_hdr_t *hdr;
955 struct route_entry *re;
956 int is_add, write_msg;
957 fpm_msg_type_e msg_type;
21d814eb 958 uint16_t q_limit;
d62a17ae 959
21d814eb
AD
960 if (TAILQ_EMPTY(&zfpm_g->dest_q))
961 return FPM_GOTO_NEXT_Q;
d62a17ae 962
21d814eb
AD
963 s = zfpm_g->obuf;
964 q_limit = FPM_QUEUE_PROCESS_LIMIT;
d62a17ae 965
21d814eb 966 do {
d62a17ae 967 /*
968 * Make sure there is enough space to write another message.
969 */
970 if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
21d814eb 971 return FPM_WRITE_STOP;
d62a17ae 972
973 buf = STREAM_DATA(s) + stream_get_endp(s);
974 buf_end = buf + STREAM_WRITEABLE(s);
975
976 dest = TAILQ_FIRST(&zfpm_g->dest_q);
977 if (!dest)
21d814eb 978 return FPM_GOTO_NEXT_Q;
d62a17ae 979
980 assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));
981
982 hdr = (fpm_msg_hdr_t *)buf;
983 hdr->version = FPM_PROTO_VERSION;
984
985 data = fpm_msg_data(hdr);
986
987 re = zfpm_route_for_update(dest);
988 is_add = re ? 1 : 0;
989
990 write_msg = 1;
991
992 /*
993 * If this is a route deletion, and we have not sent the route
994 * to
995 * the FPM previously, skip it.
996 */
997 if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
998 write_msg = 0;
999 zfpm_g->stats.nop_deletes_skipped++;
1000 }
1001
1002 if (write_msg) {
1003 data_len = zfpm_encode_route(dest, re, (char *)data,
1004 buf_end - data, &msg_type);
1005
1006 assert(data_len);
1007 if (data_len) {
1008 hdr->msg_type = msg_type;
1009 msg_len = fpm_data_len_to_msg_len(data_len);
1010 hdr->msg_len = htons(msg_len);
1011 stream_forward_endp(s, msg_len);
1012
1013 if (is_add)
1014 zfpm_g->stats.route_adds++;
1015 else
1016 zfpm_g->stats.route_dels++;
1017 }
1018 }
1019
1020 /*
1021 * Remove the dest from the queue, and reset the flag.
1022 */
1023 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1024 TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);
1025
1026 if (is_add) {
1027 SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1028 } else {
1029 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1030 }
1031
1032 /*
1033 * Delete the destination if necessary.
1034 */
1035 if (rib_gc_dest(dest->rnode))
1036 zfpm_g->stats.dests_del_after_update++;
1037
21d814eb
AD
1038 q_limit--;
1039 if (q_limit == 0) {
1040 /*
1041 * We have processed enough updates in this queue.
1042 * Now yield for other queues.
1043 */
1044 return FPM_GOTO_NEXT_Q;
1045 }
c5431822 1046 } while (true);
21d814eb
AD
1047}
1048
1049/*
1050 * zfpm_encode_mac
1051 *
1052 * Encode a message to FPM with information about the given MAC.
1053 *
1054 * Returns the number of bytes written to the buffer.
1055 */
1056static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
1057 size_t in_buf_len, fpm_msg_type_e *msg_type)
1058{
1059 size_t len = 0;
1060
1061 *msg_type = FPM_MSG_TYPE_NONE;
1062
1063 switch (zfpm_g->message_format) {
1064
1065 case ZFPM_MSG_FORMAT_NONE:
1066 break;
1067 case ZFPM_MSG_FORMAT_NETLINK:
9da60d0a
AD
1068#ifdef HAVE_NETLINK
1069 len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
1070 assert(fpm_msg_align(len) == len);
1071 *msg_type = FPM_MSG_TYPE_NETLINK;
1072#endif /* HAVE_NETLINK */
21d814eb
AD
1073 break;
1074 case ZFPM_MSG_FORMAT_PROTOBUF:
1075 break;
1076 }
1077 return len;
1078}
1079
1080static int zfpm_build_mac_updates(void)
1081{
1082 struct stream *s;
1083 struct fpm_mac_info_t *mac;
1084 unsigned char *buf, *data, *buf_end;
1085 fpm_msg_hdr_t *hdr;
1086 size_t data_len, msg_len;
1087 fpm_msg_type_e msg_type;
1088 uint16_t q_limit;
1089
1090 if (TAILQ_EMPTY(&zfpm_g->mac_q))
1091 return FPM_GOTO_NEXT_Q;
1092
1093 s = zfpm_g->obuf;
1094 q_limit = FPM_QUEUE_PROCESS_LIMIT;
1095
1096 do {
1097 /* Make sure there is enough space to write another message. */
1098 if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
1099 return FPM_WRITE_STOP;
1100
1101 buf = STREAM_DATA(s) + stream_get_endp(s);
1102 buf_end = buf + STREAM_WRITEABLE(s);
1103
1104 mac = TAILQ_FIRST(&zfpm_g->mac_q);
1105 if (!mac)
1106 return FPM_GOTO_NEXT_Q;
1107
1108 /* Check for no-op */
1109 if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
1110 zfpm_g->stats.nop_deletes_skipped++;
1111 zfpm_mac_info_del(mac);
1112 continue;
1113 }
1114
1115 hdr = (fpm_msg_hdr_t *)buf;
1116 hdr->version = FPM_PROTO_VERSION;
1117
1118 data = fpm_msg_data(hdr);
1119 data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
1120 &msg_type);
9da60d0a 1121 assert(data_len);
21d814eb
AD
1122
1123 hdr->msg_type = msg_type;
1124 msg_len = fpm_data_len_to_msg_len(data_len);
1125 hdr->msg_len = htons(msg_len);
1126 stream_forward_endp(s, msg_len);
1127
1128 /* Remove the MAC from the queue, and delete it. */
1129 zfpm_mac_info_del(mac);
1130
1131 q_limit--;
1132 if (q_limit == 0) {
1133 /*
1134 * We have processed enough updates in this queue.
1135 * Now yield for other queues.
1136 */
1137 return FPM_GOTO_NEXT_Q;
1138 }
d62a17ae 1139 } while (1);
5adc2528
AS
1140}
1141
21d814eb
AD
1142/*
1143 * zfpm_build_updates
1144 *
1145 * Process the outgoing queues and write messages to the outbound
1146 * buffer.
1147 */
1148static void zfpm_build_updates(void)
1149{
1150 struct stream *s;
1151
1152 s = zfpm_g->obuf;
1153 assert(stream_empty(s));
1154
1155 do {
1156 /*
1157 * Stop processing the queues if zfpm_g->obuf is full
1158 * or we do not have more updates to process
1159 */
1160 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1161 break;
1162 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1163 break;
1164 } while (zfpm_updates_pending());
1165}
1166
5adc2528
AS
/*
 * zfpm_write_cb
 *
 * Write-ready callback on the FPM socket. If an async connect is in
 * progress it only checks for completion; otherwise it repeatedly
 * refills the output buffer from the update queues and writes it to
 * the socket, stopping on EAGAIN-style errors, partial writes, a
 * per-run write budget, or thread-yield pressure.
 */
static int zfpm_write_cb(struct thread *thread)
{
	struct stream *s;
	int num_writes;

	zfpm_g->stats.write_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return 0;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	num_writes = 0;

	do {
		int bytes_to_write, bytes_written;

		s = zfpm_g->obuf;

		/*
		 * If the stream is empty, try fill it up with data.
		 */
		if (stream_empty(s)) {
			zfpm_build_updates();
		}

		bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
		if (!bytes_to_write)
			break;

		bytes_written =
			write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
		zfpm_g->stats.write_calls++;
		num_writes++;

		if (bytes_written < 0) {
			/* Transient error (EAGAIN/EWOULDBLOCK/EINTR):
			 * retry on the next write-ready event. */
			if (ERRNO_IO_RETRY(errno))
				break;

			/* Hard error: tear the connection down. */
			zfpm_connection_down("failed to write to socket");
			return 0;
		}

		if (bytes_written != bytes_to_write) {

			/*
			 * Partial write: advance the read pointer past what
			 * was sent and wait for the socket to drain.
			 */
			stream_forward_getp(s, bytes_written);
			zfpm_g->stats.partial_writes++;
			break;
		}

		/*
		 * We've written out the entire contents of the stream.
		 */
		stream_reset(s);

		/* Bound the amount of work done in one callback run. */
		if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
			zfpm_g->stats.max_writes_hit++;
			break;
		}

		if (zfpm_thread_should_yield(thread)) {
			zfpm_g->stats.t_write_yields++;
			break;
		}
	} while (1);

	/* Re-arm the write callback if anything is left to send. */
	if (zfpm_writes_pending())
		zfpm_write_on();

	return 0;
}
1250
/*
 * zfpm_connect_cb
 *
 * Connect-timer callback: attempt a non-blocking TCP connection to
 * the FPM server. Immediate success brings the session up; a pending
 * async connect moves the state machine to CONNECTING and waits for
 * socket readiness; any other failure re-arms the retry timer.
 */
static int zfpm_connect_cb(struct thread *t)
{
	int sock, ret;
	struct sockaddr_in serv;

	assert(zfpm_g->state == ZFPM_STATE_ACTIVE);

	sock = socket(AF_INET, SOCK_STREAM, 0);
	if (sock < 0) {
		zlog_err("Failed to create socket for connect(): %s",
			 strerror(errno));
		zfpm_g->stats.connect_no_sock++;
		return 0;
	}

	set_nonblocking(sock);

	/* Make server socket. */
	memset(&serv, 0, sizeof(serv));
	serv.sin_family = AF_INET;
	serv.sin_port = htons(zfpm_g->fpm_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
	serv.sin_len = sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
	if (!zfpm_g->fpm_server)
		serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	else
		/* fpm_server was stored via inet_addr(), so it is already
		 * in network byte order — no htonl() here. */
		serv.sin_addr.s_addr = (zfpm_g->fpm_server);

	/*
	 * Connect to the FPM.
	 */
	zfpm_g->connect_calls++;
	zfpm_g->stats.connect_calls++;
	zfpm_g->last_connect_call_time = monotime(NULL);

	ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
	if (ret >= 0) {
		zfpm_g->sock = sock;
		zfpm_connection_up("connect succeeded");
		return 1;
	}

	if (errno == EINPROGRESS) {
		/* Async connect in flight: completion is detected by the
		 * read/write callbacks via zfpm_connect_check(). */
		zfpm_g->sock = sock;
		zfpm_read_on();
		zfpm_write_on();
		zfpm_set_state(ZFPM_STATE_CONNECTING,
			       "async connect in progress");
		return 0;
	}

	zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
	close(sock);

	/*
	 * Restart timer for retrying connection.
	 */
	zfpm_start_connect_timer("connect() failed");
	return 0;
}
1315
/*
 * zfpm_set_state
 *
 * Move state machine into the given state.
 *
 * The asserts in the switch document the only legal predecessor states
 * and the resources (socket, read/write/connect threads) that must
 * already exist when entering each target state.
 */
static void zfpm_set_state(enum zfpm_state state, const char *reason)
{
	enum zfpm_state cur_state = zfpm_g->state;

	if (!reason)
		reason = "Unknown";

	/* No-op transition: nothing to log or change. */
	if (state == cur_state)
		return;

	zfpm_debug("beginning state transition %s -> %s. Reason: %s",
		   zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
		   reason);

	switch (state) {

	case ZFPM_STATE_IDLE:
		assert(cur_state == ZFPM_STATE_ESTABLISHED);
		break;

	case ZFPM_STATE_ACTIVE:
		assert(cur_state == ZFPM_STATE_IDLE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->t_connect);
		break;

	case ZFPM_STATE_CONNECTING:
		assert(zfpm_g->sock);
		assert(cur_state == ZFPM_STATE_ACTIVE);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;

	case ZFPM_STATE_ESTABLISHED:
		assert(cur_state == ZFPM_STATE_ACTIVE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->sock);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;
	}

	zfpm_g->state = state;
}
1365
1366/*
1367 * zfpm_calc_connect_delay
1368 *
1369 * Returns the number of seconds after which we should attempt to
1370 * reconnect to the FPM.
1371 */
d62a17ae 1372static long zfpm_calc_connect_delay(void)
5adc2528 1373{
d62a17ae 1374 time_t elapsed;
5adc2528 1375
d62a17ae 1376 /*
1377 * Return 0 if this is our first attempt to connect.
1378 */
1379 if (zfpm_g->connect_calls == 0) {
1380 return 0;
1381 }
5adc2528 1382
d62a17ae 1383 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
5adc2528 1384
d62a17ae 1385 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1386 return 0;
1387 }
5adc2528 1388
d62a17ae 1389 return ZFPM_CONNECT_RETRY_IVL - elapsed;
5adc2528
AS
1390}
1391
/*
 * zfpm_start_connect_timer
 *
 * Schedule the next connection attempt (after the computed backoff
 * delay) and move the state machine to ACTIVE.
 */
static void zfpm_start_connect_timer(const char *reason)
{
	long delay_secs;

	/* Must not already have a pending connect timer or open socket. */
	assert(!zfpm_g->t_connect);
	assert(zfpm_g->sock < 0);

	assert(zfpm_g->state == ZFPM_STATE_IDLE
	       || zfpm_g->state == ZFPM_STATE_ACTIVE
	       || zfpm_g->state == ZFPM_STATE_CONNECTING);

	delay_secs = zfpm_calc_connect_delay();
	zfpm_debug("scheduling connect in %ld seconds", delay_secs);

	thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
			 &zfpm_g->t_connect);
	zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
}
1413
/*
 * zfpm_is_enabled
 *
 * Returns true if the zebra FPM module has been enabled.
 * (Set at init time; cleared when no usable message format exists.)
 */
static inline int zfpm_is_enabled(void)
{
	return zfpm_g->enabled;
}
1423
1424/*
1425 * zfpm_conn_is_up
1426 *
2951a7a4 1427 * Returns true if the connection to the FPM is up.
5adc2528 1428 */
d62a17ae 1429static inline int zfpm_conn_is_up(void)
5adc2528 1430{
d62a17ae 1431 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1432 return 0;
5adc2528 1433
d62a17ae 1434 assert(zfpm_g->sock >= 0);
5adc2528 1435
d62a17ae 1436 return 1;
5adc2528
AS
1437}
1438
/*
 * zfpm_trigger_update
 *
 * The zebra code invokes this function to indicate that we should
 * send an update to the FPM about the given route_node.
 *
 * The destination is flagged and appended to the dest queue exactly
 * once; repeat triggers while an update is pending are counted and
 * dropped. Always returns 0 (hook-callback convention).
 */
static int zfpm_trigger_update(struct route_node *rn, const char *reason)
{
	rib_dest_t *dest;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	dest = rib_dest_from_rnode(rn);

	/* Already queued for FPM processing: nothing more to do. */
	if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
		zfpm_g->stats.redundant_triggers++;
		return 0;
	}

	if (reason) {
		zfpm_debug("%pFX triggering update to FPM - Reason: %s", &rn->p,
			   reason);
	}

	SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
	TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
	zfpm_g->stats.updates_triggered++;

	/*
	 * Make sure that writes are enabled.
	 */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1481
e5218ec8
AD
1482/*
1483 * Generate Key for FPM MAC info hash entry
e5218ec8
AD
1484 */
1485static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1486{
1487 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1488 uint32_t mac_key;
1489
1490 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1491
1492 return jhash_2words(mac_key, fpm_mac->vni, 0);
1493}
1494
1495/*
1496 * Compare function for FPM MAC info hash lookup
1497 */
1498static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1499{
1500 const struct fpm_mac_info_t *fpm_mac1 = p1;
1501 const struct fpm_mac_info_t *fpm_mac2 = p2;
1502
1503 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1504 != 0)
1505 return false;
e5218ec8
AD
1506 if (fpm_mac1->vni != fpm_mac2->vni)
1507 return false;
1508
1509 return true;
1510}
1511
/*
 * Lookup FPM MAC info hash entry.
 *
 * 'key' only needs its macaddr and vni fields populated; returns the
 * queued entry or NULL if none exists.
 */
static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
{
	return hash_lookup(zfpm_g->fpm_mac_info_table, key);
}
1519
1520/*
1521 * Callback to allocate fpm_mac_info_t structure.
1522 */
1523static void *zfpm_mac_info_alloc(void *p)
1524{
1525 const struct fpm_mac_info_t *key = p;
1526 struct fpm_mac_info_t *fpm_mac;
1527
1528 fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));
1529
1530 memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
e5218ec8
AD
1531 fpm_mac->vni = key->vni;
1532
1533 return (void *)fpm_mac;
1534}
1535
/*
 * Delink and free fpm_mac_info_t.
 *
 * Removes the entry from both the hash table and the mac_q work queue
 * before releasing its memory.
 */
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
{
	hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
	TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
	XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
}
1545
/*
 * zfpm_trigger_rmac_update
 *
 * Zebra code invokes this function to indicate that we should
 * send an update to FPM for given MAC entry.
 *
 * This function checks if we already have enqueued an update for this RMAC,
 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
 *
 * Always returns 0 (hook-callback convention).
 */
static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
				    bool delete, const char *reason)
{
	struct fpm_mac_info_t *fpm_mac, key;
	struct interface *vxlan_if, *svi_if;
	bool mac_found = false;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	if (reason) {
		zfpm_debug("triggering update to FPM - Reason: %s - %pEA",
			   reason, &rmac->macaddr);
	}

	vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
	svi_if = zl3vni_map_to_svi_if(zl3vni);

	/* Build the (macaddr, vni) lookup key. */
	memset(&key, 0, sizeof(struct fpm_mac_info_t));

	memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
	key.vni = zl3vni->vni;

	/* Check if this MAC is already present in the queue. */
	fpm_mac = zfpm_mac_info_lookup(&key);

	if (fpm_mac) {
		mac_found = true;

		/*
		 * If the enqueued op is "add" and current op is "delete",
		 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
		 * While processing FPM queue, we will silently delete this
		 * MAC entry without sending any update for this MAC.
		 */
		if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
		    delete == 1) {
			SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
			UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
			return 0;
		}
	} else {
		/* Not queued yet: allocate a fresh entry via the hash. */
		fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
				   zfpm_mac_info_alloc);
		if (!fpm_mac)
			return 0;
	}

	/* Refresh payload fields with the latest RMAC state. */
	fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
	fpm_mac->zebra_flags = rmac->flags;
	fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
	fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;

	SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
	if (delete)
		SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
	else
		UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);

	/* Only enqueue newly created entries; existing ones are already
	 * on the mac_q. */
	if (!mac_found)
		TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);

	zfpm_g->stats.updates_triggered++;

	/* If writes are already enabled, return. */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1630
fbe748e5
AD
1631/*
1632 * This function is called when the FPM connections is established.
1633 * Iterate over all the RMAC entries for the given L3VNI
1634 * and enqueue the RMAC for FPM processing.
1635 */
1ac88792 1636static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
fbe748e5
AD
1637 void *args)
1638{
1ac88792 1639 zebra_mac_t *zrmac = (zebra_mac_t *)bucket->data;
fbe748e5
AD
1640 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args;
1641
1642 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1643}
1644
1645/*
1646 * This function is called when the FPM connections is established.
1647 * This function iterates over all the L3VNIs to trigger
1648 * FPM updates for RMACs currently available.
1649 */
1ac88792 1650static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
fbe748e5 1651{
1ac88792 1652 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)bucket->data;
fbe748e5
AD
1653
1654 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1655 (void *)zl3vni);
1656}
1657
/*
 * zfpm_stats_timer_cb
 *
 * Periodic stats timer: rotates the per-interval counters into the
 * cumulative set and restarts the timer.
 */
static int zfpm_stats_timer_cb(struct thread *t)
{
	zfpm_g->t_stats = NULL;

	/*
	 * Remember the stats collected in the last interval for display
	 * purposes.
	 */
	zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);

	/*
	 * Add the current set of stats into the cumulative statistics.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &zfpm_g->cumulative_stats);

	/*
	 * Start collecting stats afresh over the next interval.
	 */
	zfpm_stats_reset(&zfpm_g->stats);

	zfpm_start_stats_timer();

	return 0;
}
1686
1687/*
1688 * zfpm_stop_stats_timer
1689 */
d62a17ae 1690static void zfpm_stop_stats_timer(void)
5adc2528 1691{
d62a17ae 1692 if (!zfpm_g->t_stats)
1693 return;
5adc2528 1694
d62a17ae 1695 zfpm_debug("Stopping existing stats timer");
50478845 1696 thread_cancel(&zfpm_g->t_stats);
5adc2528
AS
1697}
1698
/*
 * zfpm_start_stats_timer
 *
 * Arm the periodic stats timer; must not already be running.
 */
void zfpm_start_stats_timer(void)
{
	assert(!zfpm_g->t_stats);

	thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
			 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
1709
/*
 * Helper macro for zfpm_show_stats() below: prints one counter's
 * cumulative total and its value over the last interval.
 * (Relies on 'vty', 'total_stats' and 'zfpm_g' in the caller's scope.)
 */
#define ZFPM_SHOW_STAT(counter)                                                \
	do {                                                                   \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                  \
			total_stats.counter, zfpm_g->last_ivl_stats.counter);  \
	} while (0)
5adc2528
AS
1718
/*
 * zfpm_show_stats
 *
 * Print all FPM counters to the vty: cumulative totals alongside the
 * values from the last stats interval.
 */
static void zfpm_show_stats(struct vty *vty)
{
	struct zfpm_stats total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* Only report the clear time if stats were ever cleared. */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1768
/*
 * zfpm_clear_stats
 *
 * Reset all counters (current, last-interval and cumulative), restart
 * the stats timer, and record the clear time.
 */
static void zfpm_clear_stats(struct vty *vty)
{
	if (!zfpm_is_enabled()) {
		vty_out(vty, "The FPM module is not enabled...\n");
		return;
	}

	zfpm_stats_reset(&zfpm_g->stats);
	zfpm_stats_reset(&zfpm_g->last_ivl_stats);
	zfpm_stats_reset(&zfpm_g->cumulative_stats);

	/* Restart the timer so the next interval starts from now. */
	zfpm_stop_stats_timer();
	zfpm_start_stats_timer();

	zfpm_g->last_stats_clear_time = monotime(NULL);

	vty_out(vty, "Cleared FPM stats\n");
}
1790
/*
 * show_zebra_fpm_stats
 *
 * CLI handler for "show zebra fpm stats".
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
1805
/*
 * clear_zebra_fpm_stats
 *
 * CLI handler for "clear zebra fpm stats".
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1820
/*
 * update fpm connection information
 *
 * CLI handler for "fpm connection ip A.B.C.D port (1-65535)": stores
 * the FPM server address (network byte order, via inet_addr()) and
 * port in the global state. Takes effect on the next (re)connect.
 *
 * NOTE(review): the help-string block appears to have fewer entries
 * than command tokens, and the port is parsed with atoi() — the CLI
 * tokenizer already range-checks (1-65535), so the extra TCP_MIN/MAX
 * check looks redundant. Confirm before changing; help strings are
 * user-visible output.
 */
DEFUN ( fpm_remote_ip,
       fpm_remote_ip_cmd,
       "fpm connection ip A.B.C.D port (1-65535)",
       "fpm connection remote ip and port\n"
       "Remote fpm server ip A.B.C.D\n"
       "Enter ip ")
{

	in_addr_t fpm_server;
	uint32_t port_no;

	fpm_server = inet_addr(argv[3]->arg);
	if (fpm_server == INADDR_NONE)
		return CMD_ERR_INCOMPLETE;

	port_no = atoi(argv[5]->arg);
	if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
		return CMD_ERR_INCOMPLETE;

	zfpm_g->fpm_server = fpm_server;
	zfpm_g->fpm_port = port_no;


	return CMD_SUCCESS;
}
1849
/*
 * CLI handler for "no fpm connection ip A.B.C.D port (1-65535)":
 * restores the default FPM server address/port, but only if the
 * supplied address and port match the currently configured values.
 */
DEFUN ( no_fpm_remote_ip,
       no_fpm_remote_ip_cmd,
       "no fpm connection ip A.B.C.D port (1-65535)",
       "fpm connection remote ip and port\n"
       "Connection\n"
       "Remote fpm server ip A.B.C.D\n"
       "Enter ip ")
{
	/* Reject a "no" form that names a different server/port. */
	if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
	    || zfpm_g->fpm_port != atoi(argv[6]->arg))
		return CMD_ERR_NO_MATCH;

	zfpm_g->fpm_server = FPM_DEFAULT_IP;
	zfpm_g->fpm_port = FPM_DEFAULT_PORT;

	return CMD_SUCCESS;
}
711ff0ba 1867
/*
 * zfpm_init_message_format
 *
 * Select the wire format used to talk to the FPM ("netlink" or
 * "protobuf"). With no explicit format, prefer netlink, then
 * protobuf, depending on what this build supports. Leaves
 * message_format at ZFPM_MSG_FORMAT_NONE (which disables the module)
 * if the requested format is unavailable or unknown.
 */
static inline void zfpm_init_message_format(const char *format)
{
	int have_netlink, have_protobuf;

/* Compile-time availability of each encoding. */
#ifdef HAVE_NETLINK
	have_netlink = 1;
#else
	have_netlink = 0;
#endif

#ifdef HAVE_PROTOBUF
	have_protobuf = 1;
#else
	have_protobuf = 0;
#endif

	zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;

	if (!format) {
		/* No explicit choice: pick the best available format. */
		if (have_netlink) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		} else if (have_protobuf) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		}
		return;
	}

	if (!strcmp("netlink", format)) {
		if (!have_netlink) {
			flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
				 "FPM netlink message format is not available");
			return;
		}
		zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		return;
	}

	if (!strcmp("protobuf", format)) {
		if (!have_protobuf) {
			flog_err(
				EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
				"FPM protobuf message format is not available");
			return;
		}
		/* Protobuf still works but is on its way out upstream. */
		flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
			  "FPM protobuf message format is deprecated and scheduled to be removed. Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
		zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		return;
	}

	flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
		  format);
}
1924
/**
 * fpm_remote_srv_write
 *
 * Module to write remote fpm connection
 *
 * Emits the "fpm connection ip ... port ..." line into the running
 * config, but only when the configured server/port differ from the
 * defaults (so default settings produce no config line).
 *
 * Returns ZERO on success.
 */

static int fpm_remote_srv_write(struct vty *vty)
{
	struct in_addr in;

	in.s_addr = zfpm_g->fpm_server;

	if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
	     && zfpm_g->fpm_server != INADDR_ANY)
	    || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
		vty_out(vty, "fpm connection ip %pI4 port %d\n", &in,
			zfpm_g->fpm_port);

	return 0;
}
1947
1948
static int fpm_remote_srv_write(struct vty *vty);
/* Zebra node: registered so fpm_remote_srv_write() is called during
 * "write config" to persist the FPM connection settings. */
static struct cmd_node zebra_node = {
	.name = "zebra",
	.node = ZEBRA_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "",
	.config_write = fpm_remote_srv_write,
};
4f8ea50c
DL
1958
1959
5adc2528
AS
/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module: global state,
 * queues, MAC info hash, CLI nodes, message format selection, I/O
 * buffers and the stats/connect timers.
 *
 * @param[in] master thread master to schedule FPM work on.
 *
 * NOTE(review): the historical doc listed @param port/enable/format,
 * but those are now locals here (format comes from the module load
 * args); the function always returns 0 — confirm callers treat this
 * as a frr_late_init hook result, not a boolean.
 */
static int zfpm_init(struct thread_master *master)
{
	int enable = 1;
	uint16_t port = 0;
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t entries */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->sock = -1;
	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	/* Register CLI node and commands. */
	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to defaults where nothing was configured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
5adc2528 2025
f0c459f0
DS
/*
 * zfpm_fini
 *
 * Early-shutdown hook: stop all FPM I/O and timers and detach the
 * rib_update hook. Always returns 0.
 */
static int zfpm_fini(void)
{
	zfpm_write_off();
	zfpm_read_off();
	zfpm_connect_off();

	zfpm_stop_stats_timer();

	hook_unregister(rib_update, zfpm_trigger_update);
	return 0;
}
2037
/*
 * Module entry point: wire the FPM triggers into zebra's hooks and
 * defer the heavy initialization/teardown to the frr lifecycle hooks.
 */
static int zebra_fpm_module_init(void)
{
	hook_register(rib_update, zfpm_trigger_update);
	hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
	hook_register(frr_late_init, zfpm_init);
	hook_register(frr_early_fini, zfpm_fini);
	return 0;
}
4f8ea50c 2046
/* Register this file as a loadable FRR module. */
FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
		 .description = "zebra FPM (Forwarding Plane Manager) module",
		 .init = zebra_fpm_module_init,
);