zebra/zebra_fpm.c
5adc2528
AS
1/*
2 * Main implementation file for interface to Forwarding Plane Manager.
3 *
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
6 *
7 * This file is part of GNU Zebra.
8 *
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
13 *
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
896014f4
DL
19 * You should have received a copy of the GNU General Public License along
20 * with this program; see the file COPYING; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
5adc2528
AS
22 */
23
24#include <zebra.h>
25
26#include "log.h"
4f8ea50c 27#include "libfrr.h"
5adc2528
AS
28#include "stream.h"
29#include "thread.h"
30#include "network.h"
31#include "command.h"
4f8ea50c 32#include "version.h"
e5218ec8 33#include "jhash.h"
5adc2528
AS
34
35#include "zebra/rib.h"
7c551956
DS
36#include "zebra/zserv.h"
37#include "zebra/zebra_ns.h"
38#include "zebra/zebra_vrf.h"
67aeb554 39#include "zebra/zebra_errors.h"
e5218ec8 40#include "zebra/zebra_memory.h"
5adc2528
AS
41
42#include "fpm/fpm.h"
5adc2528 43#include "zebra_fpm_private.h"
e5218ec8 44#include "zebra/zebra_router.h"
a780a738 45#include "zebra_vxlan_private.h"
e5218ec8
AD
46
47DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
5adc2528
AS
48
49/*
50 * Interval at which we attempt to connect to the FPM.
51 */
52#define ZFPM_CONNECT_RETRY_IVL 5
53
54/*
55 * Sizes of outgoing and incoming stream buffers for writing/reading
56 * FPM messages.
57 */
58#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
59#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
60
61/*
62 * The maximum number of times the FPM socket write callback can call
63 * 'write' before it yields.
64 */
65#define ZFPM_MAX_WRITES_PER_RUN 10
66
67/*
68 * Interval over which we collect statistics.
69 */
70#define ZFPM_STATS_IVL_SECS 10
fbe748e5
AD
71#define FPM_MAX_MAC_MSG_LEN 512
72
7f5818fb 73static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args);
5adc2528
AS
74
75/*
76 * Structure that holds state for iterating over all route_node
77 * structures that are candidates for being communicated to the FPM.
78 */
332cba05 79struct zfpm_rnodes_iter {
d62a17ae 80 rib_tables_iter_t tables_iter;
81 route_table_iter_t iter;
332cba05 82};
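/*
 * The iterator above walks every route_node in every RIB table (see
 * zfpm_rnodes_iter_next() below). It can be paused and resumed, which
 * is what allows the conn_up/conn_down threads to yield periodically
 * while walking the entire RIB.
 */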
5adc2528
AS
83
84/*
85 * Statistics.
86 */
eeaf257b 87struct zfpm_stats {
d62a17ae 88 unsigned long connect_calls;
89 unsigned long connect_no_sock;
5adc2528 90
d62a17ae 91 unsigned long read_cb_calls;
5adc2528 92
d62a17ae 93 unsigned long write_cb_calls;
94 unsigned long write_calls;
95 unsigned long partial_writes;
96 unsigned long max_writes_hit;
97 unsigned long t_write_yields;
5adc2528 98
d62a17ae 99 unsigned long nop_deletes_skipped;
100 unsigned long route_adds;
101 unsigned long route_dels;
5adc2528 102
d62a17ae 103 unsigned long updates_triggered;
104 unsigned long redundant_triggers;
5adc2528 105
d62a17ae 106 unsigned long dests_del_after_update;
5adc2528 107
d62a17ae 108 unsigned long t_conn_down_starts;
109 unsigned long t_conn_down_dests_processed;
110 unsigned long t_conn_down_yields;
111 unsigned long t_conn_down_finishes;
5adc2528 112
d62a17ae 113 unsigned long t_conn_up_starts;
114 unsigned long t_conn_up_dests_processed;
115 unsigned long t_conn_up_yields;
116 unsigned long t_conn_up_aborts;
117 unsigned long t_conn_up_finishes;
eeaf257b 118};
5adc2528
AS
119
120/*
121 * States for the FPM state machine.
122 */
123typedef enum {
124
d62a17ae 125 /*
126 * In this state we are not yet ready to connect to the FPM. This
127 * can happen when this module is disabled, or if we're cleaning up
128 * after a connection has gone down.
129 */
130 ZFPM_STATE_IDLE,
131
132 /*
133 * Ready to talk to the FPM and periodically trying to connect to
134 * it.
135 */
136 ZFPM_STATE_ACTIVE,
137
138 /*
139 * In the middle of bringing up a TCP connection. Specifically,
140 * waiting for a connect() call to complete asynchronously.
141 */
142 ZFPM_STATE_CONNECTING,
143
144 /*
145 * TCP connection to the FPM is up.
146 */
147 ZFPM_STATE_ESTABLISHED
5adc2528
AS
148
149} zfpm_state_t;
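/*
 * Typical transitions (see zfpm_set_state() and its callers):
 *
 *   IDLE -> ACTIVE                     connect retry timer started
 *   ACTIVE -> CONNECTING               connect() returned EINPROGRESS
 *   CONNECTING -> ACTIVE               async connect failed, retrying
 *   ACTIVE/CONNECTING -> ESTABLISHED   connection to the FPM is up
 *   ESTABLISHED -> IDLE                connection went down, cleaning up
 */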
150
fb0aa886
AS
151/*
152 * Message format to be used to communicate with the FPM.
153 */
d62a17ae 154typedef enum {
155 ZFPM_MSG_FORMAT_NONE,
156 ZFPM_MSG_FORMAT_NETLINK,
157 ZFPM_MSG_FORMAT_PROTOBUF,
fb0aa886 158} zfpm_msg_format_e;
5adc2528
AS
159/*
160 * Globals.
161 */
d62a17ae 162typedef struct zfpm_glob_t_ {
163
164 /*
165 * True if the FPM module has been enabled.
166 */
167 int enabled;
168
169 /*
170 * Message format to be used to communicate with the fpm.
171 */
172 zfpm_msg_format_e message_format;
173
174 struct thread_master *master;
175
176 zfpm_state_t state;
177
178 in_addr_t fpm_server;
179 /*
180 * Port on which the FPM is running.
181 */
182 int fpm_port;
183
184 /*
185 * List of rib_dest_t structures to be processed
186 */
187 TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;
188
e5218ec8
AD
189 /*
190 * List of fpm_mac_info structures to be processed
191 */
192 TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;
193
194 /*
195 * Hash table of fpm_mac_info_t entries
196 *
197 * While adding fpm_mac_info_t for a MAC to the mac_q,
 198 * it is possible that another fpm_mac_info_t node for this MAC
199 * is already present in the queue.
200 * This is possible in the case of consecutive add->delete operations.
201 * To avoid such duplicate insertions in the mac_q,
202 * define a hash table for fpm_mac_info_t which can be looked up
203 * to see if an fpm_mac_info_t node for a MAC is already present
204 * in the mac_q.
205 */
206 struct hash *fpm_mac_info_table;
207
d62a17ae 208 /*
209 * Stream socket to the FPM.
210 */
211 int sock;
212
213 /*
214 * Buffers for messages to/from the FPM.
215 */
216 struct stream *obuf;
217 struct stream *ibuf;
218
219 /*
220 * Threads for I/O.
221 */
222 struct thread *t_connect;
223 struct thread *t_write;
224 struct thread *t_read;
225
226 /*
227 * Thread to clean up after the TCP connection to the FPM goes down
228 * and the state that belongs to it.
229 */
230 struct thread *t_conn_down;
231
232 struct {
332cba05 233 struct zfpm_rnodes_iter iter;
d62a17ae 234 } t_conn_down_state;
235
236 /*
237 * Thread to take actions once the TCP conn to the FPM comes up, and
238 * the state that belongs to it.
239 */
240 struct thread *t_conn_up;
241
242 struct {
332cba05 243 struct zfpm_rnodes_iter iter;
d62a17ae 244 } t_conn_up_state;
245
246 unsigned long connect_calls;
247 time_t last_connect_call_time;
248
249 /*
250 * Stats from the start of the current statistics interval up to
251 * now. These are the counters we typically update in the code.
252 */
eeaf257b 253 struct zfpm_stats stats;
d62a17ae 254
255 /*
256 * Statistics that were gathered in the last collection interval.
257 */
eeaf257b 258 struct zfpm_stats last_ivl_stats;
d62a17ae 259
260 /*
261 * Cumulative stats from the last clear to the start of the current
262 * statistics interval.
263 */
eeaf257b 264 struct zfpm_stats cumulative_stats;
d62a17ae 265
266 /*
267 * Stats interval timer.
268 */
269 struct thread *t_stats;
270
271 /*
272 * If non-zero, the last time when statistics were cleared.
273 */
274 time_t last_stats_clear_time;
5adc2528
AS
275
276} zfpm_glob_t;
277
278static zfpm_glob_t zfpm_glob_space;
279static zfpm_glob_t *zfpm_g = &zfpm_glob_space;
280
d62a17ae 281static int zfpm_trigger_update(struct route_node *rn, const char *reason);
4f8ea50c 282
d62a17ae 283static int zfpm_read_cb(struct thread *thread);
284static int zfpm_write_cb(struct thread *thread);
5adc2528 285
d62a17ae 286static void zfpm_set_state(zfpm_state_t state, const char *reason);
287static void zfpm_start_connect_timer(const char *reason);
288static void zfpm_start_stats_timer(void);
a780a738 289static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
5adc2528
AS
290
291/*
292 * zfpm_thread_should_yield
293 */
d62a17ae 294static inline int zfpm_thread_should_yield(struct thread *t)
5adc2528 295{
d62a17ae 296 return thread_should_yield(t);
5adc2528
AS
297}
298
299/*
300 * zfpm_state_to_str
301 */
d62a17ae 302static const char *zfpm_state_to_str(zfpm_state_t state)
5adc2528 303{
d62a17ae 304 switch (state) {
5adc2528 305
d62a17ae 306 case ZFPM_STATE_IDLE:
307 return "idle";
5adc2528 308
d62a17ae 309 case ZFPM_STATE_ACTIVE:
310 return "active";
5adc2528 311
d62a17ae 312 case ZFPM_STATE_CONNECTING:
313 return "connecting";
5adc2528 314
d62a17ae 315 case ZFPM_STATE_ESTABLISHED:
316 return "established";
5adc2528 317
d62a17ae 318 default:
319 return "unknown";
320 }
5adc2528
AS
321}
322
5adc2528
AS
323/*
324 * zfpm_get_elapsed_time
325 *
326 * Returns the time elapsed (in seconds) since the given time.
327 */
d62a17ae 328static time_t zfpm_get_elapsed_time(time_t reference)
5adc2528 329{
d62a17ae 330 time_t now;
5adc2528 331
d62a17ae 332 now = monotime(NULL);
5adc2528 333
d62a17ae 334 if (now < reference) {
335 assert(0);
336 return 0;
337 }
5adc2528 338
d62a17ae 339 return now - reference;
5adc2528
AS
340}
341
5adc2528
AS
342/*
343 * zfpm_rnodes_iter_init
344 */
332cba05 345static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
5adc2528 346{
d62a17ae 347 memset(iter, 0, sizeof(*iter));
348 rib_tables_iter_init(&iter->tables_iter);
349
350 /*
351 * This is a hack, but it makes implementing 'next' easier by
352 * ensuring that route_table_iter_next() will return NULL the first
353 * time we call it.
354 */
355 route_table_iter_init(&iter->iter, NULL);
356 route_table_iter_cleanup(&iter->iter);
5adc2528
AS
357}
358
359/*
360 * zfpm_rnodes_iter_next
361 */
332cba05
DS
362static inline struct route_node *
363zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
5adc2528 364{
d62a17ae 365 struct route_node *rn;
366 struct route_table *table;
5adc2528 367
d62a17ae 368 while (1) {
369 rn = route_table_iter_next(&iter->iter);
370 if (rn)
371 return rn;
5adc2528 372
d62a17ae 373 /*
374 * We've made our way through this table, go to the next one.
375 */
376 route_table_iter_cleanup(&iter->iter);
5adc2528 377
c6bbea17 378 table = rib_tables_iter_next(&iter->tables_iter);
5adc2528 379
d62a17ae 380 if (!table)
381 return NULL;
5adc2528 382
d62a17ae 383 route_table_iter_init(&iter->iter, table);
384 }
5adc2528 385
d62a17ae 386 return NULL;
5adc2528
AS
387}
388
389/*
390 * zfpm_rnodes_iter_pause
391 */
332cba05 392static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
5adc2528 393{
d62a17ae 394 route_table_iter_pause(&iter->iter);
5adc2528
AS
395}
396
397/*
398 * zfpm_rnodes_iter_cleanup
399 */
332cba05 400static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
5adc2528 401{
d62a17ae 402 route_table_iter_cleanup(&iter->iter);
403 rib_tables_iter_cleanup(&iter->tables_iter);
5adc2528
AS
404}
405
406/*
407 * zfpm_stats_init
408 *
409 * Initialize a statistics block.
410 */
eeaf257b 411static inline void zfpm_stats_init(struct zfpm_stats *stats)
5adc2528 412{
d62a17ae 413 memset(stats, 0, sizeof(*stats));
5adc2528
AS
414}
415
416/*
417 * zfpm_stats_reset
418 */
eeaf257b 419static inline void zfpm_stats_reset(struct zfpm_stats *stats)
5adc2528 420{
d62a17ae 421 zfpm_stats_init(stats);
5adc2528
AS
422}
423
424/*
425 * zfpm_stats_copy
426 */
eeaf257b
DS
427static inline void zfpm_stats_copy(const struct zfpm_stats *src,
428 struct zfpm_stats *dest)
5adc2528 429{
d62a17ae 430 memcpy(dest, src, sizeof(*dest));
5adc2528
AS
431}
432
433/*
434 * zfpm_stats_compose
435 *
 436 * Total up the statistics in two stats structures ('s1' and 's2') and
437 * return the result in the third argument, 'result'. Note that the
438 * pointer 'result' may be the same as 's1' or 's2'.
439 *
440 * For simplicity, the implementation below assumes that the stats
441 * structure is composed entirely of counters. This can easily be
442 * changed when necessary.
443 */
eeaf257b
DS
444static void zfpm_stats_compose(const struct zfpm_stats *s1,
445 const struct zfpm_stats *s2,
446 struct zfpm_stats *result)
5adc2528 447{
d62a17ae 448 const unsigned long *p1, *p2;
449 unsigned long *result_p;
450 int i, num_counters;
5adc2528 451
d62a17ae 452 p1 = (const unsigned long *)s1;
453 p2 = (const unsigned long *)s2;
454 result_p = (unsigned long *)result;
5adc2528 455
eeaf257b 456 num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
5adc2528 457
d62a17ae 458 for (i = 0; i < num_counters; i++) {
459 result_p[i] = p1[i] + p2[i];
460 }
5adc2528
AS
461}
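/*
 * Note: the pointer arithmetic in zfpm_stats_compose() is only valid
 * because struct zfpm_stats consists solely of 'unsigned long' counters,
 * as noted in the comment above. Adding a field of any other type would
 * require reworking this function.
 */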
462
463/*
464 * zfpm_read_on
465 */
d62a17ae 466static inline void zfpm_read_on(void)
5adc2528 467{
d62a17ae 468 assert(!zfpm_g->t_read);
469 assert(zfpm_g->sock >= 0);
5adc2528 470
d62a17ae 471 thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
472 &zfpm_g->t_read);
5adc2528
AS
473}
474
475/*
476 * zfpm_write_on
477 */
d62a17ae 478static inline void zfpm_write_on(void)
5adc2528 479{
d62a17ae 480 assert(!zfpm_g->t_write);
481 assert(zfpm_g->sock >= 0);
5adc2528 482
d62a17ae 483 thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
484 &zfpm_g->t_write);
5adc2528
AS
485}
486
487/*
488 * zfpm_read_off
489 */
d62a17ae 490static inline void zfpm_read_off(void)
5adc2528 491{
d62a17ae 492 THREAD_READ_OFF(zfpm_g->t_read);
5adc2528
AS
493}
494
495/*
496 * zfpm_write_off
497 */
d62a17ae 498static inline void zfpm_write_off(void)
5adc2528 499{
d62a17ae 500 THREAD_WRITE_OFF(zfpm_g->t_write);
5adc2528
AS
501}
502
f0c459f0
DS
503static inline void zfpm_connect_off(void)
504{
505 THREAD_TIMER_OFF(zfpm_g->t_connect);
506}
507
5adc2528
AS
508/*
509 * zfpm_conn_up_thread_cb
510 *
511 * Callback for actions to be taken when the connection to the FPM
512 * comes up.
513 */
d62a17ae 514static int zfpm_conn_up_thread_cb(struct thread *thread)
5adc2528 515{
d62a17ae 516 struct route_node *rnode;
332cba05 517 struct zfpm_rnodes_iter *iter;
d62a17ae 518 rib_dest_t *dest;
5adc2528 519
d62a17ae 520 zfpm_g->t_conn_up = NULL;
5adc2528 521
d62a17ae 522 iter = &zfpm_g->t_conn_up_state.iter;
5adc2528 523
d62a17ae 524 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
525 zfpm_debug(
526 "Connection not up anymore, conn_up thread aborting");
527 zfpm_g->stats.t_conn_up_aborts++;
528 goto done;
529 }
5adc2528 530
fbe748e5
AD
531 /* Enqueue FPM updates for all the RMAC entries */
532 hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table, NULL);
533
d62a17ae 534 while ((rnode = zfpm_rnodes_iter_next(iter))) {
535 dest = rib_dest_from_rnode(rnode);
536
537 if (dest) {
538 zfpm_g->stats.t_conn_up_dests_processed++;
539 zfpm_trigger_update(rnode, NULL);
540 }
541
542 /*
543 * Yield if need be.
544 */
545 if (!zfpm_thread_should_yield(thread))
546 continue;
547
548 zfpm_g->stats.t_conn_up_yields++;
549 zfpm_rnodes_iter_pause(iter);
550 zfpm_g->t_conn_up = NULL;
551 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
552 NULL, 0, &zfpm_g->t_conn_up);
553 return 0;
5adc2528
AS
554 }
555
d62a17ae 556 zfpm_g->stats.t_conn_up_finishes++;
557
558done:
559 zfpm_rnodes_iter_cleanup(iter);
560 return 0;
5adc2528
AS
561}
562
563/*
564 * zfpm_connection_up
565 *
566 * Called when the connection to the FPM comes up.
567 */
d62a17ae 568static void zfpm_connection_up(const char *detail)
5adc2528 569{
d62a17ae 570 assert(zfpm_g->sock >= 0);
571 zfpm_read_on();
572 zfpm_write_on();
573 zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);
574
575 /*
576 * Start thread to push existing routes to the FPM.
577 */
578 assert(!zfpm_g->t_conn_up);
579
580 zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
581
582 zfpm_debug("Starting conn_up thread");
583 zfpm_g->t_conn_up = NULL;
584 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
585 &zfpm_g->t_conn_up);
586 zfpm_g->stats.t_conn_up_starts++;
5adc2528
AS
587}
588
589/*
590 * zfpm_connect_check
591 *
592 * Check if an asynchronous connect() to the FPM is complete.
593 */
d62a17ae 594static void zfpm_connect_check(void)
5adc2528 595{
d62a17ae 596 int status;
597 socklen_t slen;
598 int ret;
599
600 zfpm_read_off();
601 zfpm_write_off();
602
603 slen = sizeof(status);
604 ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
605 &slen);
606
607 if (ret >= 0 && status == 0) {
608 zfpm_connection_up("async connect complete");
609 return;
610 }
611
612 /*
613 * getsockopt() failed or indicated an error on the socket.
614 */
615 close(zfpm_g->sock);
616 zfpm_g->sock = -1;
617
618 zfpm_start_connect_timer("getsockopt() after async connect failed");
619 return;
5adc2528
AS
620}
621
622/*
623 * zfpm_conn_down_thread_cb
624 *
625 * Callback that is invoked to clean up state after the TCP connection
626 * to the FPM goes down.
627 */
d62a17ae 628static int zfpm_conn_down_thread_cb(struct thread *thread)
5adc2528 629{
d62a17ae 630 struct route_node *rnode;
332cba05 631 struct zfpm_rnodes_iter *iter;
d62a17ae 632 rib_dest_t *dest;
a780a738 633 struct fpm_mac_info_t *mac = NULL;
5adc2528 634
d62a17ae 635 assert(zfpm_g->state == ZFPM_STATE_IDLE);
5adc2528 636
a780a738
AD
637 /*
638 * Delink and free all fpm_mac_info_t nodes
639 * in the mac_q and fpm_mac_info_hash
640 */
641 while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
642 zfpm_mac_info_del(mac);
643
d62a17ae 644 zfpm_g->t_conn_down = NULL;
5adc2528 645
d62a17ae 646 iter = &zfpm_g->t_conn_down_state.iter;
5adc2528 647
d62a17ae 648 while ((rnode = zfpm_rnodes_iter_next(iter))) {
649 dest = rib_dest_from_rnode(rnode);
5adc2528 650
d62a17ae 651 if (dest) {
652 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
653 TAILQ_REMOVE(&zfpm_g->dest_q, dest,
654 fpm_q_entries);
655 }
656
657 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
658 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
5adc2528 659
d62a17ae 660 zfpm_g->stats.t_conn_down_dests_processed++;
5adc2528 661
d62a17ae 662 /*
663 * Check if the dest should be deleted.
664 */
665 rib_gc_dest(rnode);
666 }
5adc2528 667
d62a17ae 668 /*
669 * Yield if need be.
670 */
671 if (!zfpm_thread_should_yield(thread))
672 continue;
673
674 zfpm_g->stats.t_conn_down_yields++;
675 zfpm_rnodes_iter_pause(iter);
676 zfpm_g->t_conn_down = NULL;
677 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
678 NULL, 0, &zfpm_g->t_conn_down);
679 return 0;
5adc2528
AS
680 }
681
d62a17ae 682 zfpm_g->stats.t_conn_down_finishes++;
683 zfpm_rnodes_iter_cleanup(iter);
684
685 /*
686 * Start the process of connecting to the FPM again.
687 */
688 zfpm_start_connect_timer("cleanup complete");
689 return 0;
5adc2528
AS
690}
691
692/*
693 * zfpm_connection_down
694 *
695 * Called when the connection to the FPM has gone down.
696 */
d62a17ae 697static void zfpm_connection_down(const char *detail)
5adc2528 698{
d62a17ae 699 if (!detail)
700 detail = "unknown";
5adc2528 701
d62a17ae 702 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
5adc2528 703
d62a17ae 704 zlog_info("connection to the FPM has gone down: %s", detail);
5adc2528 705
d62a17ae 706 zfpm_read_off();
707 zfpm_write_off();
5adc2528 708
d62a17ae 709 stream_reset(zfpm_g->ibuf);
710 stream_reset(zfpm_g->obuf);
5adc2528 711
d62a17ae 712 if (zfpm_g->sock >= 0) {
713 close(zfpm_g->sock);
714 zfpm_g->sock = -1;
715 }
5adc2528 716
d62a17ae 717 /*
718 * Start thread to clean up state after the connection goes down.
719 */
720 assert(!zfpm_g->t_conn_down);
d62a17ae 721 zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
722 zfpm_g->t_conn_down = NULL;
723 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
724 &zfpm_g->t_conn_down);
725 zfpm_g->stats.t_conn_down_starts++;
726
727 zfpm_set_state(ZFPM_STATE_IDLE, detail);
5adc2528
AS
728}
729
730/*
731 * zfpm_read_cb
732 */
d62a17ae 733static int zfpm_read_cb(struct thread *thread)
5adc2528 734{
d62a17ae 735 size_t already;
736 struct stream *ibuf;
737 uint16_t msg_len;
738 fpm_msg_hdr_t *hdr;
739
740 zfpm_g->stats.read_cb_calls++;
d62a17ae 741
742 /*
743 * Check if async connect is now done.
744 */
745 if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
746 zfpm_connect_check();
747 return 0;
5adc2528
AS
748 }
749
d62a17ae 750 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
751 assert(zfpm_g->sock >= 0);
5adc2528 752
d62a17ae 753 ibuf = zfpm_g->ibuf;
5adc2528 754
d62a17ae 755 already = stream_get_endp(ibuf);
756 if (already < FPM_MSG_HDR_LEN) {
757 ssize_t nbyte;
5adc2528 758
d62a17ae 759 nbyte = stream_read_try(ibuf, zfpm_g->sock,
760 FPM_MSG_HDR_LEN - already);
761 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
762 if (nbyte == -1) {
763 char buffer[1024];
764
772270f3
QY
765 snprintf(buffer, sizeof(buffer),
766 "closed socket in read(%d): %s", errno,
767 safe_strerror(errno));
677f704d 768 zfpm_connection_down(buffer);
996c9314 769 } else
677f704d 770 zfpm_connection_down("closed socket in read");
d62a17ae 771 return 0;
772 }
5adc2528 773
d62a17ae 774 if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
775 goto done;
5adc2528 776
d62a17ae 777 already = FPM_MSG_HDR_LEN;
778 }
5adc2528 779
d62a17ae 780 stream_set_getp(ibuf, 0);
5adc2528 781
d62a17ae 782 hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);
5adc2528 783
d62a17ae 784 if (!fpm_msg_hdr_ok(hdr)) {
785 zfpm_connection_down("invalid message header");
786 return 0;
5adc2528
AS
787 }
788
d62a17ae 789 msg_len = fpm_msg_len(hdr);
5adc2528 790
d62a17ae 791 /*
792 * Read out the rest of the packet.
793 */
794 if (already < msg_len) {
795 ssize_t nbyte;
5adc2528 796
d62a17ae 797 nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);
5adc2528 798
d62a17ae 799 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
800 if (nbyte == -1) {
801 char buffer[1024];
802
772270f3
QY
803 snprintf(buffer, sizeof(buffer),
804 "failed to read message(%d) %s", errno,
805 safe_strerror(errno));
677f704d 806 zfpm_connection_down(buffer);
996c9314 807 } else
677f704d 808 zfpm_connection_down("failed to read message");
d62a17ae 809 return 0;
810 }
811
812 if (nbyte != (ssize_t)(msg_len - already))
813 goto done;
814 }
815
d62a17ae 816 /*
817 * Just throw it away for now.
818 */
819 stream_reset(ibuf);
820
821done:
822 zfpm_read_on();
823 return 0;
5adc2528
AS
824}
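/*
 * Note on framing: the read path above and the write paths below both
 * rely on every FPM message starting with an fpm_msg_hdr_t (version,
 * msg_type, msg_len) as declared in fpm/fpm.h. msg_len is carried in
 * network byte order (see the htons() calls in the build functions),
 * and FPM_MSG_HDR_LEN/fpm_msg_len() are used to find message boundaries
 * in the byte stream.
 */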
825
21d814eb
AD
826static bool zfpm_updates_pending(void)
827{
828 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
829 return true;
830
831 return false;
832}
833
5adc2528
AS
834/*
835 * zfpm_writes_pending
836 *
2951a7a4 837 * Returns true if we may have something to write to the FPM.
5adc2528 838 */
d62a17ae 839static int zfpm_writes_pending(void)
5adc2528
AS
840{
841
d62a17ae 842 /*
843 * Check if there is any data in the outbound buffer that has not
844 * been written to the socket yet.
845 */
846 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
847 return 1;
5adc2528 848
d62a17ae 849 /*
21d814eb 850 * Check if there are any updates scheduled on the outbound queues.
d62a17ae 851 */
21d814eb 852 if (zfpm_updates_pending())
d62a17ae 853 return 1;
5adc2528 854
d62a17ae 855 return 0;
5adc2528
AS
856}
857
858/*
859 * zfpm_encode_route
860 *
861 * Encode a message to the FPM with information about the given route.
862 *
863 * Returns the number of bytes written to the buffer. 0 or a negative
864 * value indicates an error.
865 */
d62a17ae 866static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
867 char *in_buf, size_t in_buf_len,
868 fpm_msg_type_e *msg_type)
5adc2528 869{
d62a17ae 870 size_t len;
9bf75362 871#ifdef HAVE_NETLINK
d62a17ae 872 int cmd;
9bf75362 873#endif
d62a17ae 874 len = 0;
5adc2528 875
d62a17ae 876 *msg_type = FPM_MSG_TYPE_NONE;
5adc2528 877
d62a17ae 878 switch (zfpm_g->message_format) {
5adc2528 879
d62a17ae 880 case ZFPM_MSG_FORMAT_PROTOBUF:
fb0aa886 881#ifdef HAVE_PROTOBUF
d62a17ae 882 len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
883 in_buf_len);
884 *msg_type = FPM_MSG_TYPE_PROTOBUF;
fb0aa886 885#endif
d62a17ae 886 break;
5adc2528 887
d62a17ae 888 case ZFPM_MSG_FORMAT_NETLINK:
fb0aa886 889#ifdef HAVE_NETLINK
d62a17ae 890 *msg_type = FPM_MSG_TYPE_NETLINK;
891 cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
892 len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
893 in_buf_len);
894 assert(fpm_msg_align(len) == len);
895 *msg_type = FPM_MSG_TYPE_NETLINK;
5adc2528 896#endif /* HAVE_NETLINK */
d62a17ae 897 break;
fb0aa886 898
d62a17ae 899 default:
900 break;
901 }
fb0aa886 902
d62a17ae 903 return len;
5adc2528
AS
904}
905
906/*
907 * zfpm_route_for_update
908 *
f0f77c9a 909 * Returns the re that is to be sent to the FPM for a given dest.
5adc2528 910 */
d62a17ae 911struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
5adc2528 912{
5f7a4718 913 return dest->selected_fib;
5adc2528
AS
914}
915
916/*
21d814eb 917 * Define an enum for return codes for queue processing functions
5adc2528 918 *
21d814eb
AD
919 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
920 * Stop processing all the queues and empty the buffer by writing its content
921 * to the socket.
922 *
923 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
924 * empty or we have processed enough updates from this queue.
925 * So, move on to the next queue.
5adc2528 926 */
21d814eb
AD
927enum {
928 FPM_WRITE_STOP = 0,
929 FPM_GOTO_NEXT_Q = 1
930};
931
932#define FPM_QUEUE_PROCESS_LIMIT 10000
933
934/*
935 * zfpm_build_route_updates
936 *
937 * Process the dest_q queue and write FPM messages to the outbound buffer.
938 */
939static int zfpm_build_route_updates(void)
5adc2528 940{
d62a17ae 941 struct stream *s;
942 rib_dest_t *dest;
943 unsigned char *buf, *data, *buf_end;
944 size_t msg_len;
945 size_t data_len;
946 fpm_msg_hdr_t *hdr;
947 struct route_entry *re;
948 int is_add, write_msg;
949 fpm_msg_type_e msg_type;
21d814eb 950 uint16_t q_limit;
d62a17ae 951
21d814eb
AD
952 if (TAILQ_EMPTY(&zfpm_g->dest_q))
953 return FPM_GOTO_NEXT_Q;
d62a17ae 954
21d814eb
AD
955 s = zfpm_g->obuf;
956 q_limit = FPM_QUEUE_PROCESS_LIMIT;
d62a17ae 957
21d814eb 958 do {
d62a17ae 959 /*
960 * Make sure there is enough space to write another message.
961 */
962 if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
21d814eb 963 return FPM_WRITE_STOP;
d62a17ae 964
965 buf = STREAM_DATA(s) + stream_get_endp(s);
966 buf_end = buf + STREAM_WRITEABLE(s);
967
968 dest = TAILQ_FIRST(&zfpm_g->dest_q);
969 if (!dest)
21d814eb 970 return FPM_GOTO_NEXT_Q;
d62a17ae 971
972 assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));
973
974 hdr = (fpm_msg_hdr_t *)buf;
975 hdr->version = FPM_PROTO_VERSION;
976
977 data = fpm_msg_data(hdr);
978
979 re = zfpm_route_for_update(dest);
980 is_add = re ? 1 : 0;
981
982 write_msg = 1;
983
984 /*
 985 * If this is a route deletion, and we have not sent the route to
 986 * the FPM previously, skip it.
988 */
989 if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
990 write_msg = 0;
991 zfpm_g->stats.nop_deletes_skipped++;
992 }
993
994 if (write_msg) {
995 data_len = zfpm_encode_route(dest, re, (char *)data,
996 buf_end - data, &msg_type);
997
998 assert(data_len);
999 if (data_len) {
1000 hdr->msg_type = msg_type;
1001 msg_len = fpm_data_len_to_msg_len(data_len);
1002 hdr->msg_len = htons(msg_len);
1003 stream_forward_endp(s, msg_len);
1004
1005 if (is_add)
1006 zfpm_g->stats.route_adds++;
1007 else
1008 zfpm_g->stats.route_dels++;
1009 }
1010 }
1011
1012 /*
1013 * Remove the dest from the queue, and reset the flag.
1014 */
1015 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1016 TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);
1017
1018 if (is_add) {
1019 SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1020 } else {
1021 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1022 }
1023
1024 /*
1025 * Delete the destination if necessary.
1026 */
1027 if (rib_gc_dest(dest->rnode))
1028 zfpm_g->stats.dests_del_after_update++;
1029
21d814eb
AD
1030 q_limit--;
1031 if (q_limit == 0) {
1032 /*
1033 * We have processed enough updates in this queue.
1034 * Now yield for other queues.
1035 */
1036 return FPM_GOTO_NEXT_Q;
1037 }
c5431822 1038 } while (true);
21d814eb
AD
1039}
1040
1041/*
1042 * zfpm_encode_mac
1043 *
1044 * Encode a message to FPM with information about the given MAC.
1045 *
1046 * Returns the number of bytes written to the buffer.
1047 */
1048static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
1049 size_t in_buf_len, fpm_msg_type_e *msg_type)
1050{
1051 size_t len = 0;
1052
1053 *msg_type = FPM_MSG_TYPE_NONE;
1054
1055 switch (zfpm_g->message_format) {
1056
1057 case ZFPM_MSG_FORMAT_NONE:
1058 break;
1059 case ZFPM_MSG_FORMAT_NETLINK:
9da60d0a
AD
1060#ifdef HAVE_NETLINK
1061 len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
1062 assert(fpm_msg_align(len) == len);
1063 *msg_type = FPM_MSG_TYPE_NETLINK;
1064#endif /* HAVE_NETLINK */
21d814eb
AD
1065 break;
1066 case ZFPM_MSG_FORMAT_PROTOBUF:
1067 break;
1068 }
1069 return len;
1070}
1071
1072static int zfpm_build_mac_updates(void)
1073{
1074 struct stream *s;
1075 struct fpm_mac_info_t *mac;
1076 unsigned char *buf, *data, *buf_end;
1077 fpm_msg_hdr_t *hdr;
1078 size_t data_len, msg_len;
1079 fpm_msg_type_e msg_type;
1080 uint16_t q_limit;
1081
1082 if (TAILQ_EMPTY(&zfpm_g->mac_q))
1083 return FPM_GOTO_NEXT_Q;
1084
1085 s = zfpm_g->obuf;
1086 q_limit = FPM_QUEUE_PROCESS_LIMIT;
1087
1088 do {
1089 /* Make sure there is enough space to write another message. */
1090 if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
1091 return FPM_WRITE_STOP;
1092
1093 buf = STREAM_DATA(s) + stream_get_endp(s);
1094 buf_end = buf + STREAM_WRITEABLE(s);
1095
1096 mac = TAILQ_FIRST(&zfpm_g->mac_q);
1097 if (!mac)
1098 return FPM_GOTO_NEXT_Q;
1099
1100 /* Check for no-op */
1101 if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
1102 zfpm_g->stats.nop_deletes_skipped++;
1103 zfpm_mac_info_del(mac);
1104 continue;
1105 }
1106
1107 hdr = (fpm_msg_hdr_t *)buf;
1108 hdr->version = FPM_PROTO_VERSION;
1109
1110 data = fpm_msg_data(hdr);
1111 data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
1112 &msg_type);
9da60d0a 1113 assert(data_len);
21d814eb
AD
1114
1115 hdr->msg_type = msg_type;
1116 msg_len = fpm_data_len_to_msg_len(data_len);
1117 hdr->msg_len = htons(msg_len);
1118 stream_forward_endp(s, msg_len);
1119
1120 /* Remove the MAC from the queue, and delete it. */
1121 zfpm_mac_info_del(mac);
1122
1123 q_limit--;
1124 if (q_limit == 0) {
1125 /*
1126 * We have processed enough updates in this queue.
1127 * Now yield for other queues.
1128 */
1129 return FPM_GOTO_NEXT_Q;
1130 }
d62a17ae 1131 } while (1);
5adc2528
AS
1132}
1133
21d814eb
AD
1134/*
1135 * zfpm_build_updates
1136 *
1137 * Process the outgoing queues and write messages to the outbound
1138 * buffer.
1139 */
1140static void zfpm_build_updates(void)
1141{
1142 struct stream *s;
1143
1144 s = zfpm_g->obuf;
1145 assert(stream_empty(s));
1146
1147 do {
1148 /*
1149 * Stop processing the queues if zfpm_g->obuf is full
1150 * or we do not have more updates to process
1151 */
1152 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1153 break;
1154 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1155 break;
1156 } while (zfpm_updates_pending());
1157}
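/*
 * Note: each pass above services the MAC queue before the route queue
 * and stops as soon as either builder reports FPM_WRITE_STOP (outbound
 * buffer full). FPM_QUEUE_PROCESS_LIMIT bounds how many entries one
 * queue may consume before control moves to the other, so neither queue
 * can starve the other indefinitely.
 */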
1158
5adc2528
AS
1159/*
1160 * zfpm_write_cb
1161 */
d62a17ae 1162static int zfpm_write_cb(struct thread *thread)
5adc2528 1163{
d62a17ae 1164 struct stream *s;
1165 int num_writes;
1166
1167 zfpm_g->stats.write_cb_calls++;
d62a17ae 1168
1169 /*
1170 * Check if async connect is now done.
1171 */
1172 if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
1173 zfpm_connect_check();
1174 return 0;
1175 }
5adc2528 1176
d62a17ae 1177 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
1178 assert(zfpm_g->sock >= 0);
5adc2528 1179
d62a17ae 1180 num_writes = 0;
5adc2528 1181
d62a17ae 1182 do {
1183 int bytes_to_write, bytes_written;
5adc2528 1184
d62a17ae 1185 s = zfpm_g->obuf;
5adc2528 1186
d62a17ae 1187 /*
1188 * If the stream is empty, try fill it up with data.
1189 */
1190 if (stream_empty(s)) {
1191 zfpm_build_updates();
1192 }
5adc2528 1193
d62a17ae 1194 bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
1195 if (!bytes_to_write)
1196 break;
5adc2528 1197
d62a17ae 1198 bytes_written =
2d34fb80 1199 write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
d62a17ae 1200 zfpm_g->stats.write_calls++;
1201 num_writes++;
5adc2528 1202
d62a17ae 1203 if (bytes_written < 0) {
1204 if (ERRNO_IO_RETRY(errno))
1205 break;
5adc2528 1206
d62a17ae 1207 zfpm_connection_down("failed to write to socket");
1208 return 0;
1209 }
5adc2528 1210
d62a17ae 1211 if (bytes_written != bytes_to_write) {
5adc2528 1212
d62a17ae 1213 /*
1214 * Partial write.
1215 */
1216 stream_forward_getp(s, bytes_written);
1217 zfpm_g->stats.partial_writes++;
1218 break;
1219 }
5adc2528 1220
d62a17ae 1221 /*
1222 * We've written out the entire contents of the stream.
1223 */
1224 stream_reset(s);
5adc2528 1225
d62a17ae 1226 if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
1227 zfpm_g->stats.max_writes_hit++;
1228 break;
1229 }
5adc2528 1230
d62a17ae 1231 if (zfpm_thread_should_yield(thread)) {
1232 zfpm_g->stats.t_write_yields++;
1233 break;
1234 }
1235 } while (1);
5adc2528 1236
d62a17ae 1237 if (zfpm_writes_pending())
1238 zfpm_write_on();
5adc2528 1239
d62a17ae 1240 return 0;
5adc2528
AS
1241}
1242
1243/*
1244 * zfpm_connect_cb
1245 */
d62a17ae 1246static int zfpm_connect_cb(struct thread *t)
5adc2528 1247{
d62a17ae 1248 int sock, ret;
1249 struct sockaddr_in serv;
1250
d62a17ae 1251 assert(zfpm_g->state == ZFPM_STATE_ACTIVE);
1252
1253 sock = socket(AF_INET, SOCK_STREAM, 0);
1254 if (sock < 0) {
14a4d9d0 1255 zlog_err("Failed to create socket for connect(): %s",
d62a17ae 1256 strerror(errno));
1257 zfpm_g->stats.connect_no_sock++;
1258 return 0;
1259 }
1260
1261 set_nonblocking(sock);
1262
1263 /* Make server socket. */
1264 memset(&serv, 0, sizeof(serv));
1265 serv.sin_family = AF_INET;
1266 serv.sin_port = htons(zfpm_g->fpm_port);
5adc2528 1267#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
d62a17ae 1268 serv.sin_len = sizeof(struct sockaddr_in);
5adc2528 1269#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
d62a17ae 1270 if (!zfpm_g->fpm_server)
1271 serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1272 else
1273 serv.sin_addr.s_addr = (zfpm_g->fpm_server);
1274
1275 /*
1276 * Connect to the FPM.
1277 */
1278 zfpm_g->connect_calls++;
1279 zfpm_g->stats.connect_calls++;
1280 zfpm_g->last_connect_call_time = monotime(NULL);
1281
1282 ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
1283 if (ret >= 0) {
1284 zfpm_g->sock = sock;
1285 zfpm_connection_up("connect succeeded");
1286 return 1;
1287 }
1288
1289 if (errno == EINPROGRESS) {
1290 zfpm_g->sock = sock;
1291 zfpm_read_on();
1292 zfpm_write_on();
1293 zfpm_set_state(ZFPM_STATE_CONNECTING,
1294 "async connect in progress");
1295 return 0;
1296 }
1297
1298 zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
1299 close(sock);
1300
1301 /*
1302 * Restart timer for retrying connection.
1303 */
1304 zfpm_start_connect_timer("connect() failed");
1305 return 0;
5adc2528
AS
1306}
1307
1308/*
1309 * zfpm_set_state
1310 *
1311 * Move state machine into the given state.
1312 */
d62a17ae 1313static void zfpm_set_state(zfpm_state_t state, const char *reason)
5adc2528 1314{
d62a17ae 1315 zfpm_state_t cur_state = zfpm_g->state;
1316
1317 if (!reason)
1318 reason = "Unknown";
1319
1320 if (state == cur_state)
1321 return;
1322
1323 zfpm_debug("beginning state transition %s -> %s. Reason: %s",
1324 zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
1325 reason);
1326
1327 switch (state) {
1328
1329 case ZFPM_STATE_IDLE:
1330 assert(cur_state == ZFPM_STATE_ESTABLISHED);
1331 break;
1332
1333 case ZFPM_STATE_ACTIVE:
1334 assert(cur_state == ZFPM_STATE_IDLE
1335 || cur_state == ZFPM_STATE_CONNECTING);
1336 assert(zfpm_g->t_connect);
1337 break;
1338
1339 case ZFPM_STATE_CONNECTING:
1340 assert(zfpm_g->sock);
1341 assert(cur_state == ZFPM_STATE_ACTIVE);
1342 assert(zfpm_g->t_read);
1343 assert(zfpm_g->t_write);
1344 break;
1345
1346 case ZFPM_STATE_ESTABLISHED:
1347 assert(cur_state == ZFPM_STATE_ACTIVE
1348 || cur_state == ZFPM_STATE_CONNECTING);
1349 assert(zfpm_g->sock);
1350 assert(zfpm_g->t_read);
1351 assert(zfpm_g->t_write);
1352 break;
1353 }
1354
1355 zfpm_g->state = state;
5adc2528
AS
1356}
1357
1358/*
1359 * zfpm_calc_connect_delay
1360 *
1361 * Returns the number of seconds after which we should attempt to
1362 * reconnect to the FPM.
1363 */
d62a17ae 1364static long zfpm_calc_connect_delay(void)
5adc2528 1365{
d62a17ae 1366 time_t elapsed;
5adc2528 1367
d62a17ae 1368 /*
1369 * Return 0 if this is our first attempt to connect.
1370 */
1371 if (zfpm_g->connect_calls == 0) {
1372 return 0;
1373 }
5adc2528 1374
d62a17ae 1375 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
5adc2528 1376
d62a17ae 1377 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1378 return 0;
1379 }
5adc2528 1380
d62a17ae 1381 return ZFPM_CONNECT_RETRY_IVL - elapsed;
5adc2528
AS
1382}
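/*
 * Example: with ZFPM_CONNECT_RETRY_IVL at 5 seconds, if the previous
 * connect() attempt was made 2 seconds ago this returns 3; if 5 or more
 * seconds have elapsed (or no attempt has been made yet) it returns 0
 * and the reconnect is scheduled immediately.
 */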
1383
1384/*
1385 * zfpm_start_connect_timer
1386 */
d62a17ae 1387static void zfpm_start_connect_timer(const char *reason)
5adc2528 1388{
d62a17ae 1389 long delay_secs;
5adc2528 1390
d62a17ae 1391 assert(!zfpm_g->t_connect);
1392 assert(zfpm_g->sock < 0);
5adc2528 1393
d62a17ae 1394 assert(zfpm_g->state == ZFPM_STATE_IDLE
1395 || zfpm_g->state == ZFPM_STATE_ACTIVE
1396 || zfpm_g->state == ZFPM_STATE_CONNECTING);
5adc2528 1397
d62a17ae 1398 delay_secs = zfpm_calc_connect_delay();
1399 zfpm_debug("scheduling connect in %ld seconds", delay_secs);
5adc2528 1400
d62a17ae 1401 thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
1402 &zfpm_g->t_connect);
1403 zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
5adc2528
AS
1404}
1405
1406/*
1407 * zfpm_is_enabled
1408 *
2951a7a4 1409 * Returns true if the zebra FPM module has been enabled.
5adc2528 1410 */
d62a17ae 1411static inline int zfpm_is_enabled(void)
5adc2528 1412{
d62a17ae 1413 return zfpm_g->enabled;
5adc2528
AS
1414}
1415
1416/*
1417 * zfpm_conn_is_up
1418 *
2951a7a4 1419 * Returns true if the connection to the FPM is up.
5adc2528 1420 */
d62a17ae 1421static inline int zfpm_conn_is_up(void)
5adc2528 1422{
d62a17ae 1423 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1424 return 0;
5adc2528 1425
d62a17ae 1426 assert(zfpm_g->sock >= 0);
5adc2528 1427
d62a17ae 1428 return 1;
5adc2528
AS
1429}
1430
1431/*
1432 * zfpm_trigger_update
1433 *
1434 * The zebra code invokes this function to indicate that we should
1435 * send an update to the FPM about the given route_node.
1436 */
d62a17ae 1437static int zfpm_trigger_update(struct route_node *rn, const char *reason)
5adc2528 1438{
d62a17ae 1439 rib_dest_t *dest;
1440 char buf[PREFIX_STRLEN];
1441
1442 /*
1443 * Ignore if the connection is down. We will update the FPM about
1444 * all destinations once the connection comes up.
1445 */
1446 if (!zfpm_conn_is_up())
1447 return 0;
1448
1449 dest = rib_dest_from_rnode(rn);
1450
d62a17ae 1451 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
1452 zfpm_g->stats.redundant_triggers++;
1453 return 0;
1454 }
1455
1456 if (reason) {
1457 zfpm_debug("%s triggering update to FPM - Reason: %s",
1458 prefix2str(&rn->p, buf, sizeof(buf)), reason);
1459 }
1460
1461 SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1462 TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
1463 zfpm_g->stats.updates_triggered++;
1464
1465 /*
1466 * Make sure that writes are enabled.
1467 */
1468 if (zfpm_g->t_write)
1469 return 0;
1470
1471 zfpm_write_on();
1472 return 0;
5adc2528
AS
1473}
1474
e5218ec8
AD
1475/*
1476 * Generate Key for FPM MAC info hash entry
e5218ec8
AD
1477 */
1478static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1479{
1480 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1481 uint32_t mac_key;
1482
1483 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1484
1485 return jhash_2words(mac_key, fpm_mac->vni, 0);
1486}
1487
1488/*
1489 * Compare function for FPM MAC info hash lookup
1490 */
1491static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1492{
1493 const struct fpm_mac_info_t *fpm_mac1 = p1;
1494 const struct fpm_mac_info_t *fpm_mac2 = p2;
1495
1496 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1497 != 0)
1498 return false;
e5218ec8
AD
1499 if (fpm_mac1->vni != fpm_mac2->vni)
1500 return false;
1501
1502 return true;
1503}
1504
1505/*
1506 * Lookup FPM MAC info hash entry.
1507 */
1508static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
1509{
1510 return hash_lookup(zfpm_g->fpm_mac_info_table, key);
1511}
1512
1513/*
1514 * Callback to allocate fpm_mac_info_t structure.
1515 */
1516static void *zfpm_mac_info_alloc(void *p)
1517{
1518 const struct fpm_mac_info_t *key = p;
1519 struct fpm_mac_info_t *fpm_mac;
1520
1521 fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));
1522
1523 memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
e5218ec8
AD
1524 fpm_mac->vni = key->vni;
1525
1526 return (void *)fpm_mac;
1527}
1528
1529/*
1530 * Delink and free fpm_mac_info_t.
1531 */
1532static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
1533{
1534 hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
1535 TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
1536 XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
1537}
1538
a780a738
AD
1539/*
1540 * zfpm_trigger_rmac_update
1541 *
1542 * Zebra code invokes this function to indicate that we should
1543 * send an update to FPM for given MAC entry.
1544 *
1545 * This function checks if we already have enqueued an update for this RMAC,
1546 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
1547 */
1548static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
1549 bool delete, const char *reason)
1550{
1551 char buf[ETHER_ADDR_STRLEN];
1552 struct fpm_mac_info_t *fpm_mac, key;
1553 struct interface *vxlan_if, *svi_if;
44f7f132 1554 bool mac_found = false;
a780a738
AD
1555
1556 /*
1557 * Ignore if the connection is down. We will update the FPM about
1558 * all destinations once the connection comes up.
1559 */
1560 if (!zfpm_conn_is_up())
1561 return 0;
1562
1563 if (reason) {
1564 zfpm_debug("triggering update to FPM - Reason: %s - %s",
1565 reason,
1566 prefix_mac2str(&rmac->macaddr, buf, sizeof(buf)));
1567 }
1568
1569 vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
1570 svi_if = zl3vni_map_to_svi_if(zl3vni);
1571
1572 memset(&key, 0, sizeof(struct fpm_mac_info_t));
1573
1574 memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
a780a738
AD
1575 key.vni = zl3vni->vni;
1576
1577 /* Check if this MAC is already present in the queue. */
1578 fpm_mac = zfpm_mac_info_lookup(&key);
1579
1580 if (fpm_mac) {
44f7f132 1581 mac_found = true;
a780a738
AD
1582
1583 /*
44f7f132
AD
1584 * If the enqueued op is "add" and current op is "delete",
 1585 * this is a noop. So, unset the ZEBRA_MAC_UPDATE_FPM flag.
1586 * While processing FPM queue, we will silently delete this
1587 * MAC entry without sending any update for this MAC.
a780a738 1588 */
44f7f132
AD
1589 if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
1590 delete == 1) {
a780a738
AD
1591 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
1592 UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
44f7f132 1593 return 0;
a780a738 1594 }
44f7f132
AD
1595 } else {
1596 fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
1597 zfpm_mac_info_alloc);
1598 if (!fpm_mac)
1599 return 0;
a780a738
AD
1600 }
1601
44f7f132 1602 fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
c5431822 1603 fpm_mac->zebra_flags = rmac->flags;
a780a738
AD
1604 fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
1605 fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;
1606
1607 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
1608 if (delete)
1609 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
44f7f132
AD
1610 else
1611 UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
a780a738 1612
44f7f132
AD
1613 if (!mac_found)
1614 TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
a780a738
AD
1615
1616 zfpm_g->stats.updates_triggered++;
1617
a780a738
AD
1618 /* If writes are already enabled, return. */
1619 if (zfpm_g->t_write)
1620 return 0;
1621
1622 zfpm_write_on();
1623 return 0;
1624}
1625
fbe748e5
AD
1626/*
 1627 * This function is called when the FPM connection is established.
1628 * Iterate over all the RMAC entries for the given L3VNI
1629 * and enqueue the RMAC for FPM processing.
1630 */
7f5818fb 1631static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet,
fbe748e5
AD
1632 void *args)
1633{
1634 zebra_mac_t *zrmac = (zebra_mac_t *)backet->data;
1635 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args;
1636
1637 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1638}
1639
1640/*
 1641 * This function is called when the FPM connection is established.
1642 * This function iterates over all the L3VNIs to trigger
1643 * FPM updates for RMACs currently available.
1644 */
7f5818fb 1645static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args)
fbe748e5
AD
1646{
1647 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)backet->data;
1648
1649 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1650 (void *)zl3vni);
1651}
1652
5adc2528 1653/*
eeaf257b 1654 * zfpm_stats_timer_cb
5adc2528 1655 */
d62a17ae 1656static int zfpm_stats_timer_cb(struct thread *t)
5adc2528 1657{
d62a17ae 1658 zfpm_g->t_stats = NULL;
5adc2528 1659
d62a17ae 1660 /*
1661 * Remember the stats collected in the last interval for display
1662 * purposes.
1663 */
1664 zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);
5adc2528 1665
d62a17ae 1666 /*
1667 * Add the current set of stats into the cumulative statistics.
1668 */
1669 zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
1670 &zfpm_g->cumulative_stats);
5adc2528 1671
d62a17ae 1672 /*
1673 * Start collecting stats afresh over the next interval.
1674 */
1675 zfpm_stats_reset(&zfpm_g->stats);
5adc2528 1676
d62a17ae 1677 zfpm_start_stats_timer();
5adc2528 1678
d62a17ae 1679 return 0;
5adc2528
AS
1680}
1681
1682/*
1683 * zfpm_stop_stats_timer
1684 */
d62a17ae 1685static void zfpm_stop_stats_timer(void)
5adc2528 1686{
d62a17ae 1687 if (!zfpm_g->t_stats)
1688 return;
5adc2528 1689
d62a17ae 1690 zfpm_debug("Stopping existing stats timer");
1691 THREAD_TIMER_OFF(zfpm_g->t_stats);
5adc2528
AS
1692}
1693
1694/*
1695 * zfpm_start_stats_timer
1696 */
d62a17ae 1697void zfpm_start_stats_timer(void)
5adc2528 1698{
d62a17ae 1699 assert(!zfpm_g->t_stats);
5adc2528 1700
d62a17ae 1701 thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
1702 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
5adc2528
AS
1703}
1704
1705/*
1706 * Helper macro for zfpm_show_stats() below.
1707 */
d62a17ae 1708#define ZFPM_SHOW_STAT(counter) \
1709 do { \
1710 vty_out(vty, "%-40s %10lu %16lu\n", #counter, \
1711 total_stats.counter, zfpm_g->last_ivl_stats.counter); \
1712 } while (0)
5adc2528
AS
1713
1714/*
1715 * zfpm_show_stats
1716 */
d62a17ae 1717static void zfpm_show_stats(struct vty *vty)
5adc2528 1718{
eeaf257b 1719 struct zfpm_stats total_stats;
d62a17ae 1720 time_t elapsed;
1721
1722 vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
1723 ZFPM_STATS_IVL_SECS);
1724
1725 /*
1726 * Compute the total stats up to this instant.
1727 */
1728 zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
1729 &total_stats);
1730
1731 ZFPM_SHOW_STAT(connect_calls);
1732 ZFPM_SHOW_STAT(connect_no_sock);
1733 ZFPM_SHOW_STAT(read_cb_calls);
1734 ZFPM_SHOW_STAT(write_cb_calls);
1735 ZFPM_SHOW_STAT(write_calls);
1736 ZFPM_SHOW_STAT(partial_writes);
1737 ZFPM_SHOW_STAT(max_writes_hit);
1738 ZFPM_SHOW_STAT(t_write_yields);
1739 ZFPM_SHOW_STAT(nop_deletes_skipped);
1740 ZFPM_SHOW_STAT(route_adds);
1741 ZFPM_SHOW_STAT(route_dels);
1742 ZFPM_SHOW_STAT(updates_triggered);
d62a17ae 1743 ZFPM_SHOW_STAT(redundant_triggers);
1744 ZFPM_SHOW_STAT(dests_del_after_update);
1745 ZFPM_SHOW_STAT(t_conn_down_starts);
1746 ZFPM_SHOW_STAT(t_conn_down_dests_processed);
1747 ZFPM_SHOW_STAT(t_conn_down_yields);
1748 ZFPM_SHOW_STAT(t_conn_down_finishes);
1749 ZFPM_SHOW_STAT(t_conn_up_starts);
1750 ZFPM_SHOW_STAT(t_conn_up_dests_processed);
1751 ZFPM_SHOW_STAT(t_conn_up_yields);
1752 ZFPM_SHOW_STAT(t_conn_up_aborts);
1753 ZFPM_SHOW_STAT(t_conn_up_finishes);
1754
1755 if (!zfpm_g->last_stats_clear_time)
1756 return;
1757
1758 elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);
1759
1760 vty_out(vty, "\nStats were cleared %lu seconds ago\n",
1761 (unsigned long)elapsed);
5adc2528
AS
1762}
1763
1764/*
1765 * zfpm_clear_stats
1766 */
d62a17ae 1767static void zfpm_clear_stats(struct vty *vty)
5adc2528 1768{
d62a17ae 1769 if (!zfpm_is_enabled()) {
1770 vty_out(vty, "The FPM module is not enabled...\n");
1771 return;
1772 }
5adc2528 1773
d62a17ae 1774 zfpm_stats_reset(&zfpm_g->stats);
1775 zfpm_stats_reset(&zfpm_g->last_ivl_stats);
1776 zfpm_stats_reset(&zfpm_g->cumulative_stats);
5adc2528 1777
d62a17ae 1778 zfpm_stop_stats_timer();
1779 zfpm_start_stats_timer();
5adc2528 1780
d62a17ae 1781 zfpm_g->last_stats_clear_time = monotime(NULL);
5adc2528 1782
d62a17ae 1783 vty_out(vty, "Cleared FPM stats\n");
5adc2528
AS
1784}
1785
1786/*
1787 * show_zebra_fpm_stats
1788 */
1789DEFUN (show_zebra_fpm_stats,
1790 show_zebra_fpm_stats_cmd,
1791 "show zebra fpm stats",
1792 SHOW_STR
41e7fb80 1793 ZEBRA_STR
5adc2528
AS
1794 "Forwarding Path Manager information\n"
1795 "Statistics\n")
1796{
d62a17ae 1797 zfpm_show_stats(vty);
1798 return CMD_SUCCESS;
5adc2528
AS
1799}
1800
1801/*
1802 * clear_zebra_fpm_stats
1803 */
1804DEFUN (clear_zebra_fpm_stats,
1805 clear_zebra_fpm_stats_cmd,
1806 "clear zebra fpm stats",
1807 CLEAR_STR
41e7fb80 1808 ZEBRA_STR
5adc2528
AS
1809 "Clear Forwarding Path Manager information\n"
1810 "Statistics\n")
1811{
d62a17ae 1812 zfpm_clear_stats(vty);
1813 return CMD_SUCCESS;
5adc2528
AS
1814}
1815
711ff0ba 1816/*
d62a17ae 1817 * update fpm connection information
711ff0ba 1818 */
e52702f2
QY
1819DEFUN ( fpm_remote_ip,
1820 fpm_remote_ip_cmd,
1821 "fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1822 "fpm connection remote ip and port\n"
1823 "Remote fpm server ip A.B.C.D\n"
1824 "Enter ip ")
1825{
1826
d62a17ae 1827 in_addr_t fpm_server;
1828 uint32_t port_no;
711ff0ba 1829
d62a17ae 1830 fpm_server = inet_addr(argv[3]->arg);
1831 if (fpm_server == INADDR_NONE)
1832 return CMD_ERR_INCOMPLETE;
711ff0ba 1833
d62a17ae 1834 port_no = atoi(argv[5]->arg);
1835 if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
1836 return CMD_ERR_INCOMPLETE;
711ff0ba 1837
d62a17ae 1838 zfpm_g->fpm_server = fpm_server;
1839 zfpm_g->fpm_port = port_no;
711ff0ba
USK
1840
1841
d62a17ae 1842 return CMD_SUCCESS;
711ff0ba
USK
1843}
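/*
 * Usage sketch (illustrative values only), from the config node:
 *
 *     fpm connection ip 192.0.2.10 port 2620
 *
 * points zebra at an FPM server on 192.0.2.10. The matching
 * "no fpm connection ..." command below reverts to the
 * FPM_DEFAULT_IP/FPM_DEFAULT_PORT defaults.
 */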
1844
e52702f2
QY
1845DEFUN ( no_fpm_remote_ip,
1846 no_fpm_remote_ip_cmd,
1847 "no fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1848 "fpm connection remote ip and port\n"
1849 "Connection\n"
1850 "Remote fpm server ip A.B.C.D\n"
1851 "Enter ip ")
1852{
d62a17ae 1853 if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
1854 || zfpm_g->fpm_port != atoi(argv[6]->arg))
1855 return CMD_ERR_NO_MATCH;
711ff0ba 1856
d62a17ae 1857 zfpm_g->fpm_server = FPM_DEFAULT_IP;
1858 zfpm_g->fpm_port = FPM_DEFAULT_PORT;
711ff0ba 1859
d62a17ae 1860 return CMD_SUCCESS;
711ff0ba 1861}
711ff0ba 1862
fb0aa886
AS
1863/*
1864 * zfpm_init_message_format
1865 */
d62a17ae 1866static inline void zfpm_init_message_format(const char *format)
fb0aa886 1867{
d62a17ae 1868 int have_netlink, have_protobuf;
fb0aa886 1869
fb0aa886 1870#ifdef HAVE_NETLINK
d62a17ae 1871 have_netlink = 1;
4b2792b5 1872#else
d62a17ae 1873 have_netlink = 0;
fb0aa886
AS
1874#endif
1875
1876#ifdef HAVE_PROTOBUF
d62a17ae 1877 have_protobuf = 1;
4b2792b5 1878#else
d62a17ae 1879 have_protobuf = 0;
fb0aa886
AS
1880#endif
1881
d62a17ae 1882 zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;
fb0aa886 1883
d62a17ae 1884 if (!format) {
1885 if (have_netlink) {
1886 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1887 } else if (have_protobuf) {
1888 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1889 }
1890 return;
fb0aa886 1891 }
fb0aa886 1892
d62a17ae 1893 if (!strcmp("netlink", format)) {
1894 if (!have_netlink) {
1c50c1c0
QY
1895 flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
1896 "FPM netlink message format is not available");
d62a17ae 1897 return;
1898 }
1899 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1900 return;
fb0aa886 1901 }
fb0aa886 1902
d62a17ae 1903 if (!strcmp("protobuf", format)) {
1904 if (!have_protobuf) {
af4c2728 1905 flog_err(
e914ccbe 1906 EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
d62a17ae 1907 "FPM protobuf message format is not available");
1908 return;
1909 }
8b9cf71c
DL
1910 flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
1911 "FPM protobuf message format is deprecated and scheduled to be removed. "
1912 "Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
d62a17ae 1913 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1914 return;
fb0aa886 1915 }
fb0aa886 1916
e914ccbe 1917 flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
9df414fe 1918 format);
fb0aa886
AS
1919}
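/*
 * The 'format' string passed to zfpm_init_message_format() comes from
 * THIS_MODULE->load_args in zfpm_init() below, i.e. whatever follows the
 * ':' in the -M option used to load this module (for example
 * "-M fpm:netlink" -- an illustrative invocation; check your build's
 * documentation for the exact module name).
 */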
1920
711ff0ba 1921/**
d62a17ae 1922 * fpm_remote_srv_write
711ff0ba 1923 *
d62a17ae 1924 * Config write handler for the remote fpm connection.
711ff0ba
USK
1925 *
1926 * Returns ZERO on success.
1927 */
1928
d62a17ae 1929static int fpm_remote_srv_write(struct vty *vty)
711ff0ba 1930{
d62a17ae 1931 struct in_addr in;
711ff0ba 1932
d62a17ae 1933 in.s_addr = zfpm_g->fpm_server;
711ff0ba 1934
9d1c2659 1935 if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
996c9314
LB
1936 && zfpm_g->fpm_server != INADDR_ANY)
1937 || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
d62a17ae 1938 vty_out(vty, "fpm connection ip %s port %d\n", inet_ntoa(in),
1939 zfpm_g->fpm_port);
711ff0ba 1940
d62a17ae 1941 return 0;
711ff0ba
USK
1942}
1943
1944
612c2c15 1945static int fpm_remote_srv_write(struct vty *vty);
4f8ea50c 1946/* Zebra node */
62b346ee 1947static struct cmd_node zebra_node = {
f4b8291f 1948 .name = "zebra",
62b346ee 1949 .node = ZEBRA_NODE,
24389580 1950 .parent_node = CONFIG_NODE,
62b346ee 1951 .prompt = "",
612c2c15 1952 .config_write = fpm_remote_srv_write,
62b346ee 1953};
4f8ea50c
DL
1954
1955
5adc2528
AS
1956/**
1957 * zfpm_init
1958 *
1959 * One-time initialization of the Zebra FPM module.
1960 *
1961 * @param[in] port port at which FPM is running.
2951a7a4 1962 * @param[in] enable true if the zebra FPM module should be enabled
fb0aa886 1963 * @param[in] format to use to talk to the FPM. Can be 'netlink' or 'protobuf'.
5adc2528 1964 *
2951a7a4 1965 * Returns true on success.
5adc2528 1966 */
d62a17ae 1967static int zfpm_init(struct thread_master *master)
5adc2528 1968{
d62a17ae 1969 int enable = 1;
1970 uint16_t port = 0;
1971 const char *format = THIS_MODULE->load_args;
5adc2528 1972
d62a17ae 1973 memset(zfpm_g, 0, sizeof(*zfpm_g));
1974 zfpm_g->master = master;
1975 TAILQ_INIT(&zfpm_g->dest_q);
e5218ec8
AD
1976 TAILQ_INIT(&zfpm_g->mac_q);
1977
 1978 /* Create hash table for fpm_mac_info_t entries */
1979 zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
fbe748e5
AD
1980 zfpm_mac_info_cmp,
1981 "FPM MAC info hash table");
e5218ec8 1982
d62a17ae 1983 zfpm_g->sock = -1;
1984 zfpm_g->state = ZFPM_STATE_IDLE;
5adc2528 1985
d62a17ae 1986 zfpm_stats_init(&zfpm_g->stats);
1987 zfpm_stats_init(&zfpm_g->last_ivl_stats);
1988 zfpm_stats_init(&zfpm_g->cumulative_stats);
5adc2528 1989
612c2c15 1990 install_node(&zebra_node);
d62a17ae 1991 install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
1992 install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
1993 install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
1994 install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);
5adc2528 1995
d62a17ae 1996 zfpm_init_message_format(format);
fb0aa886 1997
d62a17ae 1998 /*
1999 * Disable FPM interface if no suitable format is available.
2000 */
2001 if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
2002 enable = 0;
fb0aa886 2003
d62a17ae 2004 zfpm_g->enabled = enable;
5adc2528 2005
d62a17ae 2006 if (!zfpm_g->fpm_server)
2007 zfpm_g->fpm_server = FPM_DEFAULT_IP;
711ff0ba 2008
d62a17ae 2009 if (!port)
2010 port = FPM_DEFAULT_PORT;
5adc2528 2011
d62a17ae 2012 zfpm_g->fpm_port = port;
5adc2528 2013
d62a17ae 2014 zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
2015 zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);
5adc2528 2016
d62a17ae 2017 zfpm_start_stats_timer();
2018 zfpm_start_connect_timer("initialized");
2019 return 0;
4f8ea50c 2020}
5adc2528 2021
f0c459f0
DS
2022static int zfpm_fini(void)
2023{
2024 zfpm_write_off();
2025 zfpm_read_off();
2026 zfpm_connect_off();
2027
2028 zfpm_stop_stats_timer();
2029
2030 hook_unregister(rib_update, zfpm_trigger_update);
2031 return 0;
2032}
2033
d62a17ae 2034static int zebra_fpm_module_init(void)
4f8ea50c 2035{
d62a17ae 2036 hook_register(rib_update, zfpm_trigger_update);
a780a738 2037 hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
d62a17ae 2038 hook_register(frr_late_init, zfpm_init);
f0c459f0 2039 hook_register(frr_early_fini, zfpm_fini);
d62a17ae 2040 return 0;
5adc2528 2041}
4f8ea50c 2042
d62a17ae 2043FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
2044 .description = "zebra FPM (Forwarding Plane Manager) module",
2045 .init = zebra_fpm_module_init, )