]> git.proxmox.com Git - mirror_frr.git/blame - zebra/zebra_fpm.c
zebra: Remove typedef around zfpm_rnodes_iter
[mirror_frr.git] / zebra / zebra_fpm.c
CommitLineData
5adc2528
AS
1/*
2 * Main implementation file for interface to Forwarding Plane Manager.
3 *
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
6 *
7 * This file is part of GNU Zebra.
8 *
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
13 *
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
896014f4
DL
19 * You should have received a copy of the GNU General Public License along
20 * with this program; see the file COPYING; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
5adc2528
AS
22 */
23
24#include <zebra.h>
25
26#include "log.h"
4f8ea50c 27#include "libfrr.h"
5adc2528
AS
28#include "stream.h"
29#include "thread.h"
30#include "network.h"
31#include "command.h"
4f8ea50c 32#include "version.h"
e5218ec8 33#include "jhash.h"
5adc2528
AS
34
35#include "zebra/rib.h"
7c551956
DS
36#include "zebra/zserv.h"
37#include "zebra/zebra_ns.h"
38#include "zebra/zebra_vrf.h"
67aeb554 39#include "zebra/zebra_errors.h"
e5218ec8 40#include "zebra/zebra_memory.h"
5adc2528
AS
41
42#include "fpm/fpm.h"
5adc2528 43#include "zebra_fpm_private.h"
e5218ec8 44#include "zebra/zebra_router.h"
a780a738 45#include "zebra_vxlan_private.h"
e5218ec8
AD
46
47DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
5adc2528
AS
48
49/*
50 * Interval at which we attempt to connect to the FPM.
51 */
52#define ZFPM_CONNECT_RETRY_IVL 5
53
54/*
55 * Sizes of outgoing and incoming stream buffers for writing/reading
56 * FPM messages.
57 */
58#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
59#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
60
61/*
62 * The maximum number of times the FPM socket write callback can call
63 * 'write' before it yields.
64 */
65#define ZFPM_MAX_WRITES_PER_RUN 10
66
67/*
68 * Interval over which we collect statistics.
69 */
70#define ZFPM_STATS_IVL_SECS 10
fbe748e5
AD
71#define FPM_MAX_MAC_MSG_LEN 512
72
7f5818fb 73static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args);
5adc2528
AS
74
75/*
76 * Structure that holds state for iterating over all route_node
77 * structures that are candidates for being communicated to the FPM.
78 */
332cba05 79struct zfpm_rnodes_iter {
d62a17ae 80 rib_tables_iter_t tables_iter;
81 route_table_iter_t iter;
332cba05 82};
5adc2528
AS
83
84/*
85 * Statistics.
86 */
87typedef struct zfpm_stats_t_ {
d62a17ae 88 unsigned long connect_calls;
89 unsigned long connect_no_sock;
5adc2528 90
d62a17ae 91 unsigned long read_cb_calls;
5adc2528 92
d62a17ae 93 unsigned long write_cb_calls;
94 unsigned long write_calls;
95 unsigned long partial_writes;
96 unsigned long max_writes_hit;
97 unsigned long t_write_yields;
5adc2528 98
d62a17ae 99 unsigned long nop_deletes_skipped;
100 unsigned long route_adds;
101 unsigned long route_dels;
5adc2528 102
d62a17ae 103 unsigned long updates_triggered;
104 unsigned long redundant_triggers;
5adc2528 105
d62a17ae 106 unsigned long dests_del_after_update;
5adc2528 107
d62a17ae 108 unsigned long t_conn_down_starts;
109 unsigned long t_conn_down_dests_processed;
110 unsigned long t_conn_down_yields;
111 unsigned long t_conn_down_finishes;
5adc2528 112
d62a17ae 113 unsigned long t_conn_up_starts;
114 unsigned long t_conn_up_dests_processed;
115 unsigned long t_conn_up_yields;
116 unsigned long t_conn_up_aborts;
117 unsigned long t_conn_up_finishes;
5adc2528
AS
118
119} zfpm_stats_t;
120
121/*
122 * States for the FPM state machine.
123 */
124typedef enum {
125
d62a17ae 126 /*
127 * In this state we are not yet ready to connect to the FPM. This
128 * can happen when this module is disabled, or if we're cleaning up
129 * after a connection has gone down.
130 */
131 ZFPM_STATE_IDLE,
132
133 /*
134 * Ready to talk to the FPM and periodically trying to connect to
135 * it.
136 */
137 ZFPM_STATE_ACTIVE,
138
139 /*
140 * In the middle of bringing up a TCP connection. Specifically,
141 * waiting for a connect() call to complete asynchronously.
142 */
143 ZFPM_STATE_CONNECTING,
144
145 /*
146 * TCP connection to the FPM is up.
147 */
148 ZFPM_STATE_ESTABLISHED
5adc2528
AS
149
150} zfpm_state_t;
151
fb0aa886
AS
152/*
153 * Message format to be used to communicate with the FPM.
154 */
d62a17ae 155typedef enum {
156 ZFPM_MSG_FORMAT_NONE,
157 ZFPM_MSG_FORMAT_NETLINK,
158 ZFPM_MSG_FORMAT_PROTOBUF,
fb0aa886 159} zfpm_msg_format_e;
5adc2528
AS
160/*
161 * Globals.
162 */
d62a17ae 163typedef struct zfpm_glob_t_ {
164
165 /*
166 * True if the FPM module has been enabled.
167 */
168 int enabled;
169
170 /*
171 * Message format to be used to communicate with the fpm.
172 */
173 zfpm_msg_format_e message_format;
174
175 struct thread_master *master;
176
177 zfpm_state_t state;
178
179 in_addr_t fpm_server;
180 /*
181 * Port on which the FPM is running.
182 */
183 int fpm_port;
184
185 /*
186 * List of rib_dest_t structures to be processed
187 */
188 TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;
189
e5218ec8
AD
190 /*
191 * List of fpm_mac_info structures to be processed
192 */
193 TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;
194
195 /*
196 * Hash table of fpm_mac_info_t entries
197 *
198 * While adding fpm_mac_info_t for a MAC to the mac_q,
199 * it is possible that another fpm_mac_info_t node for the this MAC
200 * is already present in the queue.
201 * This is possible in the case of consecutive add->delete operations.
202 * To avoid such duplicate insertions in the mac_q,
203 * define a hash table for fpm_mac_info_t which can be looked up
204 * to see if an fpm_mac_info_t node for a MAC is already present
205 * in the mac_q.
206 */
207 struct hash *fpm_mac_info_table;
208
d62a17ae 209 /*
210 * Stream socket to the FPM.
211 */
212 int sock;
213
214 /*
215 * Buffers for messages to/from the FPM.
216 */
217 struct stream *obuf;
218 struct stream *ibuf;
219
220 /*
221 * Threads for I/O.
222 */
223 struct thread *t_connect;
224 struct thread *t_write;
225 struct thread *t_read;
226
227 /*
228 * Thread to clean up after the TCP connection to the FPM goes down
229 * and the state that belongs to it.
230 */
231 struct thread *t_conn_down;
232
233 struct {
332cba05 234 struct zfpm_rnodes_iter iter;
d62a17ae 235 } t_conn_down_state;
236
237 /*
238 * Thread to take actions once the TCP conn to the FPM comes up, and
239 * the state that belongs to it.
240 */
241 struct thread *t_conn_up;
242
243 struct {
332cba05 244 struct zfpm_rnodes_iter iter;
d62a17ae 245 } t_conn_up_state;
246
247 unsigned long connect_calls;
248 time_t last_connect_call_time;
249
250 /*
251 * Stats from the start of the current statistics interval up to
252 * now. These are the counters we typically update in the code.
253 */
254 zfpm_stats_t stats;
255
256 /*
257 * Statistics that were gathered in the last collection interval.
258 */
259 zfpm_stats_t last_ivl_stats;
260
261 /*
262 * Cumulative stats from the last clear to the start of the current
263 * statistics interval.
264 */
265 zfpm_stats_t cumulative_stats;
266
267 /*
268 * Stats interval timer.
269 */
270 struct thread *t_stats;
271
272 /*
273 * If non-zero, the last time when statistics were cleared.
274 */
275 time_t last_stats_clear_time;
5adc2528
AS
276
277} zfpm_glob_t;
278
279static zfpm_glob_t zfpm_glob_space;
280static zfpm_glob_t *zfpm_g = &zfpm_glob_space;
281
d62a17ae 282static int zfpm_trigger_update(struct route_node *rn, const char *reason);
4f8ea50c 283
d62a17ae 284static int zfpm_read_cb(struct thread *thread);
285static int zfpm_write_cb(struct thread *thread);
5adc2528 286
d62a17ae 287static void zfpm_set_state(zfpm_state_t state, const char *reason);
288static void zfpm_start_connect_timer(const char *reason);
289static void zfpm_start_stats_timer(void);
a780a738 290static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
5adc2528
AS
291
/*
 * zfpm_thread_should_yield
 *
 * Ask the threading layer whether this long-running callback should
 * give up the CPU for now.
 */
static inline int zfpm_thread_should_yield(struct thread *t)
{
	return thread_should_yield(t);
}
299
300/*
301 * zfpm_state_to_str
302 */
d62a17ae 303static const char *zfpm_state_to_str(zfpm_state_t state)
5adc2528 304{
d62a17ae 305 switch (state) {
5adc2528 306
d62a17ae 307 case ZFPM_STATE_IDLE:
308 return "idle";
5adc2528 309
d62a17ae 310 case ZFPM_STATE_ACTIVE:
311 return "active";
5adc2528 312
d62a17ae 313 case ZFPM_STATE_CONNECTING:
314 return "connecting";
5adc2528 315
d62a17ae 316 case ZFPM_STATE_ESTABLISHED:
317 return "established";
5adc2528 318
d62a17ae 319 default:
320 return "unknown";
321 }
5adc2528
AS
322}
323
5adc2528
AS
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given reference
 * time, which must come from the monotonic clock.
 */
static time_t zfpm_get_elapsed_time(time_t reference)
{
	time_t now = monotime(NULL);

	/* The monotonic clock must never run backwards. */
	if (now < reference) {
		assert(0);
		return 0;
	}

	return now - reference;
}
342
5adc2528
AS
343/*
344 * zfpm_rnodes_iter_init
345 */
332cba05 346static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
5adc2528 347{
d62a17ae 348 memset(iter, 0, sizeof(*iter));
349 rib_tables_iter_init(&iter->tables_iter);
350
351 /*
352 * This is a hack, but it makes implementing 'next' easier by
353 * ensuring that route_table_iter_next() will return NULL the first
354 * time we call it.
355 */
356 route_table_iter_init(&iter->iter, NULL);
357 route_table_iter_cleanup(&iter->iter);
5adc2528
AS
358}
359
360/*
361 * zfpm_rnodes_iter_next
362 */
332cba05
DS
363static inline struct route_node *
364zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
5adc2528 365{
d62a17ae 366 struct route_node *rn;
367 struct route_table *table;
5adc2528 368
d62a17ae 369 while (1) {
370 rn = route_table_iter_next(&iter->iter);
371 if (rn)
372 return rn;
5adc2528 373
d62a17ae 374 /*
375 * We've made our way through this table, go to the next one.
376 */
377 route_table_iter_cleanup(&iter->iter);
5adc2528 378
c6bbea17 379 table = rib_tables_iter_next(&iter->tables_iter);
5adc2528 380
d62a17ae 381 if (!table)
382 return NULL;
5adc2528 383
d62a17ae 384 route_table_iter_init(&iter->iter, table);
385 }
5adc2528 386
d62a17ae 387 return NULL;
5adc2528
AS
388}
389
390/*
391 * zfpm_rnodes_iter_pause
392 */
332cba05 393static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
5adc2528 394{
d62a17ae 395 route_table_iter_pause(&iter->iter);
5adc2528
AS
396}
397
398/*
399 * zfpm_rnodes_iter_cleanup
400 */
332cba05 401static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
5adc2528 402{
d62a17ae 403 route_table_iter_cleanup(&iter->iter);
404 rib_tables_iter_cleanup(&iter->tables_iter);
5adc2528
AS
405}
406
407/*
408 * zfpm_stats_init
409 *
410 * Initialize a statistics block.
411 */
d62a17ae 412static inline void zfpm_stats_init(zfpm_stats_t *stats)
5adc2528 413{
d62a17ae 414 memset(stats, 0, sizeof(*stats));
5adc2528
AS
415}
416
417/*
418 * zfpm_stats_reset
419 */
d62a17ae 420static inline void zfpm_stats_reset(zfpm_stats_t *stats)
5adc2528 421{
d62a17ae 422 zfpm_stats_init(stats);
5adc2528
AS
423}
424
425/*
426 * zfpm_stats_copy
427 */
d62a17ae 428static inline void zfpm_stats_copy(const zfpm_stats_t *src, zfpm_stats_t *dest)
5adc2528 429{
d62a17ae 430 memcpy(dest, src, sizeof(*dest));
5adc2528
AS
431}
432
433/*
434 * zfpm_stats_compose
435 *
436 * Total up the statistics in two stats structures ('s1 and 's2') and
437 * return the result in the third argument, 'result'. Note that the
438 * pointer 'result' may be the same as 's1' or 's2'.
439 *
440 * For simplicity, the implementation below assumes that the stats
441 * structure is composed entirely of counters. This can easily be
442 * changed when necessary.
443 */
d62a17ae 444static void zfpm_stats_compose(const zfpm_stats_t *s1, const zfpm_stats_t *s2,
445 zfpm_stats_t *result)
5adc2528 446{
d62a17ae 447 const unsigned long *p1, *p2;
448 unsigned long *result_p;
449 int i, num_counters;
5adc2528 450
d62a17ae 451 p1 = (const unsigned long *)s1;
452 p2 = (const unsigned long *)s2;
453 result_p = (unsigned long *)result;
5adc2528 454
d62a17ae 455 num_counters = (sizeof(zfpm_stats_t) / sizeof(unsigned long));
5adc2528 456
d62a17ae 457 for (i = 0; i < num_counters; i++) {
458 result_p[i] = p1[i] + p2[i];
459 }
5adc2528
AS
460}
461
462/*
463 * zfpm_read_on
464 */
d62a17ae 465static inline void zfpm_read_on(void)
5adc2528 466{
d62a17ae 467 assert(!zfpm_g->t_read);
468 assert(zfpm_g->sock >= 0);
5adc2528 469
d62a17ae 470 thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
471 &zfpm_g->t_read);
5adc2528
AS
472}
473
474/*
475 * zfpm_write_on
476 */
d62a17ae 477static inline void zfpm_write_on(void)
5adc2528 478{
d62a17ae 479 assert(!zfpm_g->t_write);
480 assert(zfpm_g->sock >= 0);
5adc2528 481
d62a17ae 482 thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
483 &zfpm_g->t_write);
5adc2528
AS
484}
485
486/*
487 * zfpm_read_off
488 */
d62a17ae 489static inline void zfpm_read_off(void)
5adc2528 490{
d62a17ae 491 THREAD_READ_OFF(zfpm_g->t_read);
5adc2528
AS
492}
493
494/*
495 * zfpm_write_off
496 */
d62a17ae 497static inline void zfpm_write_off(void)
5adc2528 498{
d62a17ae 499 THREAD_WRITE_OFF(zfpm_g->t_write);
5adc2528
AS
500}
501
f0c459f0
DS
502static inline void zfpm_connect_off(void)
503{
504 THREAD_TIMER_OFF(zfpm_g->t_connect);
505}
506
5adc2528
AS
507/*
508 * zfpm_conn_up_thread_cb
509 *
510 * Callback for actions to be taken when the connection to the FPM
511 * comes up.
512 */
d62a17ae 513static int zfpm_conn_up_thread_cb(struct thread *thread)
5adc2528 514{
d62a17ae 515 struct route_node *rnode;
332cba05 516 struct zfpm_rnodes_iter *iter;
d62a17ae 517 rib_dest_t *dest;
5adc2528 518
d62a17ae 519 zfpm_g->t_conn_up = NULL;
5adc2528 520
d62a17ae 521 iter = &zfpm_g->t_conn_up_state.iter;
5adc2528 522
d62a17ae 523 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
524 zfpm_debug(
525 "Connection not up anymore, conn_up thread aborting");
526 zfpm_g->stats.t_conn_up_aborts++;
527 goto done;
528 }
5adc2528 529
fbe748e5
AD
530 /* Enqueue FPM updates for all the RMAC entries */
531 hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table, NULL);
532
d62a17ae 533 while ((rnode = zfpm_rnodes_iter_next(iter))) {
534 dest = rib_dest_from_rnode(rnode);
535
536 if (dest) {
537 zfpm_g->stats.t_conn_up_dests_processed++;
538 zfpm_trigger_update(rnode, NULL);
539 }
540
541 /*
542 * Yield if need be.
543 */
544 if (!zfpm_thread_should_yield(thread))
545 continue;
546
547 zfpm_g->stats.t_conn_up_yields++;
548 zfpm_rnodes_iter_pause(iter);
549 zfpm_g->t_conn_up = NULL;
550 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
551 NULL, 0, &zfpm_g->t_conn_up);
552 return 0;
5adc2528
AS
553 }
554
d62a17ae 555 zfpm_g->stats.t_conn_up_finishes++;
556
557done:
558 zfpm_rnodes_iter_cleanup(iter);
559 return 0;
5adc2528
AS
560}
561
562/*
563 * zfpm_connection_up
564 *
565 * Called when the connection to the FPM comes up.
566 */
d62a17ae 567static void zfpm_connection_up(const char *detail)
5adc2528 568{
d62a17ae 569 assert(zfpm_g->sock >= 0);
570 zfpm_read_on();
571 zfpm_write_on();
572 zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);
573
574 /*
575 * Start thread to push existing routes to the FPM.
576 */
577 assert(!zfpm_g->t_conn_up);
578
579 zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
580
581 zfpm_debug("Starting conn_up thread");
582 zfpm_g->t_conn_up = NULL;
583 thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
584 &zfpm_g->t_conn_up);
585 zfpm_g->stats.t_conn_up_starts++;
5adc2528
AS
586}
587
588/*
589 * zfpm_connect_check
590 *
591 * Check if an asynchronous connect() to the FPM is complete.
592 */
d62a17ae 593static void zfpm_connect_check(void)
5adc2528 594{
d62a17ae 595 int status;
596 socklen_t slen;
597 int ret;
598
599 zfpm_read_off();
600 zfpm_write_off();
601
602 slen = sizeof(status);
603 ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
604 &slen);
605
606 if (ret >= 0 && status == 0) {
607 zfpm_connection_up("async connect complete");
608 return;
609 }
610
611 /*
612 * getsockopt() failed or indicated an error on the socket.
613 */
614 close(zfpm_g->sock);
615 zfpm_g->sock = -1;
616
617 zfpm_start_connect_timer("getsockopt() after async connect failed");
618 return;
5adc2528
AS
619}
620
621/*
622 * zfpm_conn_down_thread_cb
623 *
624 * Callback that is invoked to clean up state after the TCP connection
625 * to the FPM goes down.
626 */
d62a17ae 627static int zfpm_conn_down_thread_cb(struct thread *thread)
5adc2528 628{
d62a17ae 629 struct route_node *rnode;
332cba05 630 struct zfpm_rnodes_iter *iter;
d62a17ae 631 rib_dest_t *dest;
a780a738 632 struct fpm_mac_info_t *mac = NULL;
5adc2528 633
d62a17ae 634 assert(zfpm_g->state == ZFPM_STATE_IDLE);
5adc2528 635
a780a738
AD
636 /*
637 * Delink and free all fpm_mac_info_t nodes
638 * in the mac_q and fpm_mac_info_hash
639 */
640 while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
641 zfpm_mac_info_del(mac);
642
d62a17ae 643 zfpm_g->t_conn_down = NULL;
5adc2528 644
d62a17ae 645 iter = &zfpm_g->t_conn_down_state.iter;
5adc2528 646
d62a17ae 647 while ((rnode = zfpm_rnodes_iter_next(iter))) {
648 dest = rib_dest_from_rnode(rnode);
5adc2528 649
d62a17ae 650 if (dest) {
651 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
652 TAILQ_REMOVE(&zfpm_g->dest_q, dest,
653 fpm_q_entries);
654 }
655
656 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
657 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
5adc2528 658
d62a17ae 659 zfpm_g->stats.t_conn_down_dests_processed++;
5adc2528 660
d62a17ae 661 /*
662 * Check if the dest should be deleted.
663 */
664 rib_gc_dest(rnode);
665 }
5adc2528 666
d62a17ae 667 /*
668 * Yield if need be.
669 */
670 if (!zfpm_thread_should_yield(thread))
671 continue;
672
673 zfpm_g->stats.t_conn_down_yields++;
674 zfpm_rnodes_iter_pause(iter);
675 zfpm_g->t_conn_down = NULL;
676 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
677 NULL, 0, &zfpm_g->t_conn_down);
678 return 0;
5adc2528
AS
679 }
680
d62a17ae 681 zfpm_g->stats.t_conn_down_finishes++;
682 zfpm_rnodes_iter_cleanup(iter);
683
684 /*
685 * Start the process of connecting to the FPM again.
686 */
687 zfpm_start_connect_timer("cleanup complete");
688 return 0;
5adc2528
AS
689}
690
691/*
692 * zfpm_connection_down
693 *
694 * Called when the connection to the FPM has gone down.
695 */
d62a17ae 696static void zfpm_connection_down(const char *detail)
5adc2528 697{
d62a17ae 698 if (!detail)
699 detail = "unknown";
5adc2528 700
d62a17ae 701 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
5adc2528 702
d62a17ae 703 zlog_info("connection to the FPM has gone down: %s", detail);
5adc2528 704
d62a17ae 705 zfpm_read_off();
706 zfpm_write_off();
5adc2528 707
d62a17ae 708 stream_reset(zfpm_g->ibuf);
709 stream_reset(zfpm_g->obuf);
5adc2528 710
d62a17ae 711 if (zfpm_g->sock >= 0) {
712 close(zfpm_g->sock);
713 zfpm_g->sock = -1;
714 }
5adc2528 715
d62a17ae 716 /*
717 * Start thread to clean up state after the connection goes down.
718 */
719 assert(!zfpm_g->t_conn_down);
d62a17ae 720 zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
721 zfpm_g->t_conn_down = NULL;
722 thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
723 &zfpm_g->t_conn_down);
724 zfpm_g->stats.t_conn_down_starts++;
725
726 zfpm_set_state(ZFPM_STATE_IDLE, detail);
5adc2528
AS
727}
728
729/*
730 * zfpm_read_cb
731 */
d62a17ae 732static int zfpm_read_cb(struct thread *thread)
5adc2528 733{
d62a17ae 734 size_t already;
735 struct stream *ibuf;
736 uint16_t msg_len;
737 fpm_msg_hdr_t *hdr;
738
739 zfpm_g->stats.read_cb_calls++;
d62a17ae 740
741 /*
742 * Check if async connect is now done.
743 */
744 if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
745 zfpm_connect_check();
746 return 0;
5adc2528
AS
747 }
748
d62a17ae 749 assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
750 assert(zfpm_g->sock >= 0);
5adc2528 751
d62a17ae 752 ibuf = zfpm_g->ibuf;
5adc2528 753
d62a17ae 754 already = stream_get_endp(ibuf);
755 if (already < FPM_MSG_HDR_LEN) {
756 ssize_t nbyte;
5adc2528 757
d62a17ae 758 nbyte = stream_read_try(ibuf, zfpm_g->sock,
759 FPM_MSG_HDR_LEN - already);
760 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
761 if (nbyte == -1) {
762 char buffer[1024];
763
772270f3
QY
764 snprintf(buffer, sizeof(buffer),
765 "closed socket in read(%d): %s", errno,
766 safe_strerror(errno));
677f704d 767 zfpm_connection_down(buffer);
996c9314 768 } else
677f704d 769 zfpm_connection_down("closed socket in read");
d62a17ae 770 return 0;
771 }
5adc2528 772
d62a17ae 773 if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
774 goto done;
5adc2528 775
d62a17ae 776 already = FPM_MSG_HDR_LEN;
777 }
5adc2528 778
d62a17ae 779 stream_set_getp(ibuf, 0);
5adc2528 780
d62a17ae 781 hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);
5adc2528 782
d62a17ae 783 if (!fpm_msg_hdr_ok(hdr)) {
784 zfpm_connection_down("invalid message header");
785 return 0;
5adc2528
AS
786 }
787
d62a17ae 788 msg_len = fpm_msg_len(hdr);
5adc2528 789
d62a17ae 790 /*
791 * Read out the rest of the packet.
792 */
793 if (already < msg_len) {
794 ssize_t nbyte;
5adc2528 795
d62a17ae 796 nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);
5adc2528 797
d62a17ae 798 if (nbyte == 0 || nbyte == -1) {
677f704d
DS
799 if (nbyte == -1) {
800 char buffer[1024];
801
772270f3
QY
802 snprintf(buffer, sizeof(buffer),
803 "failed to read message(%d) %s", errno,
804 safe_strerror(errno));
677f704d 805 zfpm_connection_down(buffer);
996c9314 806 } else
677f704d 807 zfpm_connection_down("failed to read message");
d62a17ae 808 return 0;
809 }
810
811 if (nbyte != (ssize_t)(msg_len - already))
812 goto done;
813 }
814
d62a17ae 815 /*
816 * Just throw it away for now.
817 */
818 stream_reset(ibuf);
819
820done:
821 zfpm_read_on();
822 return 0;
5adc2528
AS
823}
824
21d814eb
AD
825static bool zfpm_updates_pending(void)
826{
827 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
828 return true;
829
830 return false;
831}
832
5adc2528
AS
833/*
834 * zfpm_writes_pending
835 *
2951a7a4 836 * Returns true if we may have something to write to the FPM.
5adc2528 837 */
d62a17ae 838static int zfpm_writes_pending(void)
5adc2528
AS
839{
840
d62a17ae 841 /*
842 * Check if there is any data in the outbound buffer that has not
843 * been written to the socket yet.
844 */
845 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
846 return 1;
5adc2528 847
d62a17ae 848 /*
21d814eb 849 * Check if there are any updates scheduled on the outbound queues.
d62a17ae 850 */
21d814eb 851 if (zfpm_updates_pending())
d62a17ae 852 return 1;
5adc2528 853
d62a17ae 854 return 0;
5adc2528
AS
855}
856
857/*
858 * zfpm_encode_route
859 *
860 * Encode a message to the FPM with information about the given route.
861 *
862 * Returns the number of bytes written to the buffer. 0 or a negative
863 * value indicates an error.
864 */
d62a17ae 865static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
866 char *in_buf, size_t in_buf_len,
867 fpm_msg_type_e *msg_type)
5adc2528 868{
d62a17ae 869 size_t len;
9bf75362 870#ifdef HAVE_NETLINK
d62a17ae 871 int cmd;
9bf75362 872#endif
d62a17ae 873 len = 0;
5adc2528 874
d62a17ae 875 *msg_type = FPM_MSG_TYPE_NONE;
5adc2528 876
d62a17ae 877 switch (zfpm_g->message_format) {
5adc2528 878
d62a17ae 879 case ZFPM_MSG_FORMAT_PROTOBUF:
fb0aa886 880#ifdef HAVE_PROTOBUF
d62a17ae 881 len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
882 in_buf_len);
883 *msg_type = FPM_MSG_TYPE_PROTOBUF;
fb0aa886 884#endif
d62a17ae 885 break;
5adc2528 886
d62a17ae 887 case ZFPM_MSG_FORMAT_NETLINK:
fb0aa886 888#ifdef HAVE_NETLINK
d62a17ae 889 *msg_type = FPM_MSG_TYPE_NETLINK;
890 cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
891 len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
892 in_buf_len);
893 assert(fpm_msg_align(len) == len);
894 *msg_type = FPM_MSG_TYPE_NETLINK;
5adc2528 895#endif /* HAVE_NETLINK */
d62a17ae 896 break;
fb0aa886 897
d62a17ae 898 default:
899 break;
900 }
fb0aa886 901
d62a17ae 902 return len;
5adc2528
AS
903}
904
905/*
906 * zfpm_route_for_update
907 *
f0f77c9a 908 * Returns the re that is to be sent to the FPM for a given dest.
5adc2528 909 */
d62a17ae 910struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
5adc2528 911{
5f7a4718 912 return dest->selected_fib;
5adc2528
AS
913}
914
915/*
21d814eb 916 * Define an enum for return codes for queue processing functions
5adc2528 917 *
21d814eb
AD
918 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
919 * Stop processing all the queues and empty the buffer by writing its content
920 * to the socket.
921 *
922 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
923 * empty or we have processed enough updates from this queue.
924 * So, move on to the next queue.
5adc2528 925 */
21d814eb
AD
926enum {
927 FPM_WRITE_STOP = 0,
928 FPM_GOTO_NEXT_Q = 1
929};
930
931#define FPM_QUEUE_PROCESS_LIMIT 10000
932
933/*
934 * zfpm_build_route_updates
935 *
936 * Process the dest_q queue and write FPM messages to the outbound buffer.
937 */
938static int zfpm_build_route_updates(void)
5adc2528 939{
d62a17ae 940 struct stream *s;
941 rib_dest_t *dest;
942 unsigned char *buf, *data, *buf_end;
943 size_t msg_len;
944 size_t data_len;
945 fpm_msg_hdr_t *hdr;
946 struct route_entry *re;
947 int is_add, write_msg;
948 fpm_msg_type_e msg_type;
21d814eb 949 uint16_t q_limit;
d62a17ae 950
21d814eb
AD
951 if (TAILQ_EMPTY(&zfpm_g->dest_q))
952 return FPM_GOTO_NEXT_Q;
d62a17ae 953
21d814eb
AD
954 s = zfpm_g->obuf;
955 q_limit = FPM_QUEUE_PROCESS_LIMIT;
d62a17ae 956
21d814eb 957 do {
d62a17ae 958 /*
959 * Make sure there is enough space to write another message.
960 */
961 if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
21d814eb 962 return FPM_WRITE_STOP;
d62a17ae 963
964 buf = STREAM_DATA(s) + stream_get_endp(s);
965 buf_end = buf + STREAM_WRITEABLE(s);
966
967 dest = TAILQ_FIRST(&zfpm_g->dest_q);
968 if (!dest)
21d814eb 969 return FPM_GOTO_NEXT_Q;
d62a17ae 970
971 assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));
972
973 hdr = (fpm_msg_hdr_t *)buf;
974 hdr->version = FPM_PROTO_VERSION;
975
976 data = fpm_msg_data(hdr);
977
978 re = zfpm_route_for_update(dest);
979 is_add = re ? 1 : 0;
980
981 write_msg = 1;
982
983 /*
984 * If this is a route deletion, and we have not sent the route
985 * to
986 * the FPM previously, skip it.
987 */
988 if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
989 write_msg = 0;
990 zfpm_g->stats.nop_deletes_skipped++;
991 }
992
993 if (write_msg) {
994 data_len = zfpm_encode_route(dest, re, (char *)data,
995 buf_end - data, &msg_type);
996
997 assert(data_len);
998 if (data_len) {
999 hdr->msg_type = msg_type;
1000 msg_len = fpm_data_len_to_msg_len(data_len);
1001 hdr->msg_len = htons(msg_len);
1002 stream_forward_endp(s, msg_len);
1003
1004 if (is_add)
1005 zfpm_g->stats.route_adds++;
1006 else
1007 zfpm_g->stats.route_dels++;
1008 }
1009 }
1010
1011 /*
1012 * Remove the dest from the queue, and reset the flag.
1013 */
1014 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1015 TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);
1016
1017 if (is_add) {
1018 SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1019 } else {
1020 UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
1021 }
1022
1023 /*
1024 * Delete the destination if necessary.
1025 */
1026 if (rib_gc_dest(dest->rnode))
1027 zfpm_g->stats.dests_del_after_update++;
1028
21d814eb
AD
1029 q_limit--;
1030 if (q_limit == 0) {
1031 /*
1032 * We have processed enough updates in this queue.
1033 * Now yield for other queues.
1034 */
1035 return FPM_GOTO_NEXT_Q;
1036 }
c5431822 1037 } while (true);
21d814eb
AD
1038}
1039
1040/*
1041 * zfpm_encode_mac
1042 *
1043 * Encode a message to FPM with information about the given MAC.
1044 *
1045 * Returns the number of bytes written to the buffer.
1046 */
1047static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
1048 size_t in_buf_len, fpm_msg_type_e *msg_type)
1049{
1050 size_t len = 0;
1051
1052 *msg_type = FPM_MSG_TYPE_NONE;
1053
1054 switch (zfpm_g->message_format) {
1055
1056 case ZFPM_MSG_FORMAT_NONE:
1057 break;
1058 case ZFPM_MSG_FORMAT_NETLINK:
9da60d0a
AD
1059#ifdef HAVE_NETLINK
1060 len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
1061 assert(fpm_msg_align(len) == len);
1062 *msg_type = FPM_MSG_TYPE_NETLINK;
1063#endif /* HAVE_NETLINK */
21d814eb
AD
1064 break;
1065 case ZFPM_MSG_FORMAT_PROTOBUF:
1066 break;
1067 }
1068 return len;
1069}
1070
1071static int zfpm_build_mac_updates(void)
1072{
1073 struct stream *s;
1074 struct fpm_mac_info_t *mac;
1075 unsigned char *buf, *data, *buf_end;
1076 fpm_msg_hdr_t *hdr;
1077 size_t data_len, msg_len;
1078 fpm_msg_type_e msg_type;
1079 uint16_t q_limit;
1080
1081 if (TAILQ_EMPTY(&zfpm_g->mac_q))
1082 return FPM_GOTO_NEXT_Q;
1083
1084 s = zfpm_g->obuf;
1085 q_limit = FPM_QUEUE_PROCESS_LIMIT;
1086
1087 do {
1088 /* Make sure there is enough space to write another message. */
1089 if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
1090 return FPM_WRITE_STOP;
1091
1092 buf = STREAM_DATA(s) + stream_get_endp(s);
1093 buf_end = buf + STREAM_WRITEABLE(s);
1094
1095 mac = TAILQ_FIRST(&zfpm_g->mac_q);
1096 if (!mac)
1097 return FPM_GOTO_NEXT_Q;
1098
1099 /* Check for no-op */
1100 if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
1101 zfpm_g->stats.nop_deletes_skipped++;
1102 zfpm_mac_info_del(mac);
1103 continue;
1104 }
1105
1106 hdr = (fpm_msg_hdr_t *)buf;
1107 hdr->version = FPM_PROTO_VERSION;
1108
1109 data = fpm_msg_data(hdr);
1110 data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
1111 &msg_type);
9da60d0a 1112 assert(data_len);
21d814eb
AD
1113
1114 hdr->msg_type = msg_type;
1115 msg_len = fpm_data_len_to_msg_len(data_len);
1116 hdr->msg_len = htons(msg_len);
1117 stream_forward_endp(s, msg_len);
1118
1119 /* Remove the MAC from the queue, and delete it. */
1120 zfpm_mac_info_del(mac);
1121
1122 q_limit--;
1123 if (q_limit == 0) {
1124 /*
1125 * We have processed enough updates in this queue.
1126 * Now yield for other queues.
1127 */
1128 return FPM_GOTO_NEXT_Q;
1129 }
d62a17ae 1130 } while (1);
5adc2528
AS
1131}
1132
21d814eb
AD
1133/*
1134 * zfpm_build_updates
1135 *
1136 * Process the outgoing queues and write messages to the outbound
1137 * buffer.
1138 */
1139static void zfpm_build_updates(void)
1140{
1141 struct stream *s;
1142
1143 s = zfpm_g->obuf;
1144 assert(stream_empty(s));
1145
1146 do {
1147 /*
1148 * Stop processing the queues if zfpm_g->obuf is full
1149 * or we do not have more updates to process
1150 */
1151 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1152 break;
1153 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1154 break;
1155 } while (zfpm_updates_pending());
1156}
1157
5adc2528
AS
/*
 * zfpm_write_cb
 *
 * Thread callback that flushes the outbound buffer to the FPM socket.
 * Refills the buffer from the update queues as it drains, handles
 * partial writes, and reschedules itself while work remains.
 * Always returns 0 (thread callback convention).
 */
static int zfpm_write_cb(struct thread *thread)
{
	struct stream *s;
	int num_writes;

	zfpm_g->stats.write_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return 0;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	num_writes = 0;

	do {
		int bytes_to_write, bytes_written;

		s = zfpm_g->obuf;

		/*
		 * If the stream is empty, try fill it up with data.
		 */
		if (stream_empty(s)) {
			zfpm_build_updates();
		}

		/* Unread span of the stream is what still needs writing. */
		bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
		if (!bytes_to_write)
			break;

		bytes_written =
			write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
		zfpm_g->stats.write_calls++;
		num_writes++;

		if (bytes_written < 0) {
			/* Transient errors (EAGAIN etc.): retry later. */
			if (ERRNO_IO_RETRY(errno))
				break;

			/* Hard error: tear the connection down. */
			zfpm_connection_down("failed to write to socket");
			return 0;
		}

		if (bytes_written != bytes_to_write) {

			/*
			 * Partial write.
			 */
			stream_forward_getp(s, bytes_written);
			zfpm_g->stats.partial_writes++;
			break;
		}

		/*
		 * We've written out the entire contents of the stream.
		 */
		stream_reset(s);

		/* Bound the work done per invocation. */
		if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
			zfpm_g->stats.max_writes_hit++;
			break;
		}

		/* Yield if we have held the thread for too long. */
		if (zfpm_thread_should_yield(thread)) {
			zfpm_g->stats.t_write_yields++;
			break;
		}
	} while (1);

	/* Reschedule ourselves if there is still data to push out. */
	if (zfpm_writes_pending())
		zfpm_write_on();

	return 0;
}
1241
/*
 * zfpm_connect_cb
 *
 * Timer callback that attempts a non-blocking TCP connect to the FPM.
 * On immediate success the connection is brought up; on EINPROGRESS we
 * move to the CONNECTING state and wait for read/write readiness; on
 * failure the retry timer is restarted.
 */
static int zfpm_connect_cb(struct thread *t)
{
	int sock, ret;
	struct sockaddr_in serv;

	assert(zfpm_g->state == ZFPM_STATE_ACTIVE);

	sock = socket(AF_INET, SOCK_STREAM, 0);
	if (sock < 0) {
		zlog_err("Failed to create socket for connect(): %s",
			 strerror(errno));
		zfpm_g->stats.connect_no_sock++;
		return 0;
	}

	/* Non-blocking so connect() returns EINPROGRESS instead of waiting. */
	set_nonblocking(sock);

	/* Make server socket. */
	memset(&serv, 0, sizeof(serv));
	serv.sin_family = AF_INET;
	serv.sin_port = htons(zfpm_g->fpm_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
	serv.sin_len = sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
	/* Default to loopback when no server address was configured.
	 * NOTE(review): fpm_server appears to be stored in network byte
	 * order already (no htonl on the else branch) — confirm at the
	 * CLI handler that sets it. */
	if (!zfpm_g->fpm_server)
		serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	else
		serv.sin_addr.s_addr = (zfpm_g->fpm_server);

	/*
	 * Connect to the FPM.
	 */
	zfpm_g->connect_calls++;
	zfpm_g->stats.connect_calls++;
	zfpm_g->last_connect_call_time = monotime(NULL);

	ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
	if (ret >= 0) {
		/* Immediate success. */
		zfpm_g->sock = sock;
		zfpm_connection_up("connect succeeded");
		return 1;
	}

	if (errno == EINPROGRESS) {
		/* Async connect pending; completion reported via r/w events. */
		zfpm_g->sock = sock;
		zfpm_read_on();
		zfpm_write_on();
		zfpm_set_state(ZFPM_STATE_CONNECTING,
			       "async connect in progress");
		return 0;
	}

	zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
	close(sock);

	/*
	 * Restart timer for retrying connection.
	 */
	zfpm_start_connect_timer("connect() failed");
	return 0;
}
1306
1307/*
1308 * zfpm_set_state
1309 *
1310 * Move state machine into the given state.
1311 */
d62a17ae 1312static void zfpm_set_state(zfpm_state_t state, const char *reason)
5adc2528 1313{
d62a17ae 1314 zfpm_state_t cur_state = zfpm_g->state;
1315
1316 if (!reason)
1317 reason = "Unknown";
1318
1319 if (state == cur_state)
1320 return;
1321
1322 zfpm_debug("beginning state transition %s -> %s. Reason: %s",
1323 zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
1324 reason);
1325
1326 switch (state) {
1327
1328 case ZFPM_STATE_IDLE:
1329 assert(cur_state == ZFPM_STATE_ESTABLISHED);
1330 break;
1331
1332 case ZFPM_STATE_ACTIVE:
1333 assert(cur_state == ZFPM_STATE_IDLE
1334 || cur_state == ZFPM_STATE_CONNECTING);
1335 assert(zfpm_g->t_connect);
1336 break;
1337
1338 case ZFPM_STATE_CONNECTING:
1339 assert(zfpm_g->sock);
1340 assert(cur_state == ZFPM_STATE_ACTIVE);
1341 assert(zfpm_g->t_read);
1342 assert(zfpm_g->t_write);
1343 break;
1344
1345 case ZFPM_STATE_ESTABLISHED:
1346 assert(cur_state == ZFPM_STATE_ACTIVE
1347 || cur_state == ZFPM_STATE_CONNECTING);
1348 assert(zfpm_g->sock);
1349 assert(zfpm_g->t_read);
1350 assert(zfpm_g->t_write);
1351 break;
1352 }
1353
1354 zfpm_g->state = state;
5adc2528
AS
1355}
1356
1357/*
1358 * zfpm_calc_connect_delay
1359 *
1360 * Returns the number of seconds after which we should attempt to
1361 * reconnect to the FPM.
1362 */
d62a17ae 1363static long zfpm_calc_connect_delay(void)
5adc2528 1364{
d62a17ae 1365 time_t elapsed;
5adc2528 1366
d62a17ae 1367 /*
1368 * Return 0 if this is our first attempt to connect.
1369 */
1370 if (zfpm_g->connect_calls == 0) {
1371 return 0;
1372 }
5adc2528 1373
d62a17ae 1374 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
5adc2528 1375
d62a17ae 1376 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1377 return 0;
1378 }
5adc2528 1379
d62a17ae 1380 return ZFPM_CONNECT_RETRY_IVL - elapsed;
5adc2528
AS
1381}
1382
1383/*
1384 * zfpm_start_connect_timer
1385 */
d62a17ae 1386static void zfpm_start_connect_timer(const char *reason)
5adc2528 1387{
d62a17ae 1388 long delay_secs;
5adc2528 1389
d62a17ae 1390 assert(!zfpm_g->t_connect);
1391 assert(zfpm_g->sock < 0);
5adc2528 1392
d62a17ae 1393 assert(zfpm_g->state == ZFPM_STATE_IDLE
1394 || zfpm_g->state == ZFPM_STATE_ACTIVE
1395 || zfpm_g->state == ZFPM_STATE_CONNECTING);
5adc2528 1396
d62a17ae 1397 delay_secs = zfpm_calc_connect_delay();
1398 zfpm_debug("scheduling connect in %ld seconds", delay_secs);
5adc2528 1399
d62a17ae 1400 thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
1401 &zfpm_g->t_connect);
1402 zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
5adc2528
AS
1403}
1404
1405/*
1406 * zfpm_is_enabled
1407 *
2951a7a4 1408 * Returns true if the zebra FPM module has been enabled.
5adc2528 1409 */
d62a17ae 1410static inline int zfpm_is_enabled(void)
5adc2528 1411{
d62a17ae 1412 return zfpm_g->enabled;
5adc2528
AS
1413}
1414
1415/*
1416 * zfpm_conn_is_up
1417 *
2951a7a4 1418 * Returns true if the connection to the FPM is up.
5adc2528 1419 */
d62a17ae 1420static inline int zfpm_conn_is_up(void)
5adc2528 1421{
d62a17ae 1422 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1423 return 0;
5adc2528 1424
d62a17ae 1425 assert(zfpm_g->sock >= 0);
5adc2528 1426
d62a17ae 1427 return 1;
5adc2528
AS
1428}
1429
1430/*
1431 * zfpm_trigger_update
1432 *
1433 * The zebra code invokes this function to indicate that we should
1434 * send an update to the FPM about the given route_node.
1435 */
d62a17ae 1436static int zfpm_trigger_update(struct route_node *rn, const char *reason)
5adc2528 1437{
d62a17ae 1438 rib_dest_t *dest;
1439 char buf[PREFIX_STRLEN];
1440
1441 /*
1442 * Ignore if the connection is down. We will update the FPM about
1443 * all destinations once the connection comes up.
1444 */
1445 if (!zfpm_conn_is_up())
1446 return 0;
1447
1448 dest = rib_dest_from_rnode(rn);
1449
d62a17ae 1450 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
1451 zfpm_g->stats.redundant_triggers++;
1452 return 0;
1453 }
1454
1455 if (reason) {
1456 zfpm_debug("%s triggering update to FPM - Reason: %s",
1457 prefix2str(&rn->p, buf, sizeof(buf)), reason);
1458 }
1459
1460 SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1461 TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
1462 zfpm_g->stats.updates_triggered++;
1463
1464 /*
1465 * Make sure that writes are enabled.
1466 */
1467 if (zfpm_g->t_write)
1468 return 0;
1469
1470 zfpm_write_on();
1471 return 0;
5adc2528
AS
1472}
1473
e5218ec8
AD
1474/*
1475 * Generate Key for FPM MAC info hash entry
e5218ec8
AD
1476 */
1477static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1478{
1479 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1480 uint32_t mac_key;
1481
1482 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1483
1484 return jhash_2words(mac_key, fpm_mac->vni, 0);
1485}
1486
1487/*
1488 * Compare function for FPM MAC info hash lookup
1489 */
1490static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1491{
1492 const struct fpm_mac_info_t *fpm_mac1 = p1;
1493 const struct fpm_mac_info_t *fpm_mac2 = p2;
1494
1495 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1496 != 0)
1497 return false;
e5218ec8
AD
1498 if (fpm_mac1->vni != fpm_mac2->vni)
1499 return false;
1500
1501 return true;
1502}
1503
1504/*
1505 * Lookup FPM MAC info hash entry.
1506 */
1507static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
1508{
1509 return hash_lookup(zfpm_g->fpm_mac_info_table, key);
1510}
1511
1512/*
1513 * Callback to allocate fpm_mac_info_t structure.
1514 */
1515static void *zfpm_mac_info_alloc(void *p)
1516{
1517 const struct fpm_mac_info_t *key = p;
1518 struct fpm_mac_info_t *fpm_mac;
1519
1520 fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));
1521
1522 memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
e5218ec8
AD
1523 fpm_mac->vni = key->vni;
1524
1525 return (void *)fpm_mac;
1526}
1527
1528/*
1529 * Delink and free fpm_mac_info_t.
1530 */
1531static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
1532{
1533 hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
1534 TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
1535 XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
1536}
1537
a780a738
AD
1538/*
1539 * zfpm_trigger_rmac_update
1540 *
1541 * Zebra code invokes this function to indicate that we should
1542 * send an update to FPM for given MAC entry.
1543 *
1544 * This function checks if we already have enqueued an update for this RMAC,
1545 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
1546 */
1547static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
1548 bool delete, const char *reason)
1549{
1550 char buf[ETHER_ADDR_STRLEN];
1551 struct fpm_mac_info_t *fpm_mac, key;
1552 struct interface *vxlan_if, *svi_if;
44f7f132 1553 bool mac_found = false;
a780a738
AD
1554
1555 /*
1556 * Ignore if the connection is down. We will update the FPM about
1557 * all destinations once the connection comes up.
1558 */
1559 if (!zfpm_conn_is_up())
1560 return 0;
1561
1562 if (reason) {
1563 zfpm_debug("triggering update to FPM - Reason: %s - %s",
1564 reason,
1565 prefix_mac2str(&rmac->macaddr, buf, sizeof(buf)));
1566 }
1567
1568 vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
1569 svi_if = zl3vni_map_to_svi_if(zl3vni);
1570
1571 memset(&key, 0, sizeof(struct fpm_mac_info_t));
1572
1573 memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
a780a738
AD
1574 key.vni = zl3vni->vni;
1575
1576 /* Check if this MAC is already present in the queue. */
1577 fpm_mac = zfpm_mac_info_lookup(&key);
1578
1579 if (fpm_mac) {
44f7f132 1580 mac_found = true;
a780a738
AD
1581
1582 /*
44f7f132
AD
1583 * If the enqueued op is "add" and current op is "delete",
1584 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
1585 * While processing FPM queue, we will silently delete this
1586 * MAC entry without sending any update for this MAC.
a780a738 1587 */
44f7f132
AD
1588 if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
1589 delete == 1) {
a780a738
AD
1590 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
1591 UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
44f7f132 1592 return 0;
a780a738 1593 }
44f7f132
AD
1594 } else {
1595 fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
1596 zfpm_mac_info_alloc);
1597 if (!fpm_mac)
1598 return 0;
a780a738
AD
1599 }
1600
44f7f132 1601 fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
c5431822 1602 fpm_mac->zebra_flags = rmac->flags;
a780a738
AD
1603 fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
1604 fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;
1605
1606 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
1607 if (delete)
1608 SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
44f7f132
AD
1609 else
1610 UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
a780a738 1611
44f7f132
AD
1612 if (!mac_found)
1613 TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
a780a738
AD
1614
1615 zfpm_g->stats.updates_triggered++;
1616
a780a738
AD
1617 /* If writes are already enabled, return. */
1618 if (zfpm_g->t_write)
1619 return 0;
1620
1621 zfpm_write_on();
1622 return 0;
1623}
1624
fbe748e5
AD
1625/*
1626 * This function is called when the FPM connections is established.
1627 * Iterate over all the RMAC entries for the given L3VNI
1628 * and enqueue the RMAC for FPM processing.
1629 */
7f5818fb 1630static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet,
fbe748e5
AD
1631 void *args)
1632{
1633 zebra_mac_t *zrmac = (zebra_mac_t *)backet->data;
1634 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args;
1635
1636 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1637}
1638
1639/*
1640 * This function is called when the FPM connections is established.
1641 * This function iterates over all the L3VNIs to trigger
1642 * FPM updates for RMACs currently available.
1643 */
7f5818fb 1644static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args)
fbe748e5
AD
1645{
1646 zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)backet->data;
1647
1648 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1649 (void *)zl3vni);
1650}
1651
5adc2528
AS
/*
 * zfpm_stats_timer_cb
 *
 * Periodic timer: rolls the per-interval counters into the cumulative
 * stats, snapshots them for display, and restarts the interval.
 * The copy -> compose -> reset ordering below is load-bearing.
 */
static int zfpm_stats_timer_cb(struct thread *t)
{
	/* Timer has fired; clear the handle before rescheduling. */
	zfpm_g->t_stats = NULL;

	/*
	 * Remember the stats collected in the last interval for display
	 * purposes.
	 */
	zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);

	/*
	 * Add the current set of stats into the cumulative statistics.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &zfpm_g->cumulative_stats);

	/*
	 * Start collecting stats afresh over the next interval.
	 */
	zfpm_stats_reset(&zfpm_g->stats);

	zfpm_start_stats_timer();

	return 0;
}
1680
1681/*
1682 * zfpm_stop_stats_timer
1683 */
d62a17ae 1684static void zfpm_stop_stats_timer(void)
5adc2528 1685{
d62a17ae 1686 if (!zfpm_g->t_stats)
1687 return;
5adc2528 1688
d62a17ae 1689 zfpm_debug("Stopping existing stats timer");
1690 THREAD_TIMER_OFF(zfpm_g->t_stats);
5adc2528
AS
1691}
1692
1693/*
1694 * zfpm_start_stats_timer
1695 */
d62a17ae 1696void zfpm_start_stats_timer(void)
5adc2528 1697{
d62a17ae 1698 assert(!zfpm_g->t_stats);
5adc2528 1699
d62a17ae 1700 thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
1701 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
5adc2528
AS
1702}
1703
/*
 * Helper macro for zfpm_show_stats() below.
 *
 * Prints one counter as "<name> <cumulative-total> <last-interval>";
 * relies on 'vty' and 'total_stats' being in scope at the call site.
 */
#define ZFPM_SHOW_STAT(counter)                                               \
	do {                                                                  \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                 \
			total_stats.counter, zfpm_g->last_ivl_stats.counter); \
	} while (0)
5adc2528
AS
1712
/*
 * zfpm_show_stats
 *
 * Renders the FPM statistics table into 'vty': one row per counter,
 * showing the cumulative total and the value from the last interval.
 */
static void zfpm_show_stats(struct vty *vty)
{
	zfpm_stats_t total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* Footer is shown only once stats have been cleared at least once. */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1762
1763/*
1764 * zfpm_clear_stats
1765 */
d62a17ae 1766static void zfpm_clear_stats(struct vty *vty)
5adc2528 1767{
d62a17ae 1768 if (!zfpm_is_enabled()) {
1769 vty_out(vty, "The FPM module is not enabled...\n");
1770 return;
1771 }
5adc2528 1772
d62a17ae 1773 zfpm_stats_reset(&zfpm_g->stats);
1774 zfpm_stats_reset(&zfpm_g->last_ivl_stats);
1775 zfpm_stats_reset(&zfpm_g->cumulative_stats);
5adc2528 1776
d62a17ae 1777 zfpm_stop_stats_timer();
1778 zfpm_start_stats_timer();
5adc2528 1779
d62a17ae 1780 zfpm_g->last_stats_clear_time = monotime(NULL);
5adc2528 1781
d62a17ae 1782 vty_out(vty, "Cleared FPM stats\n");
5adc2528
AS
1783}
1784
/*
 * show_zebra_fpm_stats
 *
 * CLI handler for "show zebra fpm stats".
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
1799
/*
 * clear_zebra_fpm_stats
 *
 * CLI handler for "clear zebra fpm stats".
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1814
711ff0ba 1815/*
d62a17ae 1816 * update fpm connection information
711ff0ba 1817 */
e52702f2
QY
1818DEFUN ( fpm_remote_ip,
1819 fpm_remote_ip_cmd,
1820 "fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1821 "fpm connection remote ip and port\n"
1822 "Remote fpm server ip A.B.C.D\n"
1823 "Enter ip ")
1824{
1825
d62a17ae 1826 in_addr_t fpm_server;
1827 uint32_t port_no;
711ff0ba 1828
d62a17ae 1829 fpm_server = inet_addr(argv[3]->arg);
1830 if (fpm_server == INADDR_NONE)
1831 return CMD_ERR_INCOMPLETE;
711ff0ba 1832
d62a17ae 1833 port_no = atoi(argv[5]->arg);
1834 if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
1835 return CMD_ERR_INCOMPLETE;
711ff0ba 1836
d62a17ae 1837 zfpm_g->fpm_server = fpm_server;
1838 zfpm_g->fpm_port = port_no;
711ff0ba
USK
1839
1840
d62a17ae 1841 return CMD_SUCCESS;
711ff0ba
USK
1842}
1843
e52702f2
QY
1844DEFUN ( no_fpm_remote_ip,
1845 no_fpm_remote_ip_cmd,
1846 "no fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1847 "fpm connection remote ip and port\n"
1848 "Connection\n"
1849 "Remote fpm server ip A.B.C.D\n"
1850 "Enter ip ")
1851{
d62a17ae 1852 if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
1853 || zfpm_g->fpm_port != atoi(argv[6]->arg))
1854 return CMD_ERR_NO_MATCH;
711ff0ba 1855
d62a17ae 1856 zfpm_g->fpm_server = FPM_DEFAULT_IP;
1857 zfpm_g->fpm_port = FPM_DEFAULT_PORT;
711ff0ba 1858
d62a17ae 1859 return CMD_SUCCESS;
711ff0ba 1860}
711ff0ba 1861
fb0aa886
AS
1862/*
1863 * zfpm_init_message_format
1864 */
d62a17ae 1865static inline void zfpm_init_message_format(const char *format)
fb0aa886 1866{
d62a17ae 1867 int have_netlink, have_protobuf;
fb0aa886 1868
fb0aa886 1869#ifdef HAVE_NETLINK
d62a17ae 1870 have_netlink = 1;
4b2792b5 1871#else
d62a17ae 1872 have_netlink = 0;
fb0aa886
AS
1873#endif
1874
1875#ifdef HAVE_PROTOBUF
d62a17ae 1876 have_protobuf = 1;
4b2792b5 1877#else
d62a17ae 1878 have_protobuf = 0;
fb0aa886
AS
1879#endif
1880
d62a17ae 1881 zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;
fb0aa886 1882
d62a17ae 1883 if (!format) {
1884 if (have_netlink) {
1885 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1886 } else if (have_protobuf) {
1887 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1888 }
1889 return;
fb0aa886 1890 }
fb0aa886 1891
d62a17ae 1892 if (!strcmp("netlink", format)) {
1893 if (!have_netlink) {
1c50c1c0
QY
1894 flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
1895 "FPM netlink message format is not available");
d62a17ae 1896 return;
1897 }
1898 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1899 return;
fb0aa886 1900 }
fb0aa886 1901
d62a17ae 1902 if (!strcmp("protobuf", format)) {
1903 if (!have_protobuf) {
af4c2728 1904 flog_err(
e914ccbe 1905 EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
d62a17ae 1906 "FPM protobuf message format is not available");
1907 return;
1908 }
8b9cf71c
DL
1909 flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
1910 "FPM protobuf message format is deprecated and scheduled to be removed. "
1911 "Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
d62a17ae 1912 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1913 return;
fb0aa886 1914 }
fb0aa886 1915
e914ccbe 1916 flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
9df414fe 1917 format);
fb0aa886
AS
1918}
1919
711ff0ba 1920/**
d62a17ae 1921 * fpm_remote_srv_write
711ff0ba 1922 *
d62a17ae 1923 * Module to write remote fpm connection
711ff0ba
USK
1924 *
1925 * Returns ZERO on success.
1926 */
1927
d62a17ae 1928static int fpm_remote_srv_write(struct vty *vty)
711ff0ba 1929{
d62a17ae 1930 struct in_addr in;
711ff0ba 1931
d62a17ae 1932 in.s_addr = zfpm_g->fpm_server;
711ff0ba 1933
9d1c2659 1934 if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
996c9314
LB
1935 && zfpm_g->fpm_server != INADDR_ANY)
1936 || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
d62a17ae 1937 vty_out(vty, "fpm connection ip %s port %d\n", inet_ntoa(in),
1938 zfpm_g->fpm_port);
711ff0ba 1939
d62a17ae 1940 return 0;
711ff0ba
USK
1941}
1942
1943
612c2c15 1944static int fpm_remote_srv_write(struct vty *vty);
4f8ea50c 1945/* Zebra node */
62b346ee 1946static struct cmd_node zebra_node = {
f4b8291f 1947 .name = "zebra",
62b346ee 1948 .node = ZEBRA_NODE,
24389580 1949 .parent_node = CONFIG_NODE,
62b346ee 1950 .prompt = "",
612c2c15 1951 .config_write = fpm_remote_srv_write,
62b346ee 1952};
4f8ea50c
DL
1953
1954
5adc2528
AS
/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module, run from the
 * frr_late_init hook.
 *
 * @param[in] master thread master used to schedule all FPM work.
 *
 * The message format is taken from the module load argument
 * (THIS_MODULE->load_args); can be 'netlink' or 'protobuf'. The
 * module is disabled when no usable format is compiled in.
 *
 * Returns 0.
 */
static int zfpm_init(struct thread_master *master)
{
	int enable = 1;
	uint16_t port = 0;
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t entries */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->sock = -1;
	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	/* Register the CLI node and commands. */
	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to defaults for anything left unconfigured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
5adc2528 2020
f0c459f0
DS
2021static int zfpm_fini(void)
2022{
2023 zfpm_write_off();
2024 zfpm_read_off();
2025 zfpm_connect_off();
2026
2027 zfpm_stop_stats_timer();
2028
2029 hook_unregister(rib_update, zfpm_trigger_update);
2030 return 0;
2031}
2032
d62a17ae 2033static int zebra_fpm_module_init(void)
4f8ea50c 2034{
d62a17ae 2035 hook_register(rib_update, zfpm_trigger_update);
a780a738 2036 hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
d62a17ae 2037 hook_register(frr_late_init, zfpm_init);
f0c459f0 2038 hook_register(frr_early_fini, zfpm_fini);
d62a17ae 2039 return 0;
5adc2528 2040}
4f8ea50c 2041
d62a17ae 2042FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
2043 .description = "zebra FPM (Forwarding Plane Manager) module",
2044 .init = zebra_fpm_module_init, )