]> git.proxmox.com Git - mirror_frr.git/blame - zebra/zebra_fpm.c
zebra: On shutdown stop hook calls for fpm rmac updates
[mirror_frr.git] / zebra / zebra_fpm.c
CommitLineData
acddc0ed 1// SPDX-License-Identifier: GPL-2.0-or-later
5adc2528
AS
2/*
3 * Main implementation file for interface to Forwarding Plane Manager.
4 *
5 * Copyright (C) 2012 by Open Source Routing.
6 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
5adc2528
AS
7 */
8
9#include <zebra.h>
10
11#include "log.h"
4f8ea50c 12#include "libfrr.h"
5adc2528 13#include "stream.h"
24a58196 14#include "frrevent.h"
5adc2528
AS
15#include "network.h"
16#include "command.h"
09781197 17#include "lib/version.h"
e5218ec8 18#include "jhash.h"
5adc2528
AS
19
20#include "zebra/rib.h"
7c551956
DS
21#include "zebra/zserv.h"
22#include "zebra/zebra_ns.h"
23#include "zebra/zebra_vrf.h"
67aeb554 24#include "zebra/zebra_errors.h"
5adc2528
AS
25
26#include "fpm/fpm.h"
5adc2528 27#include "zebra_fpm_private.h"
e5218ec8 28#include "zebra/zebra_router.h"
a780a738 29#include "zebra_vxlan_private.h"
e5218ec8
AD
30
/* Memory type for per-MAC FPM bookkeeping nodes (struct fpm_mac_info_t). */
DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");

/*
 * Interval at which we attempt to connect to the FPM.
 * NOTE(review): presumably seconds — confirm against the connect timer setup.
 */
#define ZFPM_CONNECT_RETRY_IVL 5

/*
 * Sizes of outgoing and incoming stream buffers for writing/reading
 * FPM messages.
 */
#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)

/*
 * The maximum number of times the FPM socket write callback can call
 * 'write' before it yields.
 */
#define ZFPM_MAX_WRITES_PER_RUN 10

/*
 * Interval over which we collect statistics.
 */
#define ZFPM_STATS_IVL_SECS 10

/* Upper bound on the encoded size of a single MAC update message. */
#define FPM_MAX_MAC_MSG_LEN 512

/*
 * Forward declaration: hash_iterate() callback used at connection-up time
 * to walk the RMAC table (see zfpm_conn_up_thread_cb).
 */
static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args);
5adc2528
AS
58
/*
 * Structure that holds state for iterating over all route_node
 * structures that are candidates for being communicated to the FPM.
 */
struct zfpm_rnodes_iter {
	/* Outer iterator: walks the set of RIB tables. */
	rib_tables_iter_t tables_iter;
	/* Inner iterator: walks the nodes of the current table. */
	route_table_iter_t iter;
};
5adc2528
AS
67
/*
 * Statistics.
 *
 * NOTE: this structure must consist entirely of 'unsigned long'
 * counters — zfpm_stats_compose() sums it field-by-field under that
 * assumption.
 */
struct zfpm_stats {
	/* Connection attempts. */
	unsigned long connect_calls;
	unsigned long connect_no_sock;

	/* Read-path activity. */
	unsigned long read_cb_calls;

	/* Write-path activity. */
	unsigned long write_cb_calls;
	unsigned long write_calls;
	unsigned long partial_writes;
	unsigned long max_writes_hit;
	unsigned long t_write_yields;

	/* Route update generation. */
	unsigned long nop_deletes_skipped;
	unsigned long route_adds;
	unsigned long route_dels;

	/* Update trigger accounting. */
	unsigned long updates_triggered;
	unsigned long redundant_triggers;

	unsigned long dests_del_after_update;

	/* Connection-down cleanup task. */
	unsigned long t_conn_down_starts;
	unsigned long t_conn_down_dests_processed;
	unsigned long t_conn_down_yields;
	unsigned long t_conn_down_finishes;

	/* Connection-up (re)sync task. */
	unsigned long t_conn_up_starts;
	unsigned long t_conn_up_dests_processed;
	unsigned long t_conn_up_yields;
	unsigned long t_conn_up_aborts;
	unsigned long t_conn_up_finishes;
};
5adc2528
AS
103
/*
 * States for the FPM state machine.
 */
enum zfpm_state {

	/*
	 * In this state we are not yet ready to connect to the FPM. This
	 * can happen when this module is disabled, or if we're cleaning up
	 * after a connection has gone down.
	 */
	ZFPM_STATE_IDLE,

	/*
	 * Ready to talk to the FPM and periodically trying to connect to
	 * it.
	 */
	ZFPM_STATE_ACTIVE,

	/*
	 * In the middle of bringing up a TCP connection. Specifically,
	 * waiting for a connect() call to complete asynchronously.
	 */
	ZFPM_STATE_CONNECTING,

	/*
	 * TCP connection to the FPM is up.
	 */
	ZFPM_STATE_ESTABLISHED

};
5adc2528 134
fb0aa886
AS
/*
 * Message format to be used to communicate with the FPM.
 */
enum zfpm_msg_format {
	/* No format selected; encoding is a no-op (see zfpm_encode_route). */
	ZFPM_MSG_FORMAT_NONE,
	/* Netlink-style payloads (requires HAVE_NETLINK). */
	ZFPM_MSG_FORMAT_NETLINK,
	/* Protobuf payloads (requires HAVE_PROTOBUF). */
	ZFPM_MSG_FORMAT_PROTOBUF,
};
143
5adc2528
AS
/*
 * Globals.
 */
struct zfpm_glob {

	/*
	 * True if the FPM module has been enabled.
	 */
	int enabled;

	/*
	 * Message format to be used to communicate with the fpm.
	 */
	enum zfpm_msg_format message_format;

	/* Event loop all timers/tasks below are scheduled on. */
	struct event_loop *master;

	/* Current state of the FPM connection state machine. */
	enum zfpm_state state;

	/* Address of the FPM server to connect to. */
	in_addr_t fpm_server;
	/*
	 * Port on which the FPM is running.
	 */
	int fpm_port;

	/*
	 * List of rib_dest_t structures to be processed
	 */
	TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;

	/*
	 * List of fpm_mac_info structures to be processed
	 */
	TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;

	/*
	 * Hash table of fpm_mac_info_t entries
	 *
	 * While adding fpm_mac_info_t for a MAC to the mac_q,
	 * it is possible that another fpm_mac_info_t node for the this MAC
	 * is already present in the queue.
	 * This is possible in the case of consecutive add->delete operations.
	 * To avoid such duplicate insertions in the mac_q,
	 * define a hash table for fpm_mac_info_t which can be looked up
	 * to see if an fpm_mac_info_t node for a MAC is already present
	 * in the mac_q.
	 */
	struct hash *fpm_mac_info_table;

	/*
	 * Stream socket to the FPM.  -1 when not connected.
	 */
	int sock;

	/*
	 * Buffers for messages to/from the FPM.
	 */
	struct stream *obuf;
	struct stream *ibuf;

	/*
	 * Threads for I/O.
	 */
	struct event *t_connect;
	struct event *t_write;
	struct event *t_read;

	/*
	 * Thread to clean up after the TCP connection to the FPM goes down
	 * and the state that belongs to it.
	 */
	struct event *t_conn_down;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_down_state;

	/*
	 * Thread to take actions once the TCP conn to the FPM comes up, and
	 * the state that belongs to it.
	 */
	struct event *t_conn_up;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_up_state;

	unsigned long connect_calls;
	time_t last_connect_call_time;

	/*
	 * Stats from the start of the current statistics interval up to
	 * now. These are the counters we typically update in the code.
	 */
	struct zfpm_stats stats;

	/*
	 * Statistics that were gathered in the last collection interval.
	 */
	struct zfpm_stats last_ivl_stats;

	/*
	 * Cumulative stats from the last clear to the start of the current
	 * statistics interval.
	 */
	struct zfpm_stats cumulative_stats;

	/*
	 * Stats interval timer.
	 */
	struct event *t_stats;

	/*
	 * If non-zero, the last time when statistics were cleared.
	 */
	time_t last_stats_clear_time;

	/*
	 * Flag to track the MAC dump status to FPM
	 */
	bool fpm_mac_dump_done;
};
5adc2528 266
768e40bd
DS
/* Singleton instance of the module's global state; always accessed
 * through the zfpm_g pointer. */
static struct zfpm_glob zfpm_glob_space;
static struct zfpm_glob *zfpm_g = &zfpm_glob_space;

/* Forward declarations for functions defined later in this file. */
static int zfpm_trigger_update(struct route_node *rn, const char *reason);

static void zfpm_read_cb(struct event *thread);
static void zfpm_write_cb(struct event *thread);

static void zfpm_set_state(enum zfpm_state state, const char *reason);
static void zfpm_start_connect_timer(const char *reason);
static void zfpm_start_stats_timer(void);
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);

/* IPv4 link-local address in text form; NOTE(review): presumably parsed
 * into ipv4ll_gateway during module init — confirm elsewhere in file. */
static const char ipv4_ll_buf[16] = "169.254.0.1";
union g_addr ipv4ll_gateway;
282
5adc2528
AS
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper over the event library: returns non-zero when the
 * currently running task has used up its time slice and should yield.
 */
static inline int zfpm_thread_should_yield(struct event *t)
{
	return event_should_yield(t);
}
290
291/*
292 * zfpm_state_to_str
293 */
1d6a3ee8 294static const char *zfpm_state_to_str(enum zfpm_state state)
5adc2528 295{
d62a17ae 296 switch (state) {
5adc2528 297
d62a17ae 298 case ZFPM_STATE_IDLE:
299 return "idle";
5adc2528 300
d62a17ae 301 case ZFPM_STATE_ACTIVE:
302 return "active";
5adc2528 303
d62a17ae 304 case ZFPM_STATE_CONNECTING:
305 return "connecting";
5adc2528 306
d62a17ae 307 case ZFPM_STATE_ESTABLISHED:
308 return "established";
5adc2528 309
d62a17ae 310 default:
311 return "unknown";
312 }
5adc2528
AS
313}
314
5adc2528
AS
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given time.
 */
static time_t zfpm_get_elapsed_time(time_t reference)
{
	time_t now;

	now = monotime(NULL);

	if (now < reference) {
		/* Monotonic time must never run backwards. */
		assert(0);
		return 0; /* NDEBUG builds fall through: fail safe with 0. */
	}

	return now - reference;
}
333
5adc2528
AS
/*
 * zfpm_rnodes_iter_init
 *
 * Prepare 'iter' to walk every route_node across all RIB tables.
 */
static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	rib_tables_iter_init(&iter->tables_iter);

	/*
	 * This is a hack, but it makes implementing 'next' easier by
	 * ensuring that route_table_iter_next() will return NULL the first
	 * time we call it.
	 */
	route_table_iter_init(&iter->iter, NULL);
	route_table_iter_cleanup(&iter->iter);
}
350
351/*
352 * zfpm_rnodes_iter_next
353 */
332cba05
DS
354static inline struct route_node *
355zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
5adc2528 356{
d62a17ae 357 struct route_node *rn;
358 struct route_table *table;
5adc2528 359
d62a17ae 360 while (1) {
361 rn = route_table_iter_next(&iter->iter);
362 if (rn)
363 return rn;
5adc2528 364
d62a17ae 365 /*
366 * We've made our way through this table, go to the next one.
367 */
368 route_table_iter_cleanup(&iter->iter);
5adc2528 369
c6bbea17 370 table = rib_tables_iter_next(&iter->tables_iter);
5adc2528 371
d62a17ae 372 if (!table)
373 return NULL;
5adc2528 374
d62a17ae 375 route_table_iter_init(&iter->iter, table);
376 }
5adc2528 377
d62a17ae 378 return NULL;
5adc2528
AS
379}
380
/*
 * zfpm_rnodes_iter_pause
 *
 * Suspend the walk so it can be safely resumed later (used when the
 * conn-up/conn-down tasks yield the CPU).
 */
static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_pause(&iter->iter);
}
388
/*
 * zfpm_rnodes_iter_cleanup
 *
 * Release both the inner (table) and outer (tables) iterator state.
 */
static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_cleanup(&iter->iter);
	rib_tables_iter_cleanup(&iter->tables_iter);
}
397
/*
 * zfpm_stats_init
 *
 * Initialize a statistics block (all counters to zero).
 */
static inline void zfpm_stats_init(struct zfpm_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
}
407
/*
 * zfpm_stats_reset
 *
 * Alias of zfpm_stats_init, kept for call-site readability.
 */
static inline void zfpm_stats_reset(struct zfpm_stats *stats)
{
	zfpm_stats_init(stats);
}
415
/*
 * zfpm_stats_copy
 *
 * Copy all counters from 'src' into 'dest'.
 */
static inline void zfpm_stats_copy(const struct zfpm_stats *src,
				   struct zfpm_stats *dest)
{
	memcpy(dest, src, sizeof(*dest));
}
424
425/*
426 * zfpm_stats_compose
427 *
428 * Total up the statistics in two stats structures ('s1 and 's2') and
429 * return the result in the third argument, 'result'. Note that the
430 * pointer 'result' may be the same as 's1' or 's2'.
431 *
432 * For simplicity, the implementation below assumes that the stats
433 * structure is composed entirely of counters. This can easily be
434 * changed when necessary.
435 */
eeaf257b
DS
436static void zfpm_stats_compose(const struct zfpm_stats *s1,
437 const struct zfpm_stats *s2,
438 struct zfpm_stats *result)
5adc2528 439{
d62a17ae 440 const unsigned long *p1, *p2;
441 unsigned long *result_p;
442 int i, num_counters;
5adc2528 443
d62a17ae 444 p1 = (const unsigned long *)s1;
445 p2 = (const unsigned long *)s2;
446 result_p = (unsigned long *)result;
5adc2528 447
eeaf257b 448 num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
5adc2528 449
d62a17ae 450 for (i = 0; i < num_counters; i++) {
451 result_p[i] = p1[i] + p2[i];
452 }
5adc2528
AS
453}
454
/*
 * zfpm_read_on
 *
 * Schedule the read callback on the FPM socket.  Must not already be
 * scheduled, and the socket must be valid.
 */
static inline void zfpm_read_on(void)
{
	assert(!zfpm_g->t_read);
	assert(zfpm_g->sock >= 0);

	event_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
		       &zfpm_g->t_read);
}
466
/*
 * zfpm_write_on
 *
 * Schedule the write callback on the FPM socket.  Must not already be
 * scheduled, and the socket must be valid.
 */
static inline void zfpm_write_on(void)
{
	assert(!zfpm_g->t_write);
	assert(zfpm_g->sock >= 0);

	event_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
			&zfpm_g->t_write);
}
478
/*
 * zfpm_read_off
 *
 * Cancel the pending read event, if any (EVENT_OFF is NULL-safe).
 */
static inline void zfpm_read_off(void)
{
	EVENT_OFF(zfpm_g->t_read);
}
486
/*
 * zfpm_write_off
 *
 * Cancel the pending write event, if any (EVENT_OFF is NULL-safe).
 */
static inline void zfpm_write_off(void)
{
	EVENT_OFF(zfpm_g->t_write);
}
494
f0c459f0
DS
/* Cancel the pending connect-retry timer, if any. */
static inline void zfpm_connect_off(void)
{
	EVENT_OFF(zfpm_g->t_connect);
}
499
54033432
DS
/* Cancel the pending connection-down cleanup task, if any. */
static inline void zfpm_conn_down_off(void)
{
	EVENT_OFF(zfpm_g->t_conn_down);
}
504
5adc2528
AS
/*
 * zfpm_conn_up_thread_cb
 *
 * Callback for actions to be taken when the connection to the FPM
 * comes up: dump the RMAC table once, then walk every route_node and
 * enqueue an FPM update for each dest.  Yields periodically and
 * reschedules itself so it does not monopolize the event loop.
 */
static void zfpm_conn_up_thread_cb(struct event *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;

	iter = &zfpm_g->t_conn_up_state.iter;

	/* Abort if the connection dropped while this task was queued. */
	if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
		zfpm_debug(
			"Connection not up anymore, conn_up thread aborting");
		zfpm_g->stats.t_conn_up_aborts++;
		goto done;
	}

	if (!zfpm_g->fpm_mac_dump_done) {
		/* Enqueue FPM updates for all the RMAC entries */
		hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table,
			     NULL);
		/* mark dump done so that its not repeated after yield */
		zfpm_g->fpm_mac_dump_done = true;
	}

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			zfpm_g->stats.t_conn_up_dests_processed++;
			zfpm_trigger_update(rnode, NULL);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		/* Pause the walk and reschedule ourselves to continue. */
		zfpm_g->stats.t_conn_up_yields++;
		zfpm_rnodes_iter_pause(iter);
		event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
				     NULL, 0, &zfpm_g->t_conn_up);
		return;
	}

	zfpm_g->stats.t_conn_up_finishes++;

done:
	zfpm_rnodes_iter_cleanup(iter);
}
560
/*
 * zfpm_connection_up
 *
 * Called when the connection to the FPM comes up: arm read/write
 * events, transition to ESTABLISHED, and kick off the task that
 * pushes existing routes (and the RMAC dump) to the FPM.
 */
static void zfpm_connection_up(const char *detail)
{
	assert(zfpm_g->sock >= 0);
	zfpm_read_on();
	zfpm_write_on();
	zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);

	/*
	 * Start thread to push existing routes to the FPM.
	 */
	EVENT_OFF(zfpm_g->t_conn_up);

	zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
	/* Force a fresh RMAC dump for this connection. */
	zfpm_g->fpm_mac_dump_done = false;

	zfpm_debug("Starting conn_up thread");

	event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
			     &zfpm_g->t_conn_up);
	zfpm_g->stats.t_conn_up_starts++;
}
587
/*
 * zfpm_connect_check
 *
 * Check if an asynchronous connect() to the FPM is complete: query
 * SO_ERROR on the socket; on success bring the connection up, on
 * failure close the socket and restart the connect-retry timer.
 */
static void zfpm_connect_check(void)
{
	int status;
	socklen_t slen;
	int ret;

	/* Both read and write were armed to detect completion. */
	zfpm_read_off();
	zfpm_write_off();

	slen = sizeof(status);
	ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
			 &slen);

	if (ret >= 0 && status == 0) {
		zfpm_connection_up("async connect complete");
		return;
	}

	/*
	 * getsockopt() failed or indicated an error on the socket.
	 */
	close(zfpm_g->sock);
	zfpm_g->sock = -1;

	zfpm_start_connect_timer("getsockopt() after async connect failed");
	return;
}
620
/*
 * zfpm_conn_down_thread_cb
 *
 * Callback that is invoked to clean up state after the TCP connection
 * to the FPM goes down: drops all queued MAC updates, clears the
 * per-dest FPM flags (dequeuing any pending dests), and finally
 * restarts the connect timer.  Yields periodically like the conn-up
 * task.
 */
static void zfpm_conn_down_thread_cb(struct event *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;
	struct fpm_mac_info_t *mac = NULL;

	assert(zfpm_g->state == ZFPM_STATE_IDLE);

	/*
	 * Delink and free all fpm_mac_info_t nodes
	 * in the mac_q and fpm_mac_info_hash
	 */
	while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
		zfpm_mac_info_del(mac);

	iter = &zfpm_g->t_conn_down_state.iter;

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			/* Dequeue before clearing the flag that implies
			 * queue membership. */
			if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
				TAILQ_REMOVE(&zfpm_g->dest_q, dest,
					     fpm_q_entries);
			}

			UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);

			zfpm_g->stats.t_conn_down_dests_processed++;

			/*
			 * Check if the dest should be deleted.
			 */
			rib_gc_dest(rnode);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		zfpm_g->stats.t_conn_down_yields++;
		zfpm_rnodes_iter_pause(iter);
		event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
				     NULL, 0, &zfpm_g->t_conn_down);
		return;
	}

	zfpm_g->stats.t_conn_down_finishes++;
	zfpm_rnodes_iter_cleanup(iter);

	/*
	 * Start the process of connecting to the FPM again.
	 */
	zfpm_start_connect_timer("cleanup complete");
}
686
/*
 * zfpm_connection_down
 *
 * Called when the connection to the FPM has gone down: tear down I/O
 * events and buffers, close the socket, transition to IDLE, and
 * schedule the cleanup task.
 */
static void zfpm_connection_down(const char *detail)
{
	if (!detail)
		detail = "unknown";

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);

	zlog_info("connection to the FPM has gone down: %s", detail);

	zfpm_read_off();
	zfpm_write_off();

	stream_reset(zfpm_g->ibuf);
	stream_reset(zfpm_g->obuf);

	if (zfpm_g->sock >= 0) {
		close(zfpm_g->sock);
		zfpm_g->sock = -1;
	}

	/*
	 * Start thread to clean up state after the connection goes down.
	 */
	assert(!zfpm_g->t_conn_down);
	zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
	/* Defensive cancel; a no-op when the assert above holds. */
	zfpm_conn_down_off();
	event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
			     &zfpm_g->t_conn_down);
	zfpm_g->stats.t_conn_down_starts++;

	zfpm_set_state(ZFPM_STATE_IDLE, detail);
}
724
/*
 * zfpm_read_cb
 *
 * Read callback on the FPM socket.  Incrementally reads one framed
 * FPM message (header first, then body) into ibuf across possibly
 * multiple invocations; complete messages are currently discarded.
 * Any read error or EOF tears the connection down.
 */
static void zfpm_read_cb(struct event *thread)
{
	size_t already;
	struct stream *ibuf;
	uint16_t msg_len;
	fpm_msg_hdr_t *hdr;

	zfpm_g->stats.read_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	ibuf = zfpm_g->ibuf;

	/* 'already' = bytes accumulated from previous partial reads. */
	already = stream_get_endp(ibuf);
	if (already < FPM_MSG_HDR_LEN) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock,
					FPM_MSG_HDR_LEN - already);
		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "closed socket in read(%d): %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("closed socket in read");
			return;
		}

		/* Header still incomplete; wait for more data. */
		if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
			goto done;

		already = FPM_MSG_HDR_LEN;
	}

	stream_set_getp(ibuf, 0);

	hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);

	if (!fpm_msg_hdr_ok(hdr)) {
		zfpm_connection_down("invalid message header");
		return;
	}

	msg_len = fpm_msg_len(hdr);

	/*
	 * Read out the rest of the packet.
	 */
	if (already < msg_len) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);

		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "failed to read message(%d) %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("failed to read message");
			return;
		}

		/* Body still incomplete; wait for more data. */
		if (nbyte != (ssize_t)(msg_len - already))
			goto done;
	}

	/*
	 * Just throw it away for now.
	 */
	stream_reset(ibuf);

done:
	zfpm_read_on();
}
819
21d814eb
AD
820static bool zfpm_updates_pending(void)
821{
822 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
823 return true;
824
825 return false;
826}
827
5adc2528
AS
828/*
829 * zfpm_writes_pending
830 *
2951a7a4 831 * Returns true if we may have something to write to the FPM.
5adc2528 832 */
d62a17ae 833static int zfpm_writes_pending(void)
5adc2528
AS
834{
835
d62a17ae 836 /*
837 * Check if there is any data in the outbound buffer that has not
838 * been written to the socket yet.
839 */
840 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
841 return 1;
5adc2528 842
d62a17ae 843 /*
21d814eb 844 * Check if there are any updates scheduled on the outbound queues.
d62a17ae 845 */
21d814eb 846 if (zfpm_updates_pending())
d62a17ae 847 return 1;
5adc2528 848
d62a17ae 849 return 0;
5adc2528
AS
850}
851
/*
 * zfpm_encode_route
 *
 * Encode a message to the FPM with information about the given route.
 * A NULL 're' encodes a route deletion.  '*msg_type' is set to the FPM
 * message type matching the configured format.
 *
 * Returns the number of bytes written to the buffer. 0 or a negative
 * value indicates an error (including a format compiled out of this
 * build).
 */
static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
				    char *in_buf, size_t in_buf_len,
				    fpm_msg_type_e *msg_type)
{
	size_t len;
#ifdef HAVE_NETLINK
	int cmd;
#endif
	len = 0;

	*msg_type = FPM_MSG_TYPE_NONE;

	switch (zfpm_g->message_format) {

	case ZFPM_MSG_FORMAT_PROTOBUF:
#ifdef HAVE_PROTOBUF
		len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
						 in_buf_len);
		*msg_type = FPM_MSG_TYPE_PROTOBUF;
#endif
		break;

	case ZFPM_MSG_FORMAT_NETLINK:
#ifdef HAVE_NETLINK
		*msg_type = FPM_MSG_TYPE_NETLINK;
		cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
		len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
						in_buf_len);
		/* FPM framing requires aligned message lengths. */
		assert(fpm_msg_align(len) == len);
#endif /* HAVE_NETLINK */
		break;

	case ZFPM_MSG_FORMAT_NONE:
		break;
	}

	return len;
}
898
/*
 * zfpm_route_for_update
 *
 * Returns the re that is to be sent to the FPM for a given dest
 * (the currently selected FIB entry; NULL means deletion).
 */
struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
{
	return dest->selected_fib;
}
908
/*
 * Define an enum for return codes for queue processing functions
 *
 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
 * Stop processing all the queues and empty the buffer by writing its content
 * to the socket.
 *
 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
 * empty or we have processed enough updates from this queue.
 * So, move on to the next queue.
 */
enum {
	FPM_WRITE_STOP = 0,
	FPM_GOTO_NEXT_Q = 1
};

/* Max updates drained from one queue before yielding to the next. */
#define FPM_QUEUE_PROCESS_LIMIT 10000
926
/*
 * zfpm_build_route_updates
 *
 * Process the dest_q queue and write FPM messages to the outbound buffer.
 *
 * Returns FPM_WRITE_STOP when the buffer is full, FPM_GOTO_NEXT_Q when
 * the queue is empty or the per-queue limit is reached.
 */
static int zfpm_build_route_updates(void)
{
	struct stream *s;
	rib_dest_t *dest;
	unsigned char *buf, *data, *buf_end;
	size_t msg_len;
	size_t data_len;
	fpm_msg_hdr_t *hdr;
	struct route_entry *re;
	int is_add, write_msg;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->dest_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/*
		 * Make sure there is enough space to write another message.
		 */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		dest = TAILQ_FIRST(&zfpm_g->dest_q);
		if (!dest)
			return FPM_GOTO_NEXT_Q;

		/* Queued dests must carry the update flag. */
		assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));

		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);

		/* NULL re => this update is a deletion. */
		re = zfpm_route_for_update(dest);
		is_add = re ? 1 : 0;

		write_msg = 1;

		/*
		 * If this is a route deletion, and we have not sent the route
		 * to the FPM previously, skip it.
		 */
		if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
			write_msg = 0;
			zfpm_g->stats.nop_deletes_skipped++;
		}

		if (write_msg) {
			data_len = zfpm_encode_route(dest, re, (char *)data,
						     buf_end - data, &msg_type);

			if (data_len) {
				hdr->msg_type = msg_type;
				msg_len = fpm_data_len_to_msg_len(data_len);
				hdr->msg_len = htons(msg_len);
				stream_forward_endp(s, msg_len);

				if (is_add)
					zfpm_g->stats.route_adds++;
				else
					zfpm_g->stats.route_dels++;
			} else {
				zlog_err("%s: Encoding Prefix: %pRN No valid nexthops",
					 __func__, dest->rnode);
			}
		}

		/*
		 * Remove the dest from the queue, and reset the flag.
		 */
		UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
		TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);

		/* Track whether the FPM currently knows about this dest. */
		if (is_add) {
			SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		} else {
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		}

		/*
		 * Delete the destination if necessary.
		 */
		if (rib_gc_dest(dest->rnode))
			zfpm_g->stats.dests_del_after_update++;

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (true);
}
1035
/*
 * zfpm_encode_mac
 *
 * Encode a message to FPM with information about the given MAC.
 * Only the netlink format supports MAC updates; other formats leave
 * '*msg_type' as FPM_MSG_TYPE_NONE and return 0.
 *
 * Returns the number of bytes written to the buffer.
 */
static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
				  size_t in_buf_len, fpm_msg_type_e *msg_type)
{
	size_t len = 0;

	*msg_type = FPM_MSG_TYPE_NONE;

	switch (zfpm_g->message_format) {

	case ZFPM_MSG_FORMAT_NONE:
		break;
	case ZFPM_MSG_FORMAT_NETLINK:
#ifdef HAVE_NETLINK
		len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
		/* FPM framing requires aligned message lengths. */
		assert(fpm_msg_align(len) == len);
		*msg_type = FPM_MSG_TYPE_NETLINK;
#endif /* HAVE_NETLINK */
		break;
	case ZFPM_MSG_FORMAT_PROTOBUF:
		break;
	}
	return len;
}
1066
/*
 * zfpm_build_mac_updates
 *
 * Process the mac_q queue and write FPM MAC messages to the outbound
 * buffer.  Returns FPM_WRITE_STOP when the buffer is full,
 * FPM_GOTO_NEXT_Q when the queue is empty or the per-queue limit is
 * reached.
 */
static int zfpm_build_mac_updates(void)
{
	struct stream *s;
	struct fpm_mac_info_t *mac;
	unsigned char *buf, *data, *buf_end;
	fpm_msg_hdr_t *hdr;
	size_t data_len, msg_len;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->mac_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/* Make sure there is enough space to write another message. */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		mac = TAILQ_FIRST(&zfpm_g->mac_q);
		if (!mac)
			return FPM_GOTO_NEXT_Q;

		/* Check for no-op */
		if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
			zfpm_g->stats.nop_deletes_skipped++;
			zfpm_mac_info_del(mac);
			continue;
		}

		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);
		data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
					   &msg_type);
		/* MAC encoding must never fail once queued. */
		assert(data_len);

		hdr->msg_type = msg_type;
		msg_len = fpm_data_len_to_msg_len(data_len);
		hdr->msg_len = htons(msg_len);
		stream_forward_endp(s, msg_len);

		/* Remove the MAC from the queue, and delete it. */
		zfpm_mac_info_del(mac);

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (1);
}
1128
21d814eb
AD
1129/*
1130 * zfpm_build_updates
1131 *
1132 * Process the outgoing queues and write messages to the outbound
1133 * buffer.
1134 */
1135static void zfpm_build_updates(void)
1136{
1137 struct stream *s;
1138
1139 s = zfpm_g->obuf;
1140 assert(stream_empty(s));
1141
1142 do {
1143 /*
1144 * Stop processing the queues if zfpm_g->obuf is full
1145 * or we do not have more updates to process
1146 */
1147 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1148 break;
1149 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1150 break;
1151 } while (zfpm_updates_pending());
1152}
1153
5adc2528
AS
/*
 * zfpm_write_cb
 *
 * Write callback on the FPM socket: repeatedly refill the outbound
 * buffer from the update queues and flush it to the socket, bounded by
 * ZFPM_MAX_WRITES_PER_RUN and the event library's yield check.
 * Reschedules itself while work remains.
 */
static void zfpm_write_cb(struct event *thread)
{
	struct stream *s;
	int num_writes;

	zfpm_g->stats.write_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	num_writes = 0;

	do {
		int bytes_to_write, bytes_written;

		s = zfpm_g->obuf;

		/*
		 * If the stream is empty, try fill it up with data.
		 */
		if (stream_empty(s)) {
			zfpm_build_updates();
		}

		bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
		if (!bytes_to_write)
			break;

		bytes_written =
			write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
		zfpm_g->stats.write_calls++;
		num_writes++;

		if (bytes_written < 0) {
			/* Retryable errors just end this run; the pending
			 * check below reschedules us. */
			if (ERRNO_IO_RETRY(errno))
				break;

			zfpm_connection_down("failed to write to socket");
			return;
		}

		if (bytes_written != bytes_to_write) {

			/*
			 * Partial write.
			 */
			stream_forward_getp(s, bytes_written);
			zfpm_g->stats.partial_writes++;
			break;
		}

		/*
		 * We've written out the entire contents of the stream.
		 */
		stream_reset(s);

		if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
			zfpm_g->stats.max_writes_hit++;
			break;
		}

		if (zfpm_thread_should_yield(thread)) {
			zfpm_g->stats.t_write_yields++;
			break;
		}
	} while (1);

	if (zfpm_writes_pending())
		zfpm_write_on();
}
1235
/*
 * zfpm_connect_cb
 *
 * Connect-timer callback: attempts a non-blocking TCP connection to the
 * FPM server.  Immediate success brings the connection up; EINPROGRESS
 * moves the state machine to CONNECTING and waits for the read/write
 * callbacks; any other failure restarts the retry timer.
 */
static void zfpm_connect_cb(struct event *t)
{
	int sock, ret;
	struct sockaddr_in serv;

	assert(zfpm_g->state == ZFPM_STATE_ACTIVE);

	sock = socket(AF_INET, SOCK_STREAM, 0);
	if (sock < 0) {
		zlog_err("Failed to create socket for connect(): %s",
			 strerror(errno));
		zfpm_g->stats.connect_no_sock++;
		return;
	}

	set_nonblocking(sock);

	/* Make server socket. */
	memset(&serv, 0, sizeof(serv));
	serv.sin_family = AF_INET;
	serv.sin_port = htons(zfpm_g->fpm_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
	serv.sin_len = sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
	/* Default to loopback when no server address was configured;
	 * fpm_server is already in network byte order (set via inet_addr). */
	if (!zfpm_g->fpm_server)
		serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	else
		serv.sin_addr.s_addr = (zfpm_g->fpm_server);

	/*
	 * Connect to the FPM.
	 */
	zfpm_g->connect_calls++;
	zfpm_g->stats.connect_calls++;
	zfpm_g->last_connect_call_time = monotime(NULL);

	ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
	if (ret >= 0) {
		zfpm_g->sock = sock;
		zfpm_connection_up("connect succeeded");
		return;
	}

	if (errno == EINPROGRESS) {
		/* Async connect in flight: completion is detected by the
		 * read/write callbacks via zfpm_connect_check(). */
		zfpm_g->sock = sock;
		zfpm_read_on();
		zfpm_write_on();
		zfpm_set_state(ZFPM_STATE_CONNECTING,
			       "async connect in progress");
		return;
	}

	zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
	close(sock);

	/*
	 * Restart timer for retrying connection.
	 */
	zfpm_start_connect_timer("connect() failed");
}
1299
1300/*
1301 * zfpm_set_state
1302 *
1303 * Move state machine into the given state.
1304 */
1d6a3ee8 1305static void zfpm_set_state(enum zfpm_state state, const char *reason)
5adc2528 1306{
1d6a3ee8 1307 enum zfpm_state cur_state = zfpm_g->state;
d62a17ae 1308
1309 if (!reason)
1310 reason = "Unknown";
1311
1312 if (state == cur_state)
1313 return;
1314
1315 zfpm_debug("beginning state transition %s -> %s. Reason: %s",
1316 zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
1317 reason);
1318
1319 switch (state) {
1320
1321 case ZFPM_STATE_IDLE:
1322 assert(cur_state == ZFPM_STATE_ESTABLISHED);
1323 break;
1324
1325 case ZFPM_STATE_ACTIVE:
1326 assert(cur_state == ZFPM_STATE_IDLE
1327 || cur_state == ZFPM_STATE_CONNECTING);
1328 assert(zfpm_g->t_connect);
1329 break;
1330
1331 case ZFPM_STATE_CONNECTING:
1332 assert(zfpm_g->sock);
1333 assert(cur_state == ZFPM_STATE_ACTIVE);
1334 assert(zfpm_g->t_read);
1335 assert(zfpm_g->t_write);
1336 break;
1337
1338 case ZFPM_STATE_ESTABLISHED:
1339 assert(cur_state == ZFPM_STATE_ACTIVE
1340 || cur_state == ZFPM_STATE_CONNECTING);
1341 assert(zfpm_g->sock);
1342 assert(zfpm_g->t_read);
1343 assert(zfpm_g->t_write);
1344 break;
1345 }
1346
1347 zfpm_g->state = state;
5adc2528
AS
1348}
1349
1350/*
1351 * zfpm_calc_connect_delay
1352 *
1353 * Returns the number of seconds after which we should attempt to
1354 * reconnect to the FPM.
1355 */
d62a17ae 1356static long zfpm_calc_connect_delay(void)
5adc2528 1357{
d62a17ae 1358 time_t elapsed;
5adc2528 1359
d62a17ae 1360 /*
1361 * Return 0 if this is our first attempt to connect.
1362 */
1363 if (zfpm_g->connect_calls == 0) {
1364 return 0;
1365 }
5adc2528 1366
d62a17ae 1367 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
5adc2528 1368
d62a17ae 1369 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1370 return 0;
1371 }
5adc2528 1372
d62a17ae 1373 return ZFPM_CONNECT_RETRY_IVL - elapsed;
5adc2528
AS
1374}
1375
1376/*
1377 * zfpm_start_connect_timer
1378 */
d62a17ae 1379static void zfpm_start_connect_timer(const char *reason)
5adc2528 1380{
d62a17ae 1381 long delay_secs;
5adc2528 1382
d62a17ae 1383 assert(!zfpm_g->t_connect);
1384 assert(zfpm_g->sock < 0);
5adc2528 1385
d62a17ae 1386 assert(zfpm_g->state == ZFPM_STATE_IDLE
1387 || zfpm_g->state == ZFPM_STATE_ACTIVE
1388 || zfpm_g->state == ZFPM_STATE_CONNECTING);
5adc2528 1389
d62a17ae 1390 delay_secs = zfpm_calc_connect_delay();
1391 zfpm_debug("scheduling connect in %ld seconds", delay_secs);
5adc2528 1392
907a2395
DS
1393 event_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
1394 &zfpm_g->t_connect);
d62a17ae 1395 zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
5adc2528
AS
1396}
1397
1398/*
1399 * zfpm_is_enabled
1400 *
2951a7a4 1401 * Returns true if the zebra FPM module has been enabled.
5adc2528 1402 */
d62a17ae 1403static inline int zfpm_is_enabled(void)
5adc2528 1404{
d62a17ae 1405 return zfpm_g->enabled;
5adc2528
AS
1406}
1407
1408/*
1409 * zfpm_conn_is_up
1410 *
2951a7a4 1411 * Returns true if the connection to the FPM is up.
5adc2528 1412 */
d62a17ae 1413static inline int zfpm_conn_is_up(void)
5adc2528 1414{
d62a17ae 1415 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1416 return 0;
5adc2528 1417
d62a17ae 1418 assert(zfpm_g->sock >= 0);
5adc2528 1419
d62a17ae 1420 return 1;
5adc2528
AS
1421}
1422
1423/*
1424 * zfpm_trigger_update
1425 *
1426 * The zebra code invokes this function to indicate that we should
1427 * send an update to the FPM about the given route_node.
1428 */
d62a17ae 1429static int zfpm_trigger_update(struct route_node *rn, const char *reason)
5adc2528 1430{
d62a17ae 1431 rib_dest_t *dest;
d62a17ae 1432
1433 /*
1434 * Ignore if the connection is down. We will update the FPM about
1435 * all destinations once the connection comes up.
1436 */
1437 if (!zfpm_conn_is_up())
1438 return 0;
1439
1440 dest = rib_dest_from_rnode(rn);
1441
d62a17ae 1442 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
1443 zfpm_g->stats.redundant_triggers++;
1444 return 0;
1445 }
1446
1447 if (reason) {
2dbe669b
DA
1448 zfpm_debug("%pFX triggering update to FPM - Reason: %s", &rn->p,
1449 reason);
d62a17ae 1450 }
1451
1452 SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1453 TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
1454 zfpm_g->stats.updates_triggered++;
1455
1456 /*
1457 * Make sure that writes are enabled.
1458 */
1459 if (zfpm_g->t_write)
1460 return 0;
1461
1462 zfpm_write_on();
1463 return 0;
5adc2528
AS
1464}
1465
0d0f516c 1466/*
1467 * zfpm_trigger_remove
1468 *
1469 * The zebra code invokes this function to indicate that we should
1470 * send an remove to the FPM about the given route_node.
1471 */
1472
1473static int zfpm_trigger_remove(struct route_node *rn)
1474{
1475 rib_dest_t *dest;
1476
1477 if (!zfpm_conn_is_up())
1478 return 0;
1479
1480 dest = rib_dest_from_rnode(rn);
1481 if (!CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM))
1482 return 0;
1483
1484 zfpm_debug("%pRN Removing from update queue shutting down", rn);
1485
1486 UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1487 TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);
1488
1489 return 0;
1490}
1491
e5218ec8
AD
1492/*
1493 * Generate Key for FPM MAC info hash entry
e5218ec8
AD
1494 */
1495static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1496{
1497 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1498 uint32_t mac_key;
1499
1500 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1501
1502 return jhash_2words(mac_key, fpm_mac->vni, 0);
1503}
1504
1505/*
1506 * Compare function for FPM MAC info hash lookup
1507 */
1508static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1509{
1510 const struct fpm_mac_info_t *fpm_mac1 = p1;
1511 const struct fpm_mac_info_t *fpm_mac2 = p2;
1512
1513 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1514 != 0)
1515 return false;
e5218ec8
AD
1516 if (fpm_mac1->vni != fpm_mac2->vni)
1517 return false;
1518
1519 return true;
1520}
1521
1522/*
1523 * Lookup FPM MAC info hash entry.
1524 */
1525static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
1526{
1527 return hash_lookup(zfpm_g->fpm_mac_info_table, key);
1528}
1529
1530/*
1531 * Callback to allocate fpm_mac_info_t structure.
1532 */
1533static void *zfpm_mac_info_alloc(void *p)
1534{
1535 const struct fpm_mac_info_t *key = p;
1536 struct fpm_mac_info_t *fpm_mac;
1537
1538 fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));
1539
1540 memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
e5218ec8
AD
1541 fpm_mac->vni = key->vni;
1542
1543 return (void *)fpm_mac;
1544}
1545
/*
 * Delink and free fpm_mac_info_t.
 *
 * Removes the entry from the MAC info hash table and from the outgoing
 * MAC queue before freeing it.  The caller must not reference fpm_mac
 * after this returns.
 */
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
{
	hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
	TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
	XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
}
1555
a780a738
AD
/*
 * zfpm_trigger_rmac_update
 *
 * Zebra code invokes this function (zebra_rmac_update hook) to indicate
 * that we should send an update to FPM for the given MAC entry.
 *
 * This function checks if we already have enqueued an update for this RMAC,
 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
 */
static int zfpm_trigger_rmac_update(struct zebra_mac *rmac,
				    struct zebra_l3vni *zl3vni, bool delete,
				    const char *reason)
{
	struct fpm_mac_info_t *fpm_mac, key;
	struct interface *vxlan_if, *svi_if;
	bool mac_found = false;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	if (reason) {
		zfpm_debug("triggering update to FPM - Reason: %s - %pEA",
			   reason, &rmac->macaddr);
	}

	vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
	svi_if = zl3vni_map_to_svi_if(zl3vni);

	/* Build the (MAC, VNI) lookup key for the pending-update table. */
	memset(&key, 0, sizeof(key));

	memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
	key.vni = zl3vni->vni;

	/* Check if this MAC is already present in the queue. */
	fpm_mac = zfpm_mac_info_lookup(&key);

	if (fpm_mac) {
		mac_found = true;

		/*
		 * If the enqueued op is "add" and current op is "delete",
		 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
		 * While processing FPM queue, we will silently delete this
		 * MAC entry without sending any update for this MAC.
		 */
		if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
		    delete == 1) {
			SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
			UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
			return 0;
		}
	} else
		/* New entry: allocate and insert into the hash table. */
		fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
				   zfpm_mac_info_alloc);

	/* Refresh the entry with the latest state of the MAC. */
	fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
	fpm_mac->zebra_flags = rmac->flags;
	fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
	fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;

	SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
	if (delete)
		SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
	else
		UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);

	/* Only newly created entries need queueing; a found entry is
	 * already on the MAC queue. */
	if (!mac_found)
		TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);

	zfpm_g->stats.updates_triggered++;

	/* If writes are already enabled, return. */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1638
fbe748e5
AD
1639/*
1640 * This function is called when the FPM connections is established.
1641 * Iterate over all the RMAC entries for the given L3VNI
1642 * and enqueue the RMAC for FPM processing.
1643 */
1ac88792 1644static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
fbe748e5
AD
1645 void *args)
1646{
3198b2b3 1647 struct zebra_mac *zrmac = (struct zebra_mac *)bucket->data;
05843a27 1648 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)args;
fbe748e5
AD
1649
1650 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1651}
1652
1653/*
1654 * This function is called when the FPM connections is established.
1655 * This function iterates over all the L3VNIs to trigger
1656 * FPM updates for RMACs currently available.
1657 */
1ac88792 1658static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
fbe748e5 1659{
05843a27 1660 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)bucket->data;
fbe748e5
AD
1661
1662 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1663 (void *)zl3vni);
1664}
1665
/*
 * zfpm_stats_timer_cb
 *
 * Periodic timer that rolls the stats collected over the last interval
 * into the cumulative counters and starts a fresh interval.
 */
static void zfpm_stats_timer_cb(struct event *t)
{
	zfpm_g->t_stats = NULL;

	/*
	 * Remember the stats collected in the last interval for display
	 * purposes.
	 */
	zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);

	/*
	 * Add the current set of stats into the cumulative statistics.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &zfpm_g->cumulative_stats);

	/*
	 * Start collecting stats afresh over the next interval.
	 */
	zfpm_stats_reset(&zfpm_g->stats);

	zfpm_start_stats_timer();
}
1692
1693/*
1694 * zfpm_stop_stats_timer
1695 */
d62a17ae 1696static void zfpm_stop_stats_timer(void)
5adc2528 1697{
d62a17ae 1698 if (!zfpm_g->t_stats)
1699 return;
5adc2528 1700
d62a17ae 1701 zfpm_debug("Stopping existing stats timer");
e16d030c 1702 EVENT_OFF(zfpm_g->t_stats);
5adc2528
AS
1703}
1704
/*
 * zfpm_start_stats_timer
 *
 * Schedule the next stats rollover ZFPM_STATS_IVL_SECS from now.  The
 * previous timer must already have fired or been cancelled.
 */
void zfpm_start_stats_timer(void)
{
	assert(!zfpm_g->t_stats);

	event_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
			ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
1715
/*
 * Helper macro for zfpm_show_stats() below.
 *
 * Prints one counter line: name, cumulative total, and the value from
 * the last stats interval.  Relies on 'vty' and 'total_stats' being in
 * scope at the expansion site.
 */
#define ZFPM_SHOW_STAT(counter)                                                \
	do {                                                                   \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                  \
			total_stats.counter, zfpm_g->last_ivl_stats.counter);  \
	} while (0)
5adc2528
AS
1724
/*
 * zfpm_show_stats
 *
 * Emit all FPM counters to the vty: cumulative totals alongside the
 * values collected during the last stats interval.
 */
static void zfpm_show_stats(struct vty *vty)
{
	struct zfpm_stats total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* The "cleared" timestamp is only meaningful after a manual
	 * "clear zebra fpm stats". */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1774
/*
 * zfpm_clear_stats
 *
 * Reset all FPM counters and restart the stats interval timer so the
 * next interval starts from now.
 */
static void zfpm_clear_stats(struct vty *vty)
{
	if (!zfpm_is_enabled()) {
		vty_out(vty, "The FPM module is not enabled...\n");
		return;
	}

	zfpm_stats_reset(&zfpm_g->stats);
	zfpm_stats_reset(&zfpm_g->last_ivl_stats);
	zfpm_stats_reset(&zfpm_g->cumulative_stats);

	/* Restart the timer so the interval boundary aligns with the
	 * clear operation. */
	zfpm_stop_stats_timer();
	zfpm_start_stats_timer();

	zfpm_g->last_stats_clear_time = monotime(NULL);

	vty_out(vty, "Cleared FPM stats\n");
}
1796
/*
 * show_zebra_fpm_stats
 *
 * CLI handler for "show zebra fpm stats".
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
1811
/*
 * clear_zebra_fpm_stats
 *
 * CLI handler for "clear zebra fpm stats".
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1826
/*
 * fpm_remote_ip
 *
 * CLI handler for "fpm connection ip A.B.C.D port (1-65535)": records
 * the FPM server address and TCP port used for subsequent connection
 * attempts.  Takes effect on the next (re)connect.
 */
DEFUN (fpm_remote_ip,
       fpm_remote_ip_cmd,
       "fpm connection ip A.B.C.D port (1-65535)",
       "Forwarding Path Manager\n"
       "Configure FPM connection\n"
       "Connect to IPv4 address\n"
       "Connect to IPv4 address\n"
       "TCP port number\n"
       "TCP port number\n")
{

	in_addr_t fpm_server;
	uint32_t port_no;

	/* inet_addr() yields the address in network byte order; it is
	 * stored as-is and used directly in sin_addr later. */
	fpm_server = inet_addr(argv[3]->arg);
	if (fpm_server == INADDR_NONE)
		return CMD_ERR_INCOMPLETE;

	port_no = atoi(argv[5]->arg);
	/* NOTE(review): the CLI token (1-65535) already constrains the
	 * port; this range check is defensive. */
	if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
		return CMD_ERR_INCOMPLETE;

	zfpm_g->fpm_server = fpm_server;
	zfpm_g->fpm_port = port_no;


	return CMD_SUCCESS;
}
1858
/*
 * no_fpm_remote_ip
 *
 * CLI handler for "no fpm connection ip ...": restores the default FPM
 * server address/port, but only when the given values match the current
 * configuration.
 */
DEFUN (no_fpm_remote_ip,
       no_fpm_remote_ip_cmd,
       "no fpm connection ip A.B.C.D port (1-65535)",
       NO_STR
       "Forwarding Path Manager\n"
       "Remove configured FPM connection\n"
       "Connect to IPv4 address\n"
       "Connect to IPv4 address\n"
       "TCP port number\n"
       "TCP port number\n")
{
	/* Reject the command if it does not name the active config. */
	if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
	    || zfpm_g->fpm_port != atoi(argv[6]->arg))
		return CMD_ERR_NO_MATCH;

	zfpm_g->fpm_server = FPM_DEFAULT_IP;
	zfpm_g->fpm_port = FPM_DEFAULT_PORT;

	return CMD_SUCCESS;
}
711ff0ba 1879
fb0aa886
AS
/*
 * zfpm_init_message_format
 *
 * Select the wire format used to talk to the FPM ("netlink" or
 * "protobuf").  With no explicit format, prefer netlink when compiled
 * in, falling back to protobuf.  If the requested format was not
 * compiled in, message_format stays ZFPM_MSG_FORMAT_NONE and the FPM
 * module is later disabled by zfpm_init().
 */
static inline void zfpm_init_message_format(const char *format)
{
	int have_netlink, have_protobuf;

#ifdef HAVE_NETLINK
	have_netlink = 1;
#else
	have_netlink = 0;
#endif

#ifdef HAVE_PROTOBUF
	have_protobuf = 1;
#else
	have_protobuf = 0;
#endif

	zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;

	/* No format requested: pick the best available, netlink first. */
	if (!format) {
		if (have_netlink) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		} else if (have_protobuf) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		}
		return;
	}

	if (!strcmp("netlink", format)) {
		if (!have_netlink) {
			flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
				 "FPM netlink message format is not available");
			return;
		}
		zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		return;
	}

	if (!strcmp("protobuf", format)) {
		if (!have_protobuf) {
			flog_err(
				EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
				"FPM protobuf message format is not available");
			return;
		}
		/* Protobuf still works but is on its way out; warn loudly. */
		flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
			  "FPM protobuf message format is deprecated and scheduled to be removed. Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
		zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		return;
	}

	flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
		  format);
}
1936
711ff0ba 1937/**
d62a17ae 1938 * fpm_remote_srv_write
711ff0ba 1939 *
d62a17ae 1940 * Module to write remote fpm connection
711ff0ba
USK
1941 *
1942 * Returns ZERO on success.
1943 */
1944
d62a17ae 1945static int fpm_remote_srv_write(struct vty *vty)
711ff0ba 1946{
d62a17ae 1947 struct in_addr in;
711ff0ba 1948
d62a17ae 1949 in.s_addr = zfpm_g->fpm_server;
711ff0ba 1950
9d1c2659 1951 if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
996c9314
LB
1952 && zfpm_g->fpm_server != INADDR_ANY)
1953 || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
9bcef951 1954 vty_out(vty, "fpm connection ip %pI4 port %d\n", &in,
d62a17ae 1955 zfpm_g->fpm_port);
711ff0ba 1956
d62a17ae 1957 return 0;
711ff0ba
USK
1958}
1959
1960
/* Forward declaration; the definition appears earlier in this file. */
static int fpm_remote_srv_write(struct vty *vty);
/* Zebra node: hooks fpm_remote_srv_write into "write config" so the
 * FPM connection line is saved. */
static struct cmd_node zebra_node = {
	.name = "zebra",
	.node = ZEBRA_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "",
	.config_write = fpm_remote_srv_write,
};
4f8ea50c
DL
1970
1971
5adc2528
AS
/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module, run via the
 * frr_late_init hook.  Port, enable flag and message format are derived
 * internally: 'format' comes from the module load arguments and can be
 * "netlink" or "protobuf".
 *
 * Returns 0 on success.
 */
static int zfpm_init(struct event_loop *master)
{
	int enable = 1;
	uint16_t port = 0;
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t entries */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->sock = -1;
	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	/* Precompute the IPv4 link-local gateway address used elsewhere
	 * in the FPM code. */
	memset(&ipv4ll_gateway, 0, sizeof(ipv4ll_gateway));
	if (inet_pton(AF_INET, ipv4_ll_buf, &ipv4ll_gateway.ipv4) != 1)
		zlog_warn("inet_pton failed for %s", ipv4_ll_buf);

	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to defaults where nothing has been configured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
5adc2528 2041
f0c459f0
DS
/*
 * zfpm_fini
 *
 * Shutdown hook (frr_early_fini): cancel all FPM threads/timers and
 * unregister the hooks that could otherwise queue work while zebra is
 * tearing down.
 */
static int zfpm_fini(void)
{
	zfpm_write_off();
	zfpm_read_off();
	zfpm_connect_off();
	zfpm_conn_down_off();

	zfpm_stop_stats_timer();

	hook_unregister(rib_update, zfpm_trigger_update);
	/* Stop RMAC update hook calls once shutdown has begun. */
	hook_unregister(zebra_rmac_update, zfpm_trigger_rmac_update);

	return 0;
}
2056
/*
 * zebra_fpm_module_init
 *
 * Module entry point: register the hooks that feed route and RMAC
 * events into the FPM, plus init/fini lifecycle hooks.
 */
static int zebra_fpm_module_init(void)
{
	hook_register(rib_update, zfpm_trigger_update);
	hook_register(rib_shutdown, zfpm_trigger_remove);
	hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
	hook_register(frr_late_init, zfpm_init);
	hook_register(frr_early_fini, zfpm_fini);
	return 0;
}
4f8ea50c 2066
/* Register this loadable module with the FRR module framework. */
FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
		 .description = "zebra FPM (Forwarding Plane Manager) module",
		 .init = zebra_fpm_module_init,
);