2 * Main implementation file for interface to Forwarding Plane Manager.
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
7 * This file is part of GNU Zebra.
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; see the file COPYING; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
35 #include "zebra/rib.h"
36 #include "zebra/zserv.h"
37 #include "zebra/zebra_ns.h"
38 #include "zebra/zebra_vrf.h"
39 #include "zebra/zebra_errors.h"
40 #include "zebra/zebra_memory.h"
43 #include "zebra_fpm_private.h"
44 #include "zebra/zebra_router.h"
45 #include "zebra_vxlan_private.h"
47 DEFINE_MTYPE_STATIC(ZEBRA
, FPM_MAC_INFO
, "FPM_MAC_INFO");
50 * Interval at which we attempt to connect to the FPM.
52 #define ZFPM_CONNECT_RETRY_IVL 5
55 * Sizes of outgoing and incoming stream buffers for writing/reading
58 #define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
59 #define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
62 * The maximum number of times the FPM socket write callback can call
63 * 'write' before it yields.
65 #define ZFPM_MAX_WRITES_PER_RUN 10
68 * Interval over which we collect statistics.
70 #define ZFPM_STATS_IVL_SECS 10
71 #define FPM_MAX_MAC_MSG_LEN 512
73 static void zfpm_iterate_rmac_table(struct hash_bucket
*backet
, void *args
);
76 * Structure that holds state for iterating over all route_node
77 * structures that are candidates for being communicated to the FPM.
79 struct zfpm_rnodes_iter
{
80 rib_tables_iter_t tables_iter
;
81 route_table_iter_t iter
;
88 unsigned long connect_calls
;
89 unsigned long connect_no_sock
;
91 unsigned long read_cb_calls
;
93 unsigned long write_cb_calls
;
94 unsigned long write_calls
;
95 unsigned long partial_writes
;
96 unsigned long max_writes_hit
;
97 unsigned long t_write_yields
;
99 unsigned long nop_deletes_skipped
;
100 unsigned long route_adds
;
101 unsigned long route_dels
;
103 unsigned long updates_triggered
;
104 unsigned long redundant_triggers
;
106 unsigned long dests_del_after_update
;
108 unsigned long t_conn_down_starts
;
109 unsigned long t_conn_down_dests_processed
;
110 unsigned long t_conn_down_yields
;
111 unsigned long t_conn_down_finishes
;
113 unsigned long t_conn_up_starts
;
114 unsigned long t_conn_up_dests_processed
;
115 unsigned long t_conn_up_yields
;
116 unsigned long t_conn_up_aborts
;
117 unsigned long t_conn_up_finishes
;
121 * States for the FPM state machine.
126 * In this state we are not yet ready to connect to the FPM. This
127 * can happen when this module is disabled, or if we're cleaning up
128 * after a connection has gone down.
133 * Ready to talk to the FPM and periodically trying to connect to
139 * In the middle of bringing up a TCP connection. Specifically,
140 * waiting for a connect() call to complete asynchronously.
142 ZFPM_STATE_CONNECTING
,
145 * TCP connection to the FPM is up.
147 ZFPM_STATE_ESTABLISHED
152 * Message format to be used to communicate with the FPM.
154 enum zfpm_msg_format
{
155 ZFPM_MSG_FORMAT_NONE
,
156 ZFPM_MSG_FORMAT_NETLINK
,
157 ZFPM_MSG_FORMAT_PROTOBUF
,
166 * True if the FPM module has been enabled.
171 * Message format to be used to communicate with the fpm.
173 enum zfpm_msg_format message_format
;
175 struct thread_master
*master
;
177 enum zfpm_state state
;
179 in_addr_t fpm_server
;
181 * Port on which the FPM is running.
186 * List of rib_dest_t structures to be processed
188 TAILQ_HEAD(zfpm_dest_q
, rib_dest_t_
) dest_q
;
191 * List of fpm_mac_info structures to be processed
193 TAILQ_HEAD(zfpm_mac_q
, fpm_mac_info_t
) mac_q
;
196 * Hash table of fpm_mac_info_t entries
198 * While adding fpm_mac_info_t for a MAC to the mac_q,
199 * it is possible that another fpm_mac_info_t node for the this MAC
200 * is already present in the queue.
201 * This is possible in the case of consecutive add->delete operations.
202 * To avoid such duplicate insertions in the mac_q,
203 * define a hash table for fpm_mac_info_t which can be looked up
204 * to see if an fpm_mac_info_t node for a MAC is already present
207 struct hash
*fpm_mac_info_table
;
210 * Stream socket to the FPM.
215 * Buffers for messages to/from the FPM.
223 struct thread
*t_connect
;
224 struct thread
*t_write
;
225 struct thread
*t_read
;
228 * Thread to clean up after the TCP connection to the FPM goes down
229 * and the state that belongs to it.
231 struct thread
*t_conn_down
;
234 struct zfpm_rnodes_iter iter
;
238 * Thread to take actions once the TCP conn to the FPM comes up, and
239 * the state that belongs to it.
241 struct thread
*t_conn_up
;
244 struct zfpm_rnodes_iter iter
;
247 unsigned long connect_calls
;
248 time_t last_connect_call_time
;
251 * Stats from the start of the current statistics interval up to
252 * now. These are the counters we typically update in the code.
254 struct zfpm_stats stats
;
257 * Statistics that were gathered in the last collection interval.
259 struct zfpm_stats last_ivl_stats
;
262 * Cumulative stats from the last clear to the start of the current
263 * statistics interval.
265 struct zfpm_stats cumulative_stats
;
268 * Stats interval timer.
270 struct thread
*t_stats
;
273 * If non-zero, the last time when statistics were cleared.
275 time_t last_stats_clear_time
;
278 static struct zfpm_glob zfpm_glob_space
;
279 static struct zfpm_glob
*zfpm_g
= &zfpm_glob_space
;
281 static int zfpm_trigger_update(struct route_node
*rn
, const char *reason
);
283 static int zfpm_read_cb(struct thread
*thread
);
284 static int zfpm_write_cb(struct thread
*thread
);
286 static void zfpm_set_state(enum zfpm_state state
, const char *reason
);
287 static void zfpm_start_connect_timer(const char *reason
);
288 static void zfpm_start_stats_timer(void);
289 static void zfpm_mac_info_del(struct fpm_mac_info_t
*fpm_mac
);
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper over the lib scheduler's yield check for the currently
 * running thread/task.
 */
static inline int zfpm_thread_should_yield(struct thread *t)
{
	return thread_should_yield(t);
}
302 static const char *zfpm_state_to_str(enum zfpm_state state
)
306 case ZFPM_STATE_IDLE
:
309 case ZFPM_STATE_ACTIVE
:
312 case ZFPM_STATE_CONNECTING
:
315 case ZFPM_STATE_ESTABLISHED
:
316 return "established";
324 * zfpm_get_elapsed_time
326 * Returns the time elapsed (in seconds) since the given time.
328 static time_t zfpm_get_elapsed_time(time_t reference
)
332 now
= monotime(NULL
);
334 if (now
< reference
) {
339 return now
- reference
;
343 * zfpm_rnodes_iter_init
345 static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter
*iter
)
347 memset(iter
, 0, sizeof(*iter
));
348 rib_tables_iter_init(&iter
->tables_iter
);
351 * This is a hack, but it makes implementing 'next' easier by
352 * ensuring that route_table_iter_next() will return NULL the first
355 route_table_iter_init(&iter
->iter
, NULL
);
356 route_table_iter_cleanup(&iter
->iter
);
360 * zfpm_rnodes_iter_next
362 static inline struct route_node
*
363 zfpm_rnodes_iter_next(struct zfpm_rnodes_iter
*iter
)
365 struct route_node
*rn
;
366 struct route_table
*table
;
369 rn
= route_table_iter_next(&iter
->iter
);
374 * We've made our way through this table, go to the next one.
376 route_table_iter_cleanup(&iter
->iter
);
378 table
= rib_tables_iter_next(&iter
->tables_iter
);
383 route_table_iter_init(&iter
->iter
, table
);
390 * zfpm_rnodes_iter_pause
392 static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter
*iter
)
394 route_table_iter_pause(&iter
->iter
);
398 * zfpm_rnodes_iter_cleanup
400 static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter
*iter
)
402 route_table_iter_cleanup(&iter
->iter
);
403 rib_tables_iter_cleanup(&iter
->tables_iter
);
409 * Initialize a statistics block.
411 static inline void zfpm_stats_init(struct zfpm_stats
*stats
)
413 memset(stats
, 0, sizeof(*stats
));
/*
 * zfpm_stats_reset
 *
 * Reset all counters in a statistics block back to zero.
 */
static inline void zfpm_stats_reset(struct zfpm_stats *stats)
{
	zfpm_stats_init(stats);
}
427 static inline void zfpm_stats_copy(const struct zfpm_stats
*src
,
428 struct zfpm_stats
*dest
)
430 memcpy(dest
, src
, sizeof(*dest
));
436 * Total up the statistics in two stats structures ('s1 and 's2') and
437 * return the result in the third argument, 'result'. Note that the
438 * pointer 'result' may be the same as 's1' or 's2'.
440 * For simplicity, the implementation below assumes that the stats
441 * structure is composed entirely of counters. This can easily be
442 * changed when necessary.
444 static void zfpm_stats_compose(const struct zfpm_stats
*s1
,
445 const struct zfpm_stats
*s2
,
446 struct zfpm_stats
*result
)
448 const unsigned long *p1
, *p2
;
449 unsigned long *result_p
;
452 p1
= (const unsigned long *)s1
;
453 p2
= (const unsigned long *)s2
;
454 result_p
= (unsigned long *)result
;
456 num_counters
= (sizeof(struct zfpm_stats
) / sizeof(unsigned long));
458 for (i
= 0; i
< num_counters
; i
++) {
459 result_p
[i
] = p1
[i
] + p2
[i
];
466 static inline void zfpm_read_on(void)
468 assert(!zfpm_g
->t_read
);
469 assert(zfpm_g
->sock
>= 0);
471 thread_add_read(zfpm_g
->master
, zfpm_read_cb
, 0, zfpm_g
->sock
,
478 static inline void zfpm_write_on(void)
480 assert(!zfpm_g
->t_write
);
481 assert(zfpm_g
->sock
>= 0);
483 thread_add_write(zfpm_g
->master
, zfpm_write_cb
, 0, zfpm_g
->sock
,
490 static inline void zfpm_read_off(void)
492 THREAD_READ_OFF(zfpm_g
->t_read
);
498 static inline void zfpm_write_off(void)
500 THREAD_WRITE_OFF(zfpm_g
->t_write
);
503 static inline void zfpm_connect_off(void)
505 THREAD_TIMER_OFF(zfpm_g
->t_connect
);
509 * zfpm_conn_up_thread_cb
511 * Callback for actions to be taken when the connection to the FPM
514 static int zfpm_conn_up_thread_cb(struct thread
*thread
)
516 struct route_node
*rnode
;
517 struct zfpm_rnodes_iter
*iter
;
520 zfpm_g
->t_conn_up
= NULL
;
522 iter
= &zfpm_g
->t_conn_up_state
.iter
;
524 if (zfpm_g
->state
!= ZFPM_STATE_ESTABLISHED
) {
526 "Connection not up anymore, conn_up thread aborting");
527 zfpm_g
->stats
.t_conn_up_aborts
++;
531 /* Enqueue FPM updates for all the RMAC entries */
532 hash_iterate(zrouter
.l3vni_table
, zfpm_iterate_rmac_table
, NULL
);
534 while ((rnode
= zfpm_rnodes_iter_next(iter
))) {
535 dest
= rib_dest_from_rnode(rnode
);
538 zfpm_g
->stats
.t_conn_up_dests_processed
++;
539 zfpm_trigger_update(rnode
, NULL
);
545 if (!zfpm_thread_should_yield(thread
))
548 zfpm_g
->stats
.t_conn_up_yields
++;
549 zfpm_rnodes_iter_pause(iter
);
550 zfpm_g
->t_conn_up
= NULL
;
551 thread_add_timer_msec(zfpm_g
->master
, zfpm_conn_up_thread_cb
,
552 NULL
, 0, &zfpm_g
->t_conn_up
);
556 zfpm_g
->stats
.t_conn_up_finishes
++;
559 zfpm_rnodes_iter_cleanup(iter
);
566 * Called when the connection to the FPM comes up.
568 static void zfpm_connection_up(const char *detail
)
570 assert(zfpm_g
->sock
>= 0);
573 zfpm_set_state(ZFPM_STATE_ESTABLISHED
, detail
);
576 * Start thread to push existing routes to the FPM.
578 assert(!zfpm_g
->t_conn_up
);
580 zfpm_rnodes_iter_init(&zfpm_g
->t_conn_up_state
.iter
);
582 zfpm_debug("Starting conn_up thread");
583 zfpm_g
->t_conn_up
= NULL
;
584 thread_add_timer_msec(zfpm_g
->master
, zfpm_conn_up_thread_cb
, NULL
, 0,
586 zfpm_g
->stats
.t_conn_up_starts
++;
592 * Check if an asynchronous connect() to the FPM is complete.
594 static void zfpm_connect_check(void)
603 slen
= sizeof(status
);
604 ret
= getsockopt(zfpm_g
->sock
, SOL_SOCKET
, SO_ERROR
, (void *)&status
,
607 if (ret
>= 0 && status
== 0) {
608 zfpm_connection_up("async connect complete");
613 * getsockopt() failed or indicated an error on the socket.
618 zfpm_start_connect_timer("getsockopt() after async connect failed");
623 * zfpm_conn_down_thread_cb
625 * Callback that is invoked to clean up state after the TCP connection
626 * to the FPM goes down.
628 static int zfpm_conn_down_thread_cb(struct thread
*thread
)
630 struct route_node
*rnode
;
631 struct zfpm_rnodes_iter
*iter
;
633 struct fpm_mac_info_t
*mac
= NULL
;
635 assert(zfpm_g
->state
== ZFPM_STATE_IDLE
);
638 * Delink and free all fpm_mac_info_t nodes
639 * in the mac_q and fpm_mac_info_hash
641 while ((mac
= TAILQ_FIRST(&zfpm_g
->mac_q
)) != NULL
)
642 zfpm_mac_info_del(mac
);
644 zfpm_g
->t_conn_down
= NULL
;
646 iter
= &zfpm_g
->t_conn_down_state
.iter
;
648 while ((rnode
= zfpm_rnodes_iter_next(iter
))) {
649 dest
= rib_dest_from_rnode(rnode
);
652 if (CHECK_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
)) {
653 TAILQ_REMOVE(&zfpm_g
->dest_q
, dest
,
657 UNSET_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
);
658 UNSET_FLAG(dest
->flags
, RIB_DEST_SENT_TO_FPM
);
660 zfpm_g
->stats
.t_conn_down_dests_processed
++;
663 * Check if the dest should be deleted.
671 if (!zfpm_thread_should_yield(thread
))
674 zfpm_g
->stats
.t_conn_down_yields
++;
675 zfpm_rnodes_iter_pause(iter
);
676 zfpm_g
->t_conn_down
= NULL
;
677 thread_add_timer_msec(zfpm_g
->master
, zfpm_conn_down_thread_cb
,
678 NULL
, 0, &zfpm_g
->t_conn_down
);
682 zfpm_g
->stats
.t_conn_down_finishes
++;
683 zfpm_rnodes_iter_cleanup(iter
);
686 * Start the process of connecting to the FPM again.
688 zfpm_start_connect_timer("cleanup complete");
693 * zfpm_connection_down
695 * Called when the connection to the FPM has gone down.
697 static void zfpm_connection_down(const char *detail
)
702 assert(zfpm_g
->state
== ZFPM_STATE_ESTABLISHED
);
704 zlog_info("connection to the FPM has gone down: %s", detail
);
709 stream_reset(zfpm_g
->ibuf
);
710 stream_reset(zfpm_g
->obuf
);
712 if (zfpm_g
->sock
>= 0) {
718 * Start thread to clean up state after the connection goes down.
720 assert(!zfpm_g
->t_conn_down
);
721 zfpm_rnodes_iter_init(&zfpm_g
->t_conn_down_state
.iter
);
722 zfpm_g
->t_conn_down
= NULL
;
723 thread_add_timer_msec(zfpm_g
->master
, zfpm_conn_down_thread_cb
, NULL
, 0,
724 &zfpm_g
->t_conn_down
);
725 zfpm_g
->stats
.t_conn_down_starts
++;
727 zfpm_set_state(ZFPM_STATE_IDLE
, detail
);
733 static int zfpm_read_cb(struct thread
*thread
)
740 zfpm_g
->stats
.read_cb_calls
++;
743 * Check if async connect is now done.
745 if (zfpm_g
->state
== ZFPM_STATE_CONNECTING
) {
746 zfpm_connect_check();
750 assert(zfpm_g
->state
== ZFPM_STATE_ESTABLISHED
);
751 assert(zfpm_g
->sock
>= 0);
755 already
= stream_get_endp(ibuf
);
756 if (already
< FPM_MSG_HDR_LEN
) {
759 nbyte
= stream_read_try(ibuf
, zfpm_g
->sock
,
760 FPM_MSG_HDR_LEN
- already
);
761 if (nbyte
== 0 || nbyte
== -1) {
765 snprintf(buffer
, sizeof(buffer
),
766 "closed socket in read(%d): %s", errno
,
767 safe_strerror(errno
));
768 zfpm_connection_down(buffer
);
770 zfpm_connection_down("closed socket in read");
774 if (nbyte
!= (ssize_t
)(FPM_MSG_HDR_LEN
- already
))
777 already
= FPM_MSG_HDR_LEN
;
780 stream_set_getp(ibuf
, 0);
782 hdr
= (fpm_msg_hdr_t
*)stream_pnt(ibuf
);
784 if (!fpm_msg_hdr_ok(hdr
)) {
785 zfpm_connection_down("invalid message header");
789 msg_len
= fpm_msg_len(hdr
);
792 * Read out the rest of the packet.
794 if (already
< msg_len
) {
797 nbyte
= stream_read_try(ibuf
, zfpm_g
->sock
, msg_len
- already
);
799 if (nbyte
== 0 || nbyte
== -1) {
803 snprintf(buffer
, sizeof(buffer
),
804 "failed to read message(%d) %s", errno
,
805 safe_strerror(errno
));
806 zfpm_connection_down(buffer
);
808 zfpm_connection_down("failed to read message");
812 if (nbyte
!= (ssize_t
)(msg_len
- already
))
817 * Just throw it away for now.
826 static bool zfpm_updates_pending(void)
828 if (!(TAILQ_EMPTY(&zfpm_g
->dest_q
)) || !(TAILQ_EMPTY(&zfpm_g
->mac_q
)))
835 * zfpm_writes_pending
837 * Returns true if we may have something to write to the FPM.
839 static int zfpm_writes_pending(void)
843 * Check if there is any data in the outbound buffer that has not
844 * been written to the socket yet.
846 if (stream_get_endp(zfpm_g
->obuf
) - stream_get_getp(zfpm_g
->obuf
))
850 * Check if there are any updates scheduled on the outbound queues.
852 if (zfpm_updates_pending())
861 * Encode a message to the FPM with information about the given route.
863 * Returns the number of bytes written to the buffer. 0 or a negative
864 * value indicates an error.
866 static inline int zfpm_encode_route(rib_dest_t
*dest
, struct route_entry
*re
,
867 char *in_buf
, size_t in_buf_len
,
868 fpm_msg_type_e
*msg_type
)
876 *msg_type
= FPM_MSG_TYPE_NONE
;
878 switch (zfpm_g
->message_format
) {
880 case ZFPM_MSG_FORMAT_PROTOBUF
:
882 len
= zfpm_protobuf_encode_route(dest
, re
, (uint8_t *)in_buf
,
884 *msg_type
= FPM_MSG_TYPE_PROTOBUF
;
888 case ZFPM_MSG_FORMAT_NETLINK
:
890 *msg_type
= FPM_MSG_TYPE_NETLINK
;
891 cmd
= re
? RTM_NEWROUTE
: RTM_DELROUTE
;
892 len
= zfpm_netlink_encode_route(cmd
, dest
, re
, in_buf
,
894 assert(fpm_msg_align(len
) == len
);
895 *msg_type
= FPM_MSG_TYPE_NETLINK
;
896 #endif /* HAVE_NETLINK */
907 * zfpm_route_for_update
909 * Returns the re that is to be sent to the FPM for a given dest.
911 struct route_entry
*zfpm_route_for_update(rib_dest_t
*dest
)
913 return dest
->selected_fib
;
917 * Define an enum for return codes for queue processing functions
919 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
920 * Stop processing all the queues and empty the buffer by writing its content
923 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
924 * empty or we have processed enough updates from this queue.
925 * So, move on to the next queue.
932 #define FPM_QUEUE_PROCESS_LIMIT 10000
935 * zfpm_build_route_updates
937 * Process the dest_q queue and write FPM messages to the outbound buffer.
939 static int zfpm_build_route_updates(void)
943 unsigned char *buf
, *data
, *buf_end
;
947 struct route_entry
*re
;
948 int is_add
, write_msg
;
949 fpm_msg_type_e msg_type
;
952 if (TAILQ_EMPTY(&zfpm_g
->dest_q
))
953 return FPM_GOTO_NEXT_Q
;
956 q_limit
= FPM_QUEUE_PROCESS_LIMIT
;
960 * Make sure there is enough space to write another message.
962 if (STREAM_WRITEABLE(s
) < FPM_MAX_MSG_LEN
)
963 return FPM_WRITE_STOP
;
965 buf
= STREAM_DATA(s
) + stream_get_endp(s
);
966 buf_end
= buf
+ STREAM_WRITEABLE(s
);
968 dest
= TAILQ_FIRST(&zfpm_g
->dest_q
);
970 return FPM_GOTO_NEXT_Q
;
972 assert(CHECK_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
));
974 hdr
= (fpm_msg_hdr_t
*)buf
;
975 hdr
->version
= FPM_PROTO_VERSION
;
977 data
= fpm_msg_data(hdr
);
979 re
= zfpm_route_for_update(dest
);
985 * If this is a route deletion, and we have not sent the route
987 * the FPM previously, skip it.
989 if (!is_add
&& !CHECK_FLAG(dest
->flags
, RIB_DEST_SENT_TO_FPM
)) {
991 zfpm_g
->stats
.nop_deletes_skipped
++;
995 data_len
= zfpm_encode_route(dest
, re
, (char *)data
,
996 buf_end
- data
, &msg_type
);
1000 hdr
->msg_type
= msg_type
;
1001 msg_len
= fpm_data_len_to_msg_len(data_len
);
1002 hdr
->msg_len
= htons(msg_len
);
1003 stream_forward_endp(s
, msg_len
);
1006 zfpm_g
->stats
.route_adds
++;
1008 zfpm_g
->stats
.route_dels
++;
1013 * Remove the dest from the queue, and reset the flag.
1015 UNSET_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
);
1016 TAILQ_REMOVE(&zfpm_g
->dest_q
, dest
, fpm_q_entries
);
1019 SET_FLAG(dest
->flags
, RIB_DEST_SENT_TO_FPM
);
1021 UNSET_FLAG(dest
->flags
, RIB_DEST_SENT_TO_FPM
);
1025 * Delete the destination if necessary.
1027 if (rib_gc_dest(dest
->rnode
))
1028 zfpm_g
->stats
.dests_del_after_update
++;
1033 * We have processed enough updates in this queue.
1034 * Now yield for other queues.
1036 return FPM_GOTO_NEXT_Q
;
1044 * Encode a message to FPM with information about the given MAC.
1046 * Returns the number of bytes written to the buffer.
1048 static inline int zfpm_encode_mac(struct fpm_mac_info_t
*mac
, char *in_buf
,
1049 size_t in_buf_len
, fpm_msg_type_e
*msg_type
)
1053 *msg_type
= FPM_MSG_TYPE_NONE
;
1055 switch (zfpm_g
->message_format
) {
1057 case ZFPM_MSG_FORMAT_NONE
:
1059 case ZFPM_MSG_FORMAT_NETLINK
:
1061 len
= zfpm_netlink_encode_mac(mac
, in_buf
, in_buf_len
);
1062 assert(fpm_msg_align(len
) == len
);
1063 *msg_type
= FPM_MSG_TYPE_NETLINK
;
1064 #endif /* HAVE_NETLINK */
1066 case ZFPM_MSG_FORMAT_PROTOBUF
:
1072 static int zfpm_build_mac_updates(void)
1075 struct fpm_mac_info_t
*mac
;
1076 unsigned char *buf
, *data
, *buf_end
;
1078 size_t data_len
, msg_len
;
1079 fpm_msg_type_e msg_type
;
1082 if (TAILQ_EMPTY(&zfpm_g
->mac_q
))
1083 return FPM_GOTO_NEXT_Q
;
1086 q_limit
= FPM_QUEUE_PROCESS_LIMIT
;
1089 /* Make sure there is enough space to write another message. */
1090 if (STREAM_WRITEABLE(s
) < FPM_MAX_MAC_MSG_LEN
)
1091 return FPM_WRITE_STOP
;
1093 buf
= STREAM_DATA(s
) + stream_get_endp(s
);
1094 buf_end
= buf
+ STREAM_WRITEABLE(s
);
1096 mac
= TAILQ_FIRST(&zfpm_g
->mac_q
);
1098 return FPM_GOTO_NEXT_Q
;
1100 /* Check for no-op */
1101 if (!CHECK_FLAG(mac
->fpm_flags
, ZEBRA_MAC_UPDATE_FPM
)) {
1102 zfpm_g
->stats
.nop_deletes_skipped
++;
1103 zfpm_mac_info_del(mac
);
1107 hdr
= (fpm_msg_hdr_t
*)buf
;
1108 hdr
->version
= FPM_PROTO_VERSION
;
1110 data
= fpm_msg_data(hdr
);
1111 data_len
= zfpm_encode_mac(mac
, (char *)data
, buf_end
- data
,
1115 hdr
->msg_type
= msg_type
;
1116 msg_len
= fpm_data_len_to_msg_len(data_len
);
1117 hdr
->msg_len
= htons(msg_len
);
1118 stream_forward_endp(s
, msg_len
);
1120 /* Remove the MAC from the queue, and delete it. */
1121 zfpm_mac_info_del(mac
);
1126 * We have processed enough updates in this queue.
1127 * Now yield for other queues.
1129 return FPM_GOTO_NEXT_Q
;
1135 * zfpm_build_updates
1137 * Process the outgoing queues and write messages to the outbound
1140 static void zfpm_build_updates(void)
1145 assert(stream_empty(s
));
1149 * Stop processing the queues if zfpm_g->obuf is full
1150 * or we do not have more updates to process
1152 if (zfpm_build_mac_updates() == FPM_WRITE_STOP
)
1154 if (zfpm_build_route_updates() == FPM_WRITE_STOP
)
1156 } while (zfpm_updates_pending());
1162 static int zfpm_write_cb(struct thread
*thread
)
1167 zfpm_g
->stats
.write_cb_calls
++;
1170 * Check if async connect is now done.
1172 if (zfpm_g
->state
== ZFPM_STATE_CONNECTING
) {
1173 zfpm_connect_check();
1177 assert(zfpm_g
->state
== ZFPM_STATE_ESTABLISHED
);
1178 assert(zfpm_g
->sock
>= 0);
1183 int bytes_to_write
, bytes_written
;
1188 * If the stream is empty, try fill it up with data.
1190 if (stream_empty(s
)) {
1191 zfpm_build_updates();
1194 bytes_to_write
= stream_get_endp(s
) - stream_get_getp(s
);
1195 if (!bytes_to_write
)
1199 write(zfpm_g
->sock
, stream_pnt(s
), bytes_to_write
);
1200 zfpm_g
->stats
.write_calls
++;
1203 if (bytes_written
< 0) {
1204 if (ERRNO_IO_RETRY(errno
))
1207 zfpm_connection_down("failed to write to socket");
1211 if (bytes_written
!= bytes_to_write
) {
1216 stream_forward_getp(s
, bytes_written
);
1217 zfpm_g
->stats
.partial_writes
++;
1222 * We've written out the entire contents of the stream.
1226 if (num_writes
>= ZFPM_MAX_WRITES_PER_RUN
) {
1227 zfpm_g
->stats
.max_writes_hit
++;
1231 if (zfpm_thread_should_yield(thread
)) {
1232 zfpm_g
->stats
.t_write_yields
++;
1237 if (zfpm_writes_pending())
1246 static int zfpm_connect_cb(struct thread
*t
)
1249 struct sockaddr_in serv
;
1251 assert(zfpm_g
->state
== ZFPM_STATE_ACTIVE
);
1253 sock
= socket(AF_INET
, SOCK_STREAM
, 0);
1255 zlog_err("Failed to create socket for connect(): %s",
1257 zfpm_g
->stats
.connect_no_sock
++;
1261 set_nonblocking(sock
);
1263 /* Make server socket. */
1264 memset(&serv
, 0, sizeof(serv
));
1265 serv
.sin_family
= AF_INET
;
1266 serv
.sin_port
= htons(zfpm_g
->fpm_port
);
1267 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
1268 serv
.sin_len
= sizeof(struct sockaddr_in
);
1269 #endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
1270 if (!zfpm_g
->fpm_server
)
1271 serv
.sin_addr
.s_addr
= htonl(INADDR_LOOPBACK
);
1273 serv
.sin_addr
.s_addr
= (zfpm_g
->fpm_server
);
1276 * Connect to the FPM.
1278 zfpm_g
->connect_calls
++;
1279 zfpm_g
->stats
.connect_calls
++;
1280 zfpm_g
->last_connect_call_time
= monotime(NULL
);
1282 ret
= connect(sock
, (struct sockaddr
*)&serv
, sizeof(serv
));
1284 zfpm_g
->sock
= sock
;
1285 zfpm_connection_up("connect succeeded");
1289 if (errno
== EINPROGRESS
) {
1290 zfpm_g
->sock
= sock
;
1293 zfpm_set_state(ZFPM_STATE_CONNECTING
,
1294 "async connect in progress");
1298 zlog_info("can't connect to FPM %d: %s", sock
, safe_strerror(errno
));
1302 * Restart timer for retrying connection.
1304 zfpm_start_connect_timer("connect() failed");
1311 * Move state machine into the given state.
1313 static void zfpm_set_state(enum zfpm_state state
, const char *reason
)
1315 enum zfpm_state cur_state
= zfpm_g
->state
;
1320 if (state
== cur_state
)
1323 zfpm_debug("beginning state transition %s -> %s. Reason: %s",
1324 zfpm_state_to_str(cur_state
), zfpm_state_to_str(state
),
1329 case ZFPM_STATE_IDLE
:
1330 assert(cur_state
== ZFPM_STATE_ESTABLISHED
);
1333 case ZFPM_STATE_ACTIVE
:
1334 assert(cur_state
== ZFPM_STATE_IDLE
1335 || cur_state
== ZFPM_STATE_CONNECTING
);
1336 assert(zfpm_g
->t_connect
);
1339 case ZFPM_STATE_CONNECTING
:
1340 assert(zfpm_g
->sock
);
1341 assert(cur_state
== ZFPM_STATE_ACTIVE
);
1342 assert(zfpm_g
->t_read
);
1343 assert(zfpm_g
->t_write
);
1346 case ZFPM_STATE_ESTABLISHED
:
1347 assert(cur_state
== ZFPM_STATE_ACTIVE
1348 || cur_state
== ZFPM_STATE_CONNECTING
);
1349 assert(zfpm_g
->sock
);
1350 assert(zfpm_g
->t_read
);
1351 assert(zfpm_g
->t_write
);
1355 zfpm_g
->state
= state
;
1359 * zfpm_calc_connect_delay
1361 * Returns the number of seconds after which we should attempt to
1362 * reconnect to the FPM.
1364 static long zfpm_calc_connect_delay(void)
1369 * Return 0 if this is our first attempt to connect.
1371 if (zfpm_g
->connect_calls
== 0) {
1375 elapsed
= zfpm_get_elapsed_time(zfpm_g
->last_connect_call_time
);
1377 if (elapsed
> ZFPM_CONNECT_RETRY_IVL
) {
1381 return ZFPM_CONNECT_RETRY_IVL
- elapsed
;
1385 * zfpm_start_connect_timer
1387 static void zfpm_start_connect_timer(const char *reason
)
1391 assert(!zfpm_g
->t_connect
);
1392 assert(zfpm_g
->sock
< 0);
1394 assert(zfpm_g
->state
== ZFPM_STATE_IDLE
1395 || zfpm_g
->state
== ZFPM_STATE_ACTIVE
1396 || zfpm_g
->state
== ZFPM_STATE_CONNECTING
);
1398 delay_secs
= zfpm_calc_connect_delay();
1399 zfpm_debug("scheduling connect in %ld seconds", delay_secs
);
1401 thread_add_timer(zfpm_g
->master
, zfpm_connect_cb
, 0, delay_secs
,
1402 &zfpm_g
->t_connect
);
1403 zfpm_set_state(ZFPM_STATE_ACTIVE
, reason
);
1409 * Returns true if the zebra FPM module has been enabled.
1411 static inline int zfpm_is_enabled(void)
1413 return zfpm_g
->enabled
;
1419 * Returns true if the connection to the FPM is up.
1421 static inline int zfpm_conn_is_up(void)
1423 if (zfpm_g
->state
!= ZFPM_STATE_ESTABLISHED
)
1426 assert(zfpm_g
->sock
>= 0);
1432 * zfpm_trigger_update
1434 * The zebra code invokes this function to indicate that we should
1435 * send an update to the FPM about the given route_node.
1437 static int zfpm_trigger_update(struct route_node
*rn
, const char *reason
)
1442 * Ignore if the connection is down. We will update the FPM about
1443 * all destinations once the connection comes up.
1445 if (!zfpm_conn_is_up())
1448 dest
= rib_dest_from_rnode(rn
);
1450 if (CHECK_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
)) {
1451 zfpm_g
->stats
.redundant_triggers
++;
1456 zfpm_debug("%pFX triggering update to FPM - Reason: %s", &rn
->p
,
1460 SET_FLAG(dest
->flags
, RIB_DEST_UPDATE_FPM
);
1461 TAILQ_INSERT_TAIL(&zfpm_g
->dest_q
, dest
, fpm_q_entries
);
1462 zfpm_g
->stats
.updates_triggered
++;
1465 * Make sure that writes are enabled.
1467 if (zfpm_g
->t_write
)
1475 * Generate Key for FPM MAC info hash entry
1477 static unsigned int zfpm_mac_info_hash_keymake(const void *p
)
1479 struct fpm_mac_info_t
*fpm_mac
= (struct fpm_mac_info_t
*)p
;
1482 mac_key
= jhash(fpm_mac
->macaddr
.octet
, ETH_ALEN
, 0xa5a5a55a);
1484 return jhash_2words(mac_key
, fpm_mac
->vni
, 0);
1488 * Compare function for FPM MAC info hash lookup
1490 static bool zfpm_mac_info_cmp(const void *p1
, const void *p2
)
1492 const struct fpm_mac_info_t
*fpm_mac1
= p1
;
1493 const struct fpm_mac_info_t
*fpm_mac2
= p2
;
1495 if (memcmp(fpm_mac1
->macaddr
.octet
, fpm_mac2
->macaddr
.octet
, ETH_ALEN
)
1498 if (fpm_mac1
->vni
!= fpm_mac2
->vni
)
1505 * Lookup FPM MAC info hash entry.
1507 static struct fpm_mac_info_t
*zfpm_mac_info_lookup(struct fpm_mac_info_t
*key
)
1509 return hash_lookup(zfpm_g
->fpm_mac_info_table
, key
);
1513 * Callback to allocate fpm_mac_info_t structure.
1515 static void *zfpm_mac_info_alloc(void *p
)
1517 const struct fpm_mac_info_t
*key
= p
;
1518 struct fpm_mac_info_t
*fpm_mac
;
1520 fpm_mac
= XCALLOC(MTYPE_FPM_MAC_INFO
, sizeof(struct fpm_mac_info_t
));
1522 memcpy(&fpm_mac
->macaddr
, &key
->macaddr
, ETH_ALEN
);
1523 fpm_mac
->vni
= key
->vni
;
1525 return (void *)fpm_mac
;
1529 * Delink and free fpm_mac_info_t.
1531 static void zfpm_mac_info_del(struct fpm_mac_info_t
*fpm_mac
)
1533 hash_release(zfpm_g
->fpm_mac_info_table
, fpm_mac
);
1534 TAILQ_REMOVE(&zfpm_g
->mac_q
, fpm_mac
, fpm_mac_q_entries
);
1535 XFREE(MTYPE_FPM_MAC_INFO
, fpm_mac
);
1539 * zfpm_trigger_rmac_update
1541 * Zebra code invokes this function to indicate that we should
1542 * send an update to FPM for given MAC entry.
1544 * This function checks if we already have enqueued an update for this RMAC,
1545 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
1547 static int zfpm_trigger_rmac_update(zebra_mac_t
*rmac
, zebra_l3vni_t
*zl3vni
,
1548 bool delete, const char *reason
)
1550 char buf
[ETHER_ADDR_STRLEN
];
1551 struct fpm_mac_info_t
*fpm_mac
, key
;
1552 struct interface
*vxlan_if
, *svi_if
;
1553 bool mac_found
= false;
1556 * Ignore if the connection is down. We will update the FPM about
1557 * all destinations once the connection comes up.
1559 if (!zfpm_conn_is_up())
1563 zfpm_debug("triggering update to FPM - Reason: %s - %s",
1565 prefix_mac2str(&rmac
->macaddr
, buf
, sizeof(buf
)));
1568 vxlan_if
= zl3vni_map_to_vxlan_if(zl3vni
);
1569 svi_if
= zl3vni_map_to_svi_if(zl3vni
);
1571 memset(&key
, 0, sizeof(struct fpm_mac_info_t
));
1573 memcpy(&key
.macaddr
, &rmac
->macaddr
, ETH_ALEN
);
1574 key
.vni
= zl3vni
->vni
;
1576 /* Check if this MAC is already present in the queue. */
1577 fpm_mac
= zfpm_mac_info_lookup(&key
);
1583 * If the enqueued op is "add" and current op is "delete",
1584 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
1585 * While processing FPM queue, we will silently delete this
1586 * MAC entry without sending any update for this MAC.
1588 if (!CHECK_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_DELETE_FPM
) &&
1590 SET_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_DELETE_FPM
);
1591 UNSET_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_UPDATE_FPM
);
1595 fpm_mac
= hash_get(zfpm_g
->fpm_mac_info_table
, &key
,
1596 zfpm_mac_info_alloc
);
1601 fpm_mac
->r_vtep_ip
.s_addr
= rmac
->fwd_info
.r_vtep_ip
.s_addr
;
1602 fpm_mac
->zebra_flags
= rmac
->flags
;
1603 fpm_mac
->vxlan_if
= vxlan_if
? vxlan_if
->ifindex
: 0;
1604 fpm_mac
->svi_if
= svi_if
? svi_if
->ifindex
: 0;
1606 SET_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_UPDATE_FPM
);
1608 SET_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_DELETE_FPM
);
1610 UNSET_FLAG(fpm_mac
->fpm_flags
, ZEBRA_MAC_DELETE_FPM
);
1613 TAILQ_INSERT_TAIL(&zfpm_g
->mac_q
, fpm_mac
, fpm_mac_q_entries
);
1615 zfpm_g
->stats
.updates_triggered
++;
1617 /* If writes are already enabled, return. */
1618 if (zfpm_g
->t_write
)
1626 * This function is called when the FPM connections is established.
1627 * Iterate over all the RMAC entries for the given L3VNI
1628 * and enqueue the RMAC for FPM processing.
1630 static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket
*backet
,
1633 zebra_mac_t
*zrmac
= (zebra_mac_t
*)backet
->data
;
1634 zebra_l3vni_t
*zl3vni
= (zebra_l3vni_t
*)args
;
1636 zfpm_trigger_rmac_update(zrmac
, zl3vni
, false, "RMAC added");
1640 * This function is called when the FPM connections is established.
1641 * This function iterates over all the L3VNIs to trigger
1642 * FPM updates for RMACs currently available.
1644 static void zfpm_iterate_rmac_table(struct hash_bucket
*backet
, void *args
)
1646 zebra_l3vni_t
*zl3vni
= (zebra_l3vni_t
*)backet
->data
;
1648 hash_iterate(zl3vni
->rmac_table
, zfpm_trigger_rmac_update_wrapper
,
/*
 * zfpm_stats_timer_cb
 *
 * Periodic timer callback: snapshots the counters collected over the
 * interval that just ended, folds them into the cumulative totals, and
 * restarts collection for the next interval.
 */
static int zfpm_stats_timer_cb(struct thread *t)
{
	/* The timer has fired; clear the handle so it can be rescheduled. */
	zfpm_g->t_stats = NULL;

	/*
	 * Remember the stats collected in the last interval for display
	 * purposes.
	 */
	zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);

	/*
	 * Add the current set of stats into the cumulative statistics.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &zfpm_g->cumulative_stats);

	/*
	 * Start collecting stats afresh over the next interval.
	 */
	zfpm_stats_reset(&zfpm_g->stats);

	zfpm_start_stats_timer();

	return 0;
}
1682 * zfpm_stop_stats_timer
1684 static void zfpm_stop_stats_timer(void)
1686 if (!zfpm_g
->t_stats
)
1689 zfpm_debug("Stopping existing stats timer");
1690 THREAD_TIMER_OFF(zfpm_g
->t_stats
);
/*
 * zfpm_start_stats_timer
 *
 * Schedule the statistics collection callback to fire after
 * ZFPM_STATS_IVL_SECS seconds. Must not be called while a stats timer
 * is already pending (asserted below).
 */
void zfpm_start_stats_timer(void)
{
	assert(!zfpm_g->t_stats);

	thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
			 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
/*
 * Helper macro for zfpm_show_stats() below.
 *
 * Emits one output line per counter: the counter's name, its cumulative
 * total and its value over the last interval. Relies on 'vty' and
 * 'total_stats' being in scope at the expansion site.
 */
#define ZFPM_SHOW_STAT(counter)                                                \
	do {                                                                   \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                  \
			total_stats.counter, zfpm_g->last_ivl_stats.counter);  \
	} while (0)
/*
 * zfpm_show_stats
 *
 * Display FPM statistics on the given vty: cumulative totals alongside
 * the counts from the last collection interval.
 */
static void zfpm_show_stats(struct vty *vty)
{
	struct zfpm_stats total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* Nothing more to report if the stats have never been cleared. */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1766 static void zfpm_clear_stats(struct vty
*vty
)
1768 if (!zfpm_is_enabled()) {
1769 vty_out(vty
, "The FPM module is not enabled...\n");
1773 zfpm_stats_reset(&zfpm_g
->stats
);
1774 zfpm_stats_reset(&zfpm_g
->last_ivl_stats
);
1775 zfpm_stats_reset(&zfpm_g
->cumulative_stats
);
1777 zfpm_stop_stats_timer();
1778 zfpm_start_stats_timer();
1780 zfpm_g
->last_stats_clear_time
= monotime(NULL
);
1782 vty_out(vty
, "Cleared FPM stats\n");
/*
 * show_zebra_fpm_stats
 *
 * vty command: "show zebra fpm stats" — print the FPM statistics.
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
/*
 * clear_zebra_fpm_stats
 *
 * vty command: "clear zebra fpm stats" — reset the FPM statistics.
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1816 * update fpm connection information
1818 DEFUN ( fpm_remote_ip
,
1820 "fpm connection ip A.B.C.D port (1-65535)",
1821 "fpm connection remote ip and port\n"
1822 "Remote fpm server ip A.B.C.D\n"
1826 in_addr_t fpm_server
;
1829 fpm_server
= inet_addr(argv
[3]->arg
);
1830 if (fpm_server
== INADDR_NONE
)
1831 return CMD_ERR_INCOMPLETE
;
1833 port_no
= atoi(argv
[5]->arg
);
1834 if (port_no
< TCP_MIN_PORT
|| port_no
> TCP_MAX_PORT
)
1835 return CMD_ERR_INCOMPLETE
;
1837 zfpm_g
->fpm_server
= fpm_server
;
1838 zfpm_g
->fpm_port
= port_no
;
/*
 * no_fpm_remote_ip
 *
 * vty command handler for "no fpm connection ...": removes the remote
 * FPM server configuration, restoring the compiled-in defaults. Fails
 * with CMD_ERR_NO_MATCH unless the given address/port match the current
 * configuration.
 */
DEFUN ( no_fpm_remote_ip,
	no_fpm_remote_ip_cmd,
	"no fpm connection ip A.B.C.D port (1-65535)",
	"fpm connection remote ip and port\n"
	"Remote fpm server ip A.B.C.D\n"
	"Enter ip ")
{
	/* Refuse to delete a configuration that does not match the request. */
	if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
	    || zfpm_g->fpm_port != atoi(argv[6]->arg))
		return CMD_ERR_NO_MATCH;

	zfpm_g->fpm_server = FPM_DEFAULT_IP;
	zfpm_g->fpm_port = FPM_DEFAULT_PORT;

	return CMD_SUCCESS;
}
/*
 * zfpm_init_message_format
 *
 * Select the wire format used to talk to the FPM based on the module
 * load argument 'format' ("netlink" or "protobuf", may be NULL) and on
 * which formats were compiled in. Leaves zfpm_g->message_format as
 * ZFPM_MSG_FORMAT_NONE if no usable format is available.
 */
static inline void zfpm_init_message_format(const char *format)
{
	int have_netlink, have_protobuf;

#ifdef HAVE_NETLINK
	have_netlink = 1;
#else
	have_netlink = 0;
#endif

#ifdef HAVE_PROTOBUF
	have_protobuf = 1;
#else
	have_protobuf = 0;
#endif

	zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;

	if (!format) {
		/* No explicit choice: prefer netlink, then protobuf. */
		if (have_netlink) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		} else if (have_protobuf) {
			zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		}
		return;
	}

	if (!strcmp("netlink", format)) {
		if (!have_netlink) {
			flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
				 "FPM netlink message format is not available");
			return;
		}
		zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
		return;
	}

	if (!strcmp("protobuf", format)) {
		if (!have_protobuf) {
			flog_err(
				EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
				"FPM protobuf message format is not available");
			return;
		}
		flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
			  "FPM protobuf message format is deprecated and scheduled to be removed. Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
		zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
		return;
	}

	/* Unrecognized format string: leave the format as NONE. */
	flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
		  format);
}
1920 * fpm_remote_srv_write
1922 * Module to write remote fpm connection
1924 * Returns ZERO on success.
1927 static int fpm_remote_srv_write(struct vty
*vty
)
1931 in
.s_addr
= zfpm_g
->fpm_server
;
1933 if ((zfpm_g
->fpm_server
!= FPM_DEFAULT_IP
1934 && zfpm_g
->fpm_server
!= INADDR_ANY
)
1935 || (zfpm_g
->fpm_port
!= FPM_DEFAULT_PORT
&& zfpm_g
->fpm_port
!= 0))
1936 vty_out(vty
, "fpm connection ip %s port %d\n", inet_ntoa(in
),
1943 static int fpm_remote_srv_write(struct vty
*vty
);
1945 static struct cmd_node zebra_node
= {
1948 .parent_node
= CONFIG_NODE
,
1950 .config_write
= fpm_remote_srv_write
,
/*
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module, invoked via the
 * frr_late_init hook. The FPM port, enable flag and message format are
 * derived internally (module load args and compiled-in defaults), not
 * passed as parameters.
 *
 * @param[in] master thread master used to schedule FPM timers/tasks.
 *
 * Returns 0 on success.
 */
static int zfpm_init(struct thread_master *master)
{
	int enable = 1;
	uint16_t port = 0;
	/* Optional format selection ("netlink"/"protobuf") from module args. */
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t enties */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	/* Register the vty commands defined above. */
	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to compiled-in defaults where nothing is configured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
/*
 * zfpm_fini
 *
 * Shutdown hook (frr_early_fini): stop all FPM I/O activity and the
 * stats timer, then detach from the rib update hook.
 */
static int zfpm_fini(void)
{
	zfpm_write_off();
	zfpm_read_off();
	zfpm_connect_off();

	zfpm_stop_stats_timer();

	hook_unregister(rib_update, zfpm_trigger_update);
	return 0;
}
/*
 * zebra_fpm_module_init
 *
 * Module entry point: registers the hooks that drive the FPM module's
 * lifecycle (init/fini) and its route/RMAC update notifications.
 */
static int zebra_fpm_module_init(void)
{
	hook_register(rib_update, zfpm_trigger_update);
	hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
	hook_register(frr_late_init, zfpm_init);
	hook_register(frr_early_fini, zfpm_fini);
	return 0;
}
/* Register this dynamic module with FRR's module loader. */
FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
		 .description = "zebra FPM (Forwarding Plane Manager) module",
		 .init = zebra_fpm_module_init, )