]> git.proxmox.com Git - mirror_frr.git/blame - zebra/zebra_fpm.c
*: snmp: convert into modules
[mirror_frr.git] / zebra / zebra_fpm.c
CommitLineData
5adc2528
AS
1/*
2 * Main implementation file for interface to Forwarding Plane Manager.
3 *
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
6 *
7 * This file is part of GNU Zebra.
8 *
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
13 *
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with GNU Zebra; see the file COPYING. If not, write to the Free
21 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
22 * 02111-1307, USA.
23 */
24
25#include <zebra.h>
26
27#include "log.h"
28#include "stream.h"
29#include "thread.h"
30#include "network.h"
31#include "command.h"
32
33#include "zebra/rib.h"
7c551956
DS
34#include "zebra/zserv.h"
35#include "zebra/zebra_ns.h"
36#include "zebra/zebra_vrf.h"
5adc2528
AS
37
38#include "fpm/fpm.h"
39#include "zebra_fpm.h"
40#include "zebra_fpm_private.h"
41
42/*
43 * Interval at which we attempt to connect to the FPM.
44 */
45#define ZFPM_CONNECT_RETRY_IVL 5
46
47/*
48 * Sizes of outgoing and incoming stream buffers for writing/reading
49 * FPM messages.
50 */
51#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
52#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
53
54/*
55 * The maximum number of times the FPM socket write callback can call
56 * 'write' before it yields.
57 */
58#define ZFPM_MAX_WRITES_PER_RUN 10
59
60/*
61 * Interval over which we collect statistics.
62 */
63#define ZFPM_STATS_IVL_SECS 10
64
65/*
66 * Structure that holds state for iterating over all route_node
67 * structures that are candidates for being communicated to the FPM.
68 */
69typedef struct zfpm_rnodes_iter_t_
70{
71 rib_tables_iter_t tables_iter;
72 route_table_iter_t iter;
73} zfpm_rnodes_iter_t;
74
/*
 * Statistics counters.
 *
 * NOTE: zfpm_stats_compose() assumes this structure consists solely of
 * 'unsigned long' counters; keep it that way when adding fields.
 */
typedef struct zfpm_stats_t_ {
  /* Connection attempts. */
  unsigned long connect_calls;
  unsigned long connect_no_sock;

  /* Read path. */
  unsigned long read_cb_calls;

  /* Write path. */
  unsigned long write_cb_calls;
  unsigned long write_calls;
  unsigned long partial_writes;
  unsigned long max_writes_hit;
  unsigned long t_write_yields;

  /* Route updates. */
  unsigned long nop_deletes_skipped;
  unsigned long route_adds;
  unsigned long route_dels;

  /* Update triggers. */
  unsigned long updates_triggered;
  unsigned long redundant_triggers;
  unsigned long non_fpm_table_triggers;

  unsigned long dests_del_after_update;

  /* Connection-down cleanup thread. */
  unsigned long t_conn_down_starts;
  unsigned long t_conn_down_dests_processed;
  unsigned long t_conn_down_yields;
  unsigned long t_conn_down_finishes;

  /* Connection-up sync thread. */
  unsigned long t_conn_up_starts;
  unsigned long t_conn_up_dests_processed;
  unsigned long t_conn_up_yields;
  unsigned long t_conn_up_aborts;
  unsigned long t_conn_up_finishes;

} zfpm_stats_t;
112
/*
 * States for the FPM state machine.
 */
typedef enum {

  /*
   * Not yet ready to connect to the FPM. This can happen when this
   * module is disabled, or while cleaning up after a connection has
   * gone down.
   */
  ZFPM_STATE_IDLE,

  /*
   * Ready to talk to the FPM and periodically trying to connect to it.
   */
  ZFPM_STATE_ACTIVE,

  /*
   * In the middle of bringing up a TCP connection: waiting for an
   * asynchronous connect() to complete.
   */
  ZFPM_STATE_CONNECTING,

  /*
   * TCP connection to the FPM is up.
   */
  ZFPM_STATE_ESTABLISHED

} zfpm_state_t;
143
/*
 * Wire format used for messages exchanged with the FPM.
 */
typedef enum
{
  ZFPM_MSG_FORMAT_NONE,
  ZFPM_MSG_FORMAT_NETLINK,
  ZFPM_MSG_FORMAT_PROTOBUF,
} zfpm_msg_format_e;
5adc2528
AS
153/*
154 * Globals.
155 */
156typedef struct zfpm_glob_t_
157{
158
159 /*
160 * True if the FPM module has been enabled.
161 */
162 int enabled;
163
fb0aa886
AS
164 /*
165 * Message format to be used to communicate with the fpm.
166 */
167 zfpm_msg_format_e message_format;
168
5adc2528
AS
169 struct thread_master *master;
170
171 zfpm_state_t state;
172
711ff0ba 173 in_addr_t fpm_server;
5adc2528
AS
174 /*
175 * Port on which the FPM is running.
176 */
177 int fpm_port;
178
179 /*
180 * List of rib_dest_t structures to be processed
181 */
182 TAILQ_HEAD (zfpm_dest_q, rib_dest_t_) dest_q;
183
184 /*
185 * Stream socket to the FPM.
186 */
187 int sock;
188
189 /*
190 * Buffers for messages to/from the FPM.
191 */
192 struct stream *obuf;
193 struct stream *ibuf;
194
195 /*
196 * Threads for I/O.
197 */
198 struct thread *t_connect;
199 struct thread *t_write;
200 struct thread *t_read;
201
202 /*
203 * Thread to clean up after the TCP connection to the FPM goes down
204 * and the state that belongs to it.
205 */
206 struct thread *t_conn_down;
207
208 struct {
209 zfpm_rnodes_iter_t iter;
210 } t_conn_down_state;
211
212 /*
213 * Thread to take actions once the TCP conn to the FPM comes up, and
214 * the state that belongs to it.
215 */
216 struct thread *t_conn_up;
217
218 struct {
219 zfpm_rnodes_iter_t iter;
220 } t_conn_up_state;
221
222 unsigned long connect_calls;
223 time_t last_connect_call_time;
224
225 /*
226 * Stats from the start of the current statistics interval up to
227 * now. These are the counters we typically update in the code.
228 */
229 zfpm_stats_t stats;
230
231 /*
232 * Statistics that were gathered in the last collection interval.
233 */
234 zfpm_stats_t last_ivl_stats;
235
236 /*
237 * Cumulative stats from the last clear to the start of the current
238 * statistics interval.
239 */
240 zfpm_stats_t cumulative_stats;
241
242 /*
243 * Stats interval timer.
244 */
245 struct thread *t_stats;
246
247 /*
248 * If non-zero, the last time when statistics were cleared.
249 */
250 time_t last_stats_clear_time;
251
252} zfpm_glob_t;
253
254static zfpm_glob_t zfpm_glob_space;
255static zfpm_glob_t *zfpm_g = &zfpm_glob_space;
256
257static int zfpm_read_cb (struct thread *thread);
258static int zfpm_write_cb (struct thread *thread);
259
260static void zfpm_set_state (zfpm_state_t state, const char *reason);
261static void zfpm_start_connect_timer (const char *reason);
262static void zfpm_start_stats_timer (void);
263
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper: returns non-zero when the given thread has used up its
 * time slice and should yield.
 */
static inline int
zfpm_thread_should_yield (struct thread *t)
{
  int yield;

  yield = thread_should_yield (t);
  return yield;
}
272
273/*
274 * zfpm_state_to_str
275 */
276static const char *
277zfpm_state_to_str (zfpm_state_t state)
278{
279 switch (state)
280 {
281
282 case ZFPM_STATE_IDLE:
283 return "idle";
284
285 case ZFPM_STATE_ACTIVE:
286 return "active";
287
288 case ZFPM_STATE_CONNECTING:
289 return "connecting";
290
291 case ZFPM_STATE_ESTABLISHED:
292 return "established";
293
294 default:
295 return "unknown";
296 }
297}
298
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given monotonic
 * reference time. Asserts if time appears to have gone backwards.
 */
static time_t
zfpm_get_elapsed_time (time_t reference)
{
  time_t now = monotime (NULL);

  if (now < reference)
    {
      assert (0);
      return 0;
    }

  return now - reference;
}
319
320/*
321 * zfpm_is_table_for_fpm
322 *
323 * Returns TRUE if the the given table is to be communicated to the
324 * FPM.
325 */
326static inline int
327zfpm_is_table_for_fpm (struct route_table *table)
328{
329 rib_table_info_t *info;
330
331 info = rib_table_info (table);
332
333 /*
334 * We only send the unicast tables in the main instance to the FPM
335 * at this point.
336 */
661512bf 337 if (zvrf_id (info->zvrf) != 0)
5adc2528
AS
338 return 0;
339
340 if (info->safi != SAFI_UNICAST)
341 return 0;
342
343 return 1;
344}
345
346/*
347 * zfpm_rnodes_iter_init
348 */
349static inline void
350zfpm_rnodes_iter_init (zfpm_rnodes_iter_t *iter)
351{
352 memset (iter, 0, sizeof (*iter));
353 rib_tables_iter_init (&iter->tables_iter);
354
355 /*
356 * This is a hack, but it makes implementing 'next' easier by
357 * ensuring that route_table_iter_next() will return NULL the first
358 * time we call it.
359 */
360 route_table_iter_init (&iter->iter, NULL);
361 route_table_iter_cleanup (&iter->iter);
362}
363
364/*
365 * zfpm_rnodes_iter_next
366 */
367static inline struct route_node *
368zfpm_rnodes_iter_next (zfpm_rnodes_iter_t *iter)
369{
370 struct route_node *rn;
371 struct route_table *table;
372
373 while (1)
374 {
375 rn = route_table_iter_next (&iter->iter);
376 if (rn)
377 return rn;
378
379 /*
380 * We've made our way through this table, go to the next one.
381 */
382 route_table_iter_cleanup (&iter->iter);
383
384 while ((table = rib_tables_iter_next (&iter->tables_iter)))
385 {
386 if (zfpm_is_table_for_fpm (table))
387 break;
388 }
389
390 if (!table)
391 return NULL;
392
393 route_table_iter_init (&iter->iter, table);
394 }
395
396 return NULL;
397}
398
399/*
400 * zfpm_rnodes_iter_pause
401 */
402static inline void
403zfpm_rnodes_iter_pause (zfpm_rnodes_iter_t *iter)
404{
405 route_table_iter_pause (&iter->iter);
406}
407
408/*
409 * zfpm_rnodes_iter_cleanup
410 */
411static inline void
412zfpm_rnodes_iter_cleanup (zfpm_rnodes_iter_t *iter)
413{
414 route_table_iter_cleanup (&iter->iter);
415 rib_tables_iter_cleanup (&iter->tables_iter);
416}
417
418/*
419 * zfpm_stats_init
420 *
421 * Initialize a statistics block.
422 */
423static inline void
424zfpm_stats_init (zfpm_stats_t *stats)
425{
426 memset (stats, 0, sizeof (*stats));
427}
428
429/*
430 * zfpm_stats_reset
431 */
432static inline void
433zfpm_stats_reset (zfpm_stats_t *stats)
434{
435 zfpm_stats_init (stats);
436}
437
438/*
439 * zfpm_stats_copy
440 */
441static inline void
442zfpm_stats_copy (const zfpm_stats_t *src, zfpm_stats_t *dest)
443{
444 memcpy (dest, src, sizeof (*dest));
445}
446
447/*
448 * zfpm_stats_compose
449 *
450 * Total up the statistics in two stats structures ('s1 and 's2') and
451 * return the result in the third argument, 'result'. Note that the
452 * pointer 'result' may be the same as 's1' or 's2'.
453 *
454 * For simplicity, the implementation below assumes that the stats
455 * structure is composed entirely of counters. This can easily be
456 * changed when necessary.
457 */
458static void
459zfpm_stats_compose (const zfpm_stats_t *s1, const zfpm_stats_t *s2,
460 zfpm_stats_t *result)
461{
462 const unsigned long *p1, *p2;
463 unsigned long *result_p;
464 int i, num_counters;
465
466 p1 = (const unsigned long *) s1;
467 p2 = (const unsigned long *) s2;
468 result_p = (unsigned long *) result;
469
470 num_counters = (sizeof (zfpm_stats_t) / sizeof (unsigned long));
471
472 for (i = 0; i < num_counters; i++)
473 {
474 result_p[i] = p1[i] + p2[i];
475 }
476}
477
478/*
479 * zfpm_read_on
480 */
481static inline void
482zfpm_read_on (void)
483{
484 assert (!zfpm_g->t_read);
485 assert (zfpm_g->sock >= 0);
486
487 THREAD_READ_ON (zfpm_g->master, zfpm_g->t_read, zfpm_read_cb, 0,
488 zfpm_g->sock);
489}
490
491/*
492 * zfpm_write_on
493 */
494static inline void
495zfpm_write_on (void)
496{
497 assert (!zfpm_g->t_write);
498 assert (zfpm_g->sock >= 0);
499
500 THREAD_WRITE_ON (zfpm_g->master, zfpm_g->t_write, zfpm_write_cb, 0,
501 zfpm_g->sock);
502}
503
504/*
505 * zfpm_read_off
506 */
507static inline void
508zfpm_read_off (void)
509{
510 THREAD_READ_OFF (zfpm_g->t_read);
511}
512
513/*
514 * zfpm_write_off
515 */
516static inline void
517zfpm_write_off (void)
518{
519 THREAD_WRITE_OFF (zfpm_g->t_write);
520}
521
522/*
523 * zfpm_conn_up_thread_cb
524 *
525 * Callback for actions to be taken when the connection to the FPM
526 * comes up.
527 */
528static int
529zfpm_conn_up_thread_cb (struct thread *thread)
530{
531 struct route_node *rnode;
532 zfpm_rnodes_iter_t *iter;
533 rib_dest_t *dest;
534
535 assert (zfpm_g->t_conn_up);
536 zfpm_g->t_conn_up = NULL;
537
538 iter = &zfpm_g->t_conn_up_state.iter;
539
540 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
541 {
542 zfpm_debug ("Connection not up anymore, conn_up thread aborting");
543 zfpm_g->stats.t_conn_up_aborts++;
544 goto done;
545 }
546
547 while ((rnode = zfpm_rnodes_iter_next (iter)))
548 {
549 dest = rib_dest_from_rnode (rnode);
550
551 if (dest)
552 {
553 zfpm_g->stats.t_conn_up_dests_processed++;
554 zfpm_trigger_update (rnode, NULL);
555 }
556
557 /*
558 * Yield if need be.
559 */
560 if (!zfpm_thread_should_yield (thread))
561 continue;
562
563 zfpm_g->stats.t_conn_up_yields++;
564 zfpm_rnodes_iter_pause (iter);
565 zfpm_g->t_conn_up = thread_add_background (zfpm_g->master,
566 zfpm_conn_up_thread_cb,
567 0, 0);
568 return 0;
569 }
570
571 zfpm_g->stats.t_conn_up_finishes++;
572
573 done:
574 zfpm_rnodes_iter_cleanup (iter);
575 return 0;
576}
577
578/*
579 * zfpm_connection_up
580 *
581 * Called when the connection to the FPM comes up.
582 */
583static void
584zfpm_connection_up (const char *detail)
585{
586 assert (zfpm_g->sock >= 0);
587 zfpm_read_on ();
588 zfpm_write_on ();
589 zfpm_set_state (ZFPM_STATE_ESTABLISHED, detail);
590
591 /*
592 * Start thread to push existing routes to the FPM.
593 */
594 assert (!zfpm_g->t_conn_up);
595
596 zfpm_rnodes_iter_init (&zfpm_g->t_conn_up_state.iter);
597
598 zfpm_debug ("Starting conn_up thread");
599 zfpm_g->t_conn_up = thread_add_background (zfpm_g->master,
600 zfpm_conn_up_thread_cb, 0, 0);
601 zfpm_g->stats.t_conn_up_starts++;
602}
603
604/*
605 * zfpm_connect_check
606 *
607 * Check if an asynchronous connect() to the FPM is complete.
608 */
609static void
35dece84 610zfpm_connect_check (void)
5adc2528
AS
611{
612 int status;
613 socklen_t slen;
614 int ret;
615
616 zfpm_read_off ();
617 zfpm_write_off ();
618
619 slen = sizeof (status);
620 ret = getsockopt (zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *) &status,
621 &slen);
622
623 if (ret >= 0 && status == 0)
624 {
625 zfpm_connection_up ("async connect complete");
626 return;
627 }
628
629 /*
630 * getsockopt() failed or indicated an error on the socket.
631 */
632 close (zfpm_g->sock);
633 zfpm_g->sock = -1;
634
635 zfpm_start_connect_timer ("getsockopt() after async connect failed");
636 return;
637}
638
639/*
640 * zfpm_conn_down_thread_cb
641 *
642 * Callback that is invoked to clean up state after the TCP connection
643 * to the FPM goes down.
644 */
645static int
646zfpm_conn_down_thread_cb (struct thread *thread)
647{
648 struct route_node *rnode;
649 zfpm_rnodes_iter_t *iter;
650 rib_dest_t *dest;
651
652 assert (zfpm_g->state == ZFPM_STATE_IDLE);
653
654 assert (zfpm_g->t_conn_down);
655 zfpm_g->t_conn_down = NULL;
656
657 iter = &zfpm_g->t_conn_down_state.iter;
658
659 while ((rnode = zfpm_rnodes_iter_next (iter)))
660 {
661 dest = rib_dest_from_rnode (rnode);
662
663 if (dest)
664 {
665 if (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM))
666 {
667 TAILQ_REMOVE (&zfpm_g->dest_q, dest, fpm_q_entries);
668 }
669
670 UNSET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
671 UNSET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);
672
673 zfpm_g->stats.t_conn_down_dests_processed++;
674
675 /*
676 * Check if the dest should be deleted.
677 */
678 rib_gc_dest(rnode);
679 }
680
681 /*
682 * Yield if need be.
683 */
684 if (!zfpm_thread_should_yield (thread))
685 continue;
686
687 zfpm_g->stats.t_conn_down_yields++;
688 zfpm_rnodes_iter_pause (iter);
689 zfpm_g->t_conn_down = thread_add_background (zfpm_g->master,
690 zfpm_conn_down_thread_cb,
691 0, 0);
692 return 0;
693 }
694
695 zfpm_g->stats.t_conn_down_finishes++;
696 zfpm_rnodes_iter_cleanup (iter);
697
698 /*
699 * Start the process of connecting to the FPM again.
700 */
701 zfpm_start_connect_timer ("cleanup complete");
702 return 0;
703}
704
705/*
706 * zfpm_connection_down
707 *
708 * Called when the connection to the FPM has gone down.
709 */
710static void
711zfpm_connection_down (const char *detail)
712{
713 if (!detail)
714 detail = "unknown";
715
716 assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);
717
718 zlog_info ("connection to the FPM has gone down: %s", detail);
719
720 zfpm_read_off ();
721 zfpm_write_off ();
722
723 stream_reset (zfpm_g->ibuf);
724 stream_reset (zfpm_g->obuf);
725
726 if (zfpm_g->sock >= 0) {
727 close (zfpm_g->sock);
728 zfpm_g->sock = -1;
729 }
730
731 /*
732 * Start thread to clean up state after the connection goes down.
733 */
734 assert (!zfpm_g->t_conn_down);
735 zfpm_debug ("Starting conn_down thread");
736 zfpm_rnodes_iter_init (&zfpm_g->t_conn_down_state.iter);
737 zfpm_g->t_conn_down = thread_add_background (zfpm_g->master,
738 zfpm_conn_down_thread_cb, 0, 0);
739 zfpm_g->stats.t_conn_down_starts++;
740
741 zfpm_set_state (ZFPM_STATE_IDLE, detail);
742}
743
744/*
745 * zfpm_read_cb
746 */
747static int
748zfpm_read_cb (struct thread *thread)
749{
750 size_t already;
751 struct stream *ibuf;
752 uint16_t msg_len;
753 fpm_msg_hdr_t *hdr;
754
755 zfpm_g->stats.read_cb_calls++;
756 assert (zfpm_g->t_read);
757 zfpm_g->t_read = NULL;
758
759 /*
760 * Check if async connect is now done.
761 */
762 if (zfpm_g->state == ZFPM_STATE_CONNECTING)
763 {
764 zfpm_connect_check();
765 return 0;
766 }
767
768 assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);
769 assert (zfpm_g->sock >= 0);
770
771 ibuf = zfpm_g->ibuf;
772
773 already = stream_get_endp (ibuf);
774 if (already < FPM_MSG_HDR_LEN)
775 {
776 ssize_t nbyte;
777
778 nbyte = stream_read_try (ibuf, zfpm_g->sock, FPM_MSG_HDR_LEN - already);
779 if (nbyte == 0 || nbyte == -1)
780 {
781 zfpm_connection_down ("closed socket in read");
782 return 0;
783 }
784
785 if (nbyte != (ssize_t) (FPM_MSG_HDR_LEN - already))
786 goto done;
787
788 already = FPM_MSG_HDR_LEN;
789 }
790
791 stream_set_getp (ibuf, 0);
792
793 hdr = (fpm_msg_hdr_t *) stream_pnt (ibuf);
794
795 if (!fpm_msg_hdr_ok (hdr))
796 {
797 zfpm_connection_down ("invalid message header");
798 return 0;
799 }
800
801 msg_len = fpm_msg_len (hdr);
802
803 /*
804 * Read out the rest of the packet.
805 */
806 if (already < msg_len)
807 {
808 ssize_t nbyte;
809
810 nbyte = stream_read_try (ibuf, zfpm_g->sock, msg_len - already);
811
812 if (nbyte == 0 || nbyte == -1)
813 {
814 zfpm_connection_down ("failed to read message");
815 return 0;
816 }
817
818 if (nbyte != (ssize_t) (msg_len - already))
819 goto done;
820 }
821
822 zfpm_debug ("Read out a full fpm message");
823
824 /*
825 * Just throw it away for now.
826 */
827 stream_reset (ibuf);
828
829 done:
830 zfpm_read_on ();
831 return 0;
832}
833
834/*
835 * zfpm_writes_pending
836 *
837 * Returns TRUE if we may have something to write to the FPM.
838 */
839static int
840zfpm_writes_pending (void)
841{
842
843 /*
844 * Check if there is any data in the outbound buffer that has not
845 * been written to the socket yet.
846 */
847 if (stream_get_endp (zfpm_g->obuf) - stream_get_getp (zfpm_g->obuf))
848 return 1;
849
850 /*
851 * Check if there are any prefixes on the outbound queue.
852 */
853 if (!TAILQ_EMPTY (&zfpm_g->dest_q))
854 return 1;
855
856 return 0;
857}
858
859/*
860 * zfpm_encode_route
861 *
862 * Encode a message to the FPM with information about the given route.
863 *
864 * Returns the number of bytes written to the buffer. 0 or a negative
865 * value indicates an error.
866 */
867static inline int
868zfpm_encode_route (rib_dest_t *dest, struct rib *rib, char *in_buf,
fb0aa886 869 size_t in_buf_len, fpm_msg_type_e *msg_type)
5adc2528 870{
fb0aa886 871 size_t len;
9bf75362 872#ifdef HAVE_NETLINK
5adc2528 873 int cmd;
9bf75362 874#endif
fb0aa886 875 len = 0;
5adc2528 876
fb0aa886 877 *msg_type = FPM_MSG_TYPE_NONE;
5adc2528 878
fb0aa886 879 switch (zfpm_g->message_format) {
5adc2528 880
fb0aa886
AS
881 case ZFPM_MSG_FORMAT_PROTOBUF:
882#ifdef HAVE_PROTOBUF
883 len = zfpm_protobuf_encode_route (dest, rib, (uint8_t *) in_buf,
884 in_buf_len);
885 *msg_type = FPM_MSG_TYPE_PROTOBUF;
886#endif
887 break;
5adc2528 888
fb0aa886
AS
889 case ZFPM_MSG_FORMAT_NETLINK:
890#ifdef HAVE_NETLINK
891 *msg_type = FPM_MSG_TYPE_NETLINK;
892 cmd = rib ? RTM_NEWROUTE : RTM_DELROUTE;
893 len = zfpm_netlink_encode_route (cmd, dest, rib, in_buf, in_buf_len);
894 assert(fpm_msg_align(len) == len);
895 *msg_type = FPM_MSG_TYPE_NETLINK;
5adc2528 896#endif /* HAVE_NETLINK */
fb0aa886
AS
897 break;
898
899 default:
900 break;
901 }
902
903 return len;
904
5adc2528
AS
905}
906
907/*
908 * zfpm_route_for_update
909 *
910 * Returns the rib that is to be sent to the FPM for a given dest.
911 */
fb0aa886 912struct rib *
5adc2528
AS
913zfpm_route_for_update (rib_dest_t *dest)
914{
915 struct rib *rib;
916
917 RIB_DEST_FOREACH_ROUTE (dest, rib)
918 {
446bb95e 919 if (!CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB))
5adc2528
AS
920 continue;
921
922 return rib;
923 }
924
925 /*
926 * We have no route for this destination.
927 */
928 return NULL;
929}
930
931/*
932 * zfpm_build_updates
933 *
934 * Process the outgoing queue and write messages to the outbound
935 * buffer.
936 */
937static void
938zfpm_build_updates (void)
939{
940 struct stream *s;
941 rib_dest_t *dest;
942 unsigned char *buf, *data, *buf_end;
943 size_t msg_len;
944 size_t data_len;
945 fpm_msg_hdr_t *hdr;
946 struct rib *rib;
947 int is_add, write_msg;
fb0aa886 948 fpm_msg_type_e msg_type;
5adc2528
AS
949
950 s = zfpm_g->obuf;
951
952 assert (stream_empty (s));
953
954 do {
955
956 /*
957 * Make sure there is enough space to write another message.
958 */
959 if (STREAM_WRITEABLE (s) < FPM_MAX_MSG_LEN)
960 break;
961
962 buf = STREAM_DATA (s) + stream_get_endp (s);
963 buf_end = buf + STREAM_WRITEABLE (s);
964
965 dest = TAILQ_FIRST (&zfpm_g->dest_q);
966 if (!dest)
967 break;
968
969 assert (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM));
970
971 hdr = (fpm_msg_hdr_t *) buf;
972 hdr->version = FPM_PROTO_VERSION;
5adc2528
AS
973
974 data = fpm_msg_data (hdr);
975
976 rib = zfpm_route_for_update (dest);
977 is_add = rib ? 1 : 0;
978
979 write_msg = 1;
980
981 /*
982 * If this is a route deletion, and we have not sent the route to
983 * the FPM previously, skip it.
984 */
985 if (!is_add && !CHECK_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM))
986 {
987 write_msg = 0;
988 zfpm_g->stats.nop_deletes_skipped++;
989 }
990
991 if (write_msg) {
fb0aa886
AS
992 data_len = zfpm_encode_route (dest, rib, (char *) data, buf_end - data,
993 &msg_type);
5adc2528
AS
994
995 assert (data_len);
996 if (data_len)
997 {
fb0aa886 998 hdr->msg_type = msg_type;
5adc2528
AS
999 msg_len = fpm_data_len_to_msg_len (data_len);
1000 hdr->msg_len = htons (msg_len);
1001 stream_forward_endp (s, msg_len);
1002
1003 if (is_add)
1004 zfpm_g->stats.route_adds++;
1005 else
1006 zfpm_g->stats.route_dels++;
1007 }
1008 }
1009
1010 /*
1011 * Remove the dest from the queue, and reset the flag.
1012 */
1013 UNSET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
1014 TAILQ_REMOVE (&zfpm_g->dest_q, dest, fpm_q_entries);
1015
1016 if (is_add)
1017 {
1018 SET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);
1019 }
1020 else
1021 {
1022 UNSET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);
1023 }
1024
1025 /*
1026 * Delete the destination if necessary.
1027 */
1028 if (rib_gc_dest (dest->rnode))
1029 zfpm_g->stats.dests_del_after_update++;
1030
1031 } while (1);
1032
1033}
1034
1035/*
1036 * zfpm_write_cb
1037 */
1038static int
1039zfpm_write_cb (struct thread *thread)
1040{
1041 struct stream *s;
1042 int num_writes;
1043
1044 zfpm_g->stats.write_cb_calls++;
1045 assert (zfpm_g->t_write);
1046 zfpm_g->t_write = NULL;
1047
1048 /*
1049 * Check if async connect is now done.
1050 */
1051 if (zfpm_g->state == ZFPM_STATE_CONNECTING)
1052 {
1053 zfpm_connect_check ();
1054 return 0;
1055 }
1056
1057 assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);
1058 assert (zfpm_g->sock >= 0);
1059
1060 num_writes = 0;
1061
1062 do
1063 {
1064 int bytes_to_write, bytes_written;
1065
1066 s = zfpm_g->obuf;
1067
1068 /*
1069 * If the stream is empty, try fill it up with data.
1070 */
1071 if (stream_empty (s))
1072 {
1073 zfpm_build_updates ();
1074 }
1075
1076 bytes_to_write = stream_get_endp (s) - stream_get_getp (s);
1077 if (!bytes_to_write)
1078 break;
1079
1080 bytes_written = write (zfpm_g->sock, STREAM_PNT (s), bytes_to_write);
1081 zfpm_g->stats.write_calls++;
1082 num_writes++;
1083
1084 if (bytes_written < 0)
1085 {
1086 if (ERRNO_IO_RETRY (errno))
1087 break;
1088
1089 zfpm_connection_down ("failed to write to socket");
1090 return 0;
1091 }
1092
1093 if (bytes_written != bytes_to_write)
1094 {
1095
1096 /*
1097 * Partial write.
1098 */
1099 stream_forward_getp (s, bytes_written);
1100 zfpm_g->stats.partial_writes++;
1101 break;
1102 }
1103
1104 /*
1105 * We've written out the entire contents of the stream.
1106 */
1107 stream_reset (s);
1108
1109 if (num_writes >= ZFPM_MAX_WRITES_PER_RUN)
1110 {
1111 zfpm_g->stats.max_writes_hit++;
1112 break;
1113 }
1114
1115 if (zfpm_thread_should_yield (thread))
1116 {
1117 zfpm_g->stats.t_write_yields++;
1118 break;
1119 }
1120 } while (1);
1121
1122 if (zfpm_writes_pending ())
1123 zfpm_write_on ();
1124
1125 return 0;
1126}
1127
1128/*
1129 * zfpm_connect_cb
1130 */
1131static int
1132zfpm_connect_cb (struct thread *t)
1133{
1134 int sock, ret;
1135 struct sockaddr_in serv;
1136
1137 assert (zfpm_g->t_connect);
1138 zfpm_g->t_connect = NULL;
1139 assert (zfpm_g->state == ZFPM_STATE_ACTIVE);
1140
1141 sock = socket (AF_INET, SOCK_STREAM, 0);
1142 if (sock < 0)
1143 {
1144 zfpm_debug ("Failed to create socket for connect(): %s", strerror(errno));
1145 zfpm_g->stats.connect_no_sock++;
1146 return 0;
1147 }
1148
1149 set_nonblocking(sock);
1150
1151 /* Make server socket. */
1152 memset (&serv, 0, sizeof (serv));
1153 serv.sin_family = AF_INET;
1154 serv.sin_port = htons (zfpm_g->fpm_port);
1155#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
1156 serv.sin_len = sizeof (struct sockaddr_in);
1157#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
711ff0ba
USK
1158 if (!zfpm_g->fpm_server)
1159 serv.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
1160 else
1161 serv.sin_addr.s_addr = (zfpm_g->fpm_server);
5adc2528
AS
1162
1163 /*
1164 * Connect to the FPM.
1165 */
1166 zfpm_g->connect_calls++;
1167 zfpm_g->stats.connect_calls++;
cf672a86 1168 zfpm_g->last_connect_call_time = monotime(NULL);
5adc2528
AS
1169
1170 ret = connect (sock, (struct sockaddr *) &serv, sizeof (serv));
1171 if (ret >= 0)
1172 {
1173 zfpm_g->sock = sock;
1174 zfpm_connection_up ("connect succeeded");
1175 return 1;
1176 }
1177
1178 if (errno == EINPROGRESS)
1179 {
1180 zfpm_g->sock = sock;
1181 zfpm_read_on ();
1182 zfpm_write_on ();
1183 zfpm_set_state (ZFPM_STATE_CONNECTING, "async connect in progress");
1184 return 0;
1185 }
1186
1187 zlog_info ("can't connect to FPM %d: %s", sock, safe_strerror (errno));
1188 close (sock);
1189
1190 /*
1191 * Restart timer for retrying connection.
1192 */
1193 zfpm_start_connect_timer ("connect() failed");
1194 return 0;
1195}
1196
1197/*
1198 * zfpm_set_state
1199 *
1200 * Move state machine into the given state.
1201 */
1202static void
1203zfpm_set_state (zfpm_state_t state, const char *reason)
1204{
1205 zfpm_state_t cur_state = zfpm_g->state;
1206
1207 if (!reason)
1208 reason = "Unknown";
1209
1210 if (state == cur_state)
1211 return;
1212
1213 zfpm_debug("beginning state transition %s -> %s. Reason: %s",
1214 zfpm_state_to_str (cur_state), zfpm_state_to_str (state),
1215 reason);
1216
1217 switch (state) {
1218
1219 case ZFPM_STATE_IDLE:
1220 assert (cur_state == ZFPM_STATE_ESTABLISHED);
1221 break;
1222
1223 case ZFPM_STATE_ACTIVE:
1224 assert (cur_state == ZFPM_STATE_IDLE ||
1225 cur_state == ZFPM_STATE_CONNECTING);
1226 assert (zfpm_g->t_connect);
1227 break;
1228
1229 case ZFPM_STATE_CONNECTING:
1230 assert (zfpm_g->sock);
1231 assert (cur_state == ZFPM_STATE_ACTIVE);
1232 assert (zfpm_g->t_read);
1233 assert (zfpm_g->t_write);
1234 break;
1235
1236 case ZFPM_STATE_ESTABLISHED:
1237 assert (cur_state == ZFPM_STATE_ACTIVE ||
1238 cur_state == ZFPM_STATE_CONNECTING);
1239 assert (zfpm_g->sock);
1240 assert (zfpm_g->t_read);
1241 assert (zfpm_g->t_write);
1242 break;
1243 }
1244
1245 zfpm_g->state = state;
1246}
1247
1248/*
1249 * zfpm_calc_connect_delay
1250 *
1251 * Returns the number of seconds after which we should attempt to
1252 * reconnect to the FPM.
1253 */
1254static long
1255zfpm_calc_connect_delay (void)
1256{
1257 time_t elapsed;
1258
1259 /*
1260 * Return 0 if this is our first attempt to connect.
1261 */
1262 if (zfpm_g->connect_calls == 0)
1263 {
1264 return 0;
1265 }
1266
1267 elapsed = zfpm_get_elapsed_time (zfpm_g->last_connect_call_time);
1268
1269 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1270 return 0;
1271 }
1272
1273 return ZFPM_CONNECT_RETRY_IVL - elapsed;
1274}
1275
1276/*
1277 * zfpm_start_connect_timer
1278 */
1279static void
1280zfpm_start_connect_timer (const char *reason)
1281{
1282 long delay_secs;
1283
1284 assert (!zfpm_g->t_connect);
1285 assert (zfpm_g->sock < 0);
1286
1287 assert(zfpm_g->state == ZFPM_STATE_IDLE ||
1288 zfpm_g->state == ZFPM_STATE_ACTIVE ||
1289 zfpm_g->state == ZFPM_STATE_CONNECTING);
1290
1291 delay_secs = zfpm_calc_connect_delay();
1292 zfpm_debug ("scheduling connect in %ld seconds", delay_secs);
1293
1294 THREAD_TIMER_ON (zfpm_g->master, zfpm_g->t_connect, zfpm_connect_cb, 0,
1295 delay_secs);
1296 zfpm_set_state (ZFPM_STATE_ACTIVE, reason);
1297}
1298
5697001a 1299#if defined (HAVE_FPM)
5adc2528
AS
1300/*
1301 * zfpm_is_enabled
1302 *
1303 * Returns TRUE if the zebra FPM module has been enabled.
1304 */
1305static inline int
1306zfpm_is_enabled (void)
1307{
1308 return zfpm_g->enabled;
1309}
5697001a 1310#endif
5adc2528
AS
1311
1312/*
1313 * zfpm_conn_is_up
1314 *
1315 * Returns TRUE if the connection to the FPM is up.
1316 */
1317static inline int
1318zfpm_conn_is_up (void)
1319{
1320 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1321 return 0;
1322
1323 assert (zfpm_g->sock >= 0);
1324
1325 return 1;
1326}
1327
1328/*
1329 * zfpm_trigger_update
1330 *
1331 * The zebra code invokes this function to indicate that we should
1332 * send an update to the FPM about the given route_node.
1333 */
1334void
1335zfpm_trigger_update (struct route_node *rn, const char *reason)
1336{
1337 rib_dest_t *dest;
35d921cc 1338 char buf[PREFIX_STRLEN];
5adc2528
AS
1339
1340 /*
1341 * Ignore if the connection is down. We will update the FPM about
1342 * all destinations once the connection comes up.
1343 */
1344 if (!zfpm_conn_is_up ())
1345 return;
1346
1347 dest = rib_dest_from_rnode (rn);
1348
1349 /*
1350 * Ignore the trigger if the dest is not in a table that we would
1351 * send to the FPM.
1352 */
1353 if (!zfpm_is_table_for_fpm (rib_dest_table (dest)))
1354 {
1355 zfpm_g->stats.non_fpm_table_triggers++;
1356 return;
1357 }
1358
1359 if (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM)) {
1360 zfpm_g->stats.redundant_triggers++;
1361 return;
1362 }
1363
1364 if (reason)
1365 {
35d921cc
TT
1366 zfpm_debug ("%s triggering update to FPM - Reason: %s",
1367 prefix2str (&rn->p, buf, sizeof(buf)), reason);
5adc2528
AS
1368 }
1369
1370 SET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
1371 TAILQ_INSERT_TAIL (&zfpm_g->dest_q, dest, fpm_q_entries);
1372 zfpm_g->stats.updates_triggered++;
1373
1374 /*
1375 * Make sure that writes are enabled.
1376 */
1377 if (zfpm_g->t_write)
1378 return;
1379
1380 zfpm_write_on ();
1381}
1382
1383/*
1384 * zfpm_stats_timer_cb
1385 */
/*
 * Periodic timer callback that rolls over the statistics interval:
 * snapshot the counters gathered during the interval that just ended,
 * fold them into the cumulative totals, reset the working counters and
 * rearm the timer.  't' is the expired timer thread (unused here).
 */
static int
zfpm_stats_timer_cb (struct thread *t)
{
  /* The timer that fired must be the one we scheduled; clear it since
     it has now expired. */
  assert (zfpm_g->t_stats);
  zfpm_g->t_stats = NULL;

  /*
   * Remember the stats collected in the last interval for display
   * purposes.
   */
  zfpm_stats_copy (&zfpm_g->stats, &zfpm_g->last_ivl_stats);

  /*
   * Add the current set of stats into the cumulative statistics.
   */
  zfpm_stats_compose (&zfpm_g->cumulative_stats, &zfpm_g->stats,
		      &zfpm_g->cumulative_stats);

  /*
   * Start collecting stats afresh over the next interval.
   */
  zfpm_stats_reset (&zfpm_g->stats);

  /* Rearm ourselves for the next interval. */
  zfpm_start_stats_timer ();

  return 0;
}
1413
5697001a 1414#if defined (HAVE_FPM)
5adc2528
AS
1415/*
1416 * zfpm_stop_stats_timer
1417 */
static void
zfpm_stop_stats_timer (void)
{
  /* Nothing scheduled -- nothing to cancel. */
  if (!zfpm_g->t_stats)
    return;

  zfpm_debug ("Stopping existing stats timer");
  /* Cancels the timer; presumably also clears zfpm_g->t_stats --
     TODO confirm against the THREAD_TIMER_OFF definition. */
  THREAD_TIMER_OFF (zfpm_g->t_stats);
}
5697001a 1427#endif
5adc2528
AS
1428
1429/*
1430 * zfpm_start_stats_timer
1431 */
void
zfpm_start_stats_timer (void)
{
  /* Only one stats timer may be outstanding at any time. */
  assert (!zfpm_g->t_stats);

  /* Schedule zfpm_stats_timer_cb to fire after ZFPM_STATS_IVL_SECS. */
  THREAD_TIMER_ON (zfpm_g->master, zfpm_g->t_stats, zfpm_stats_timer_cb, 0,
		   ZFPM_STATS_IVL_SECS);
}
1440
1441/*
1442 * Helper macro for zfpm_show_stats() below.
1443 */
/*
 * Emits one formatted line for 'counter': its name, the cumulative
 * total (from the local 'total_stats'), and the value collected over
 * the last interval.  Expects 'vty' and 'total_stats' in scope at the
 * expansion site.
 */
#define ZFPM_SHOW_STAT(counter) \
  do { \
    vty_out (vty, "%-40s %10lu %16lu%s", #counter, total_stats.counter, \
	     zfpm_g->last_ivl_stats.counter, VTY_NEWLINE); \
  } while (0)
1449
5697001a 1450#if defined (HAVE_FPM)
5adc2528
AS
1451/*
1452 * zfpm_show_stats
1453 */
/*
 * Display all FPM statistics counters on the given vty: a header,
 * one line per counter (cumulative and last-interval values), and
 * how long ago the counters were last cleared, if ever.
 */
static void
zfpm_show_stats (struct vty *vty)
{
  zfpm_stats_t total_stats;
  time_t elapsed;

  /* Column header: counter name, total, last-interval. */
  vty_out (vty, "%s%-40s %10s Last %2d secs%s%s", VTY_NEWLINE, "Counter",
	   "Total", ZFPM_STATS_IVL_SECS, VTY_NEWLINE, VTY_NEWLINE);

  /*
   * Compute the total stats up to this instant.
   */
  zfpm_stats_compose (&zfpm_g->cumulative_stats, &zfpm_g->stats,
		      &total_stats);

  ZFPM_SHOW_STAT (connect_calls);
  ZFPM_SHOW_STAT (connect_no_sock);
  ZFPM_SHOW_STAT (read_cb_calls);
  ZFPM_SHOW_STAT (write_cb_calls);
  ZFPM_SHOW_STAT (write_calls);
  ZFPM_SHOW_STAT (partial_writes);
  ZFPM_SHOW_STAT (max_writes_hit);
  ZFPM_SHOW_STAT (t_write_yields);
  ZFPM_SHOW_STAT (nop_deletes_skipped);
  ZFPM_SHOW_STAT (route_adds);
  ZFPM_SHOW_STAT (route_dels);
  ZFPM_SHOW_STAT (updates_triggered);
  ZFPM_SHOW_STAT (non_fpm_table_triggers);
  ZFPM_SHOW_STAT (redundant_triggers);
  ZFPM_SHOW_STAT (dests_del_after_update);
  ZFPM_SHOW_STAT (t_conn_down_starts);
  ZFPM_SHOW_STAT (t_conn_down_dests_processed);
  ZFPM_SHOW_STAT (t_conn_down_yields);
  ZFPM_SHOW_STAT (t_conn_down_finishes);
  ZFPM_SHOW_STAT (t_conn_up_starts);
  ZFPM_SHOW_STAT (t_conn_up_dests_processed);
  ZFPM_SHOW_STAT (t_conn_up_yields);
  ZFPM_SHOW_STAT (t_conn_up_aborts);
  ZFPM_SHOW_STAT (t_conn_up_finishes);

  /* Stats have never been cleared: no "cleared ago" footer. */
  if (!zfpm_g->last_stats_clear_time)
    return;

  elapsed = zfpm_get_elapsed_time (zfpm_g->last_stats_clear_time);

  vty_out (vty, "%sStats were cleared %lu seconds ago%s", VTY_NEWLINE,
	   (unsigned long) elapsed, VTY_NEWLINE);
}
1502
1503/*
1504 * zfpm_clear_stats
1505 */
1506static void
1507zfpm_clear_stats (struct vty *vty)
1508{
1509 if (!zfpm_is_enabled ())
1510 {
1511 vty_out (vty, "The FPM module is not enabled...%s", VTY_NEWLINE);
1512 return;
1513 }
1514
1515 zfpm_stats_reset (&zfpm_g->stats);
1516 zfpm_stats_reset (&zfpm_g->last_ivl_stats);
1517 zfpm_stats_reset (&zfpm_g->cumulative_stats);
1518
1519 zfpm_stop_stats_timer ();
1520 zfpm_start_stats_timer ();
1521
cf672a86 1522 zfpm_g->last_stats_clear_time = monotime(NULL);
5adc2528
AS
1523
1524 vty_out (vty, "Cleared FPM stats%s", VTY_NEWLINE);
1525}
1526
1527/*
1528 * show_zebra_fpm_stats
1529 */
/* CLI handler for "show zebra fpm stats": prints all FPM counters. */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       "Zebra information\n"
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
  zfpm_show_stats (vty);
  return CMD_SUCCESS;
}
1541
1542/*
1543 * clear_zebra_fpm_stats
1544 */
/* CLI handler for "clear zebra fpm stats": resets all FPM counters. */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       "Zebra information\n"
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
  zfpm_clear_stats (vty);
  return CMD_SUCCESS;
}
1556
711ff0ba
USK
1557/*
1558 * update fpm connection information
1559 */
e52702f2
QY
1560DEFUN ( fpm_remote_ip,
1561 fpm_remote_ip_cmd,
1562 "fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1563 "fpm connection remote ip and port\n"
1564 "Remote fpm server ip A.B.C.D\n"
1565 "Enter ip ")
1566{
1567
1568 in_addr_t fpm_server;
1569 uint32_t port_no;
1570
e52702f2 1571 fpm_server = inet_addr (argv[3]->arg);
711ff0ba
USK
1572 if (fpm_server == INADDR_NONE)
1573 return CMD_ERR_INCOMPLETE;
1574
e52702f2 1575 port_no = atoi (argv[5]->arg);
711ff0ba
USK
1576 if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
1577 return CMD_ERR_INCOMPLETE;
1578
1579 zfpm_g->fpm_server = fpm_server;
1580 zfpm_g->fpm_port = port_no;
1581
1582
1583 return CMD_SUCCESS;
1584}
1585
e52702f2
QY
1586DEFUN ( no_fpm_remote_ip,
1587 no_fpm_remote_ip_cmd,
1588 "no fpm connection ip A.B.C.D port (1-65535)",
711ff0ba
USK
1589 "fpm connection remote ip and port\n"
1590 "Connection\n"
1591 "Remote fpm server ip A.B.C.D\n"
1592 "Enter ip ")
1593{
e52702f2
QY
1594 if (zfpm_g->fpm_server != inet_addr (argv[4]->arg) ||
1595 zfpm_g->fpm_port != atoi (argv[6]->arg))
711ff0ba
USK
1596 return CMD_ERR_NO_MATCH;
1597
1598 zfpm_g->fpm_server = FPM_DEFAULT_IP;
1599 zfpm_g->fpm_port = FPM_DEFAULT_PORT;
1600
1601 return CMD_SUCCESS;
1602}
5697001a 1603#endif
711ff0ba 1604
fb0aa886
AS
1605/*
1606 * zfpm_init_message_format
1607 */
1608static inline void
1609zfpm_init_message_format (const char *format)
1610{
1611 int have_netlink, have_protobuf;
1612
fb0aa886
AS
1613#ifdef HAVE_NETLINK
1614 have_netlink = 1;
4b2792b5
DS
1615#else
1616 have_netlink = 0;
fb0aa886
AS
1617#endif
1618
1619#ifdef HAVE_PROTOBUF
1620 have_protobuf = 1;
4b2792b5
DS
1621#else
1622 have_protobuf = 0;
fb0aa886
AS
1623#endif
1624
1625 zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;
1626
1627 if (!format)
1628 {
1629 if (have_netlink)
1630 {
1631 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1632 }
1633 else if (have_protobuf)
1634 {
1635 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1636 }
1637 return;
1638 }
1639
1640 if (!strcmp ("netlink", format))
1641 {
1642 if (!have_netlink)
1643 {
1644 zlog_err ("FPM netlink message format is not available");
1645 return;
1646 }
1647 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1648 return;
1649 }
1650
1651 if (!strcmp ("protobuf", format))
1652 {
1653 if (!have_protobuf)
1654 {
1655 zlog_err ("FPM protobuf message format is not available");
1656 return;
1657 }
1658 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1659 return;
1660 }
1661
1662 zlog_warn ("Unknown fpm format '%s'", format);
1663}
1664
711ff0ba
USK
1665/**
1666 * fpm_remote_srv_write
1667 *
1668 * Module to write remote fpm connection
1669 *
1670 * Returns ZERO on success.
1671 */
1672
1673int fpm_remote_srv_write (struct vty *vty )
1674{
1675 struct in_addr in;
1676
1677 in.s_addr = zfpm_g->fpm_server;
1678
1679 if (zfpm_g->fpm_server != FPM_DEFAULT_IP ||
1680 zfpm_g->fpm_port != FPM_DEFAULT_PORT)
1681 vty_out (vty,"fpm connection ip %s port %d%s", inet_ntoa (in),zfpm_g->fpm_port,VTY_NEWLINE);
1682
1683 return 0;
1684}
1685
1686
5adc2528
AS
1687/**
1688 * zfpm_init
1689 *
1690 * One-time initialization of the Zebra FPM module.
1691 *
1692 * @param[in] port port at which FPM is running.
1693 * @param[in] enable TRUE if the zebra FPM module should be enabled
fb0aa886 1694 * @param[in] format to use to talk to the FPM. Can be 'netink' or 'protobuf'.
5adc2528
AS
1695 *
1696 * Returns TRUE on success.
1697 */
1698int
fb0aa886
AS
1699zfpm_init (struct thread_master *master, int enable, uint16_t port,
1700 const char *format)
5adc2528
AS
1701{
1702 static int initialized = 0;
1703
1704 if (initialized) {
1705 return 1;
1706 }
1707
1708 initialized = 1;
1709
1710 memset (zfpm_g, 0, sizeof (*zfpm_g));
1711 zfpm_g->master = master;
1712 TAILQ_INIT(&zfpm_g->dest_q);
1713 zfpm_g->sock = -1;
1714 zfpm_g->state = ZFPM_STATE_IDLE;
1715
5adc2528
AS
1716 zfpm_stats_init (&zfpm_g->stats);
1717 zfpm_stats_init (&zfpm_g->last_ivl_stats);
1718 zfpm_stats_init (&zfpm_g->cumulative_stats);
1719
5697001a 1720#if defined (HAVE_FPM)
5adc2528
AS
1721 install_element (ENABLE_NODE, &show_zebra_fpm_stats_cmd);
1722 install_element (ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
711ff0ba
USK
1723 install_element (CONFIG_NODE, &fpm_remote_ip_cmd);
1724 install_element (CONFIG_NODE, &no_fpm_remote_ip_cmd);
5697001a 1725#endif
5adc2528 1726
fb0aa886
AS
1727 zfpm_init_message_format(format);
1728
1729 /*
1730 * Disable FPM interface if no suitable format is available.
1731 */
1732 if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
1733 enable = 0;
1734
1735 zfpm_g->enabled = enable;
5adc2528
AS
1736
1737 if (!enable) {
1738 return 1;
1739 }
1740
711ff0ba
USK
1741 if (!zfpm_g->fpm_server)
1742 zfpm_g->fpm_server = FPM_DEFAULT_IP;
1743
5adc2528
AS
1744 if (!port)
1745 port = FPM_DEFAULT_PORT;
1746
1747 zfpm_g->fpm_port = port;
1748
1749 zfpm_g->obuf = stream_new (ZFPM_OBUF_SIZE);
1750 zfpm_g->ibuf = stream_new (ZFPM_IBUF_SIZE);
1751
1752 zfpm_start_stats_timer ();
1753 zfpm_start_connect_timer ("initialized");
1754
1755 return 1;
1756}