/*
 * zebra/zebra_fpm.c — obtained via the git.proxmox.com web mirror of FRR;
 * web-UI header lines removed.
 */
1 /*
2 * Main implementation file for interface to Forwarding Plane Manager.
3 *
4 * Copyright (C) 2012 by Open Source Routing.
5 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
6 *
7 * This file is part of GNU Zebra.
8 *
9 * GNU Zebra is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
13 *
14 * GNU Zebra is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; see the file COPYING; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24 #include <zebra.h>
25
26 #include "log.h"
27 #include "libfrr.h"
28 #include "stream.h"
29 #include "thread.h"
30 #include "network.h"
31 #include "command.h"
32 #include "lib/version.h"
33 #include "jhash.h"
34
35 #include "zebra/rib.h"
36 #include "zebra/zserv.h"
37 #include "zebra/zebra_ns.h"
38 #include "zebra/zebra_vrf.h"
39 #include "zebra/zebra_errors.h"
40
41 #include "fpm/fpm.h"
42 #include "zebra_fpm_private.h"
43 #include "zebra/zebra_router.h"
44 #include "zebra_vxlan_private.h"
45
46 DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
47
48 /*
49 * Interval at which we attempt to connect to the FPM.
50 */
51 #define ZFPM_CONNECT_RETRY_IVL 5
52
53 /*
54 * Sizes of outgoing and incoming stream buffers for writing/reading
55 * FPM messages.
56 */
57 #define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
58 #define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)
59
60 /*
61 * The maximum number of times the FPM socket write callback can call
62 * 'write' before it yields.
63 */
64 #define ZFPM_MAX_WRITES_PER_RUN 10
65
66 /*
67 * Interval over which we collect statistics.
68 */
69 #define ZFPM_STATS_IVL_SECS 10
70 #define FPM_MAX_MAC_MSG_LEN 512
71
72 static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args);
73
/*
 * Structure that holds state for iterating over all route_node
 * structures that are candidates for being communicated to the FPM.
 *
 * The outer iterator walks all RIB tables; the inner one walks the
 * route nodes of the table currently being visited.
 */
struct zfpm_rnodes_iter {
	rib_tables_iter_t tables_iter;	/* position among all RIB tables */
	route_table_iter_t iter;	/* position within the current table */
};
82
/*
 * Statistics.
 *
 * NOTE: zfpm_stats_compose() assumes this struct consists entirely of
 * 'unsigned long' counters — keep it that way when adding fields.
 */
struct zfpm_stats {
	/* Connection attempts */
	unsigned long connect_calls;
	unsigned long connect_no_sock;

	/* Read path */
	unsigned long read_cb_calls;

	/* Write path */
	unsigned long write_cb_calls;
	unsigned long write_calls;
	unsigned long partial_writes;
	unsigned long max_writes_hit;
	unsigned long t_write_yields;

	/* Route update encoding */
	unsigned long nop_deletes_skipped;
	unsigned long route_adds;
	unsigned long route_dels;

	/* Triggered updates */
	unsigned long updates_triggered;
	unsigned long redundant_triggers;

	unsigned long dests_del_after_update;

	/* Connection-down cleanup thread */
	unsigned long t_conn_down_starts;
	unsigned long t_conn_down_dests_processed;
	unsigned long t_conn_down_yields;
	unsigned long t_conn_down_finishes;

	/* Connection-up (full dump) thread */
	unsigned long t_conn_up_starts;
	unsigned long t_conn_up_dests_processed;
	unsigned long t_conn_up_yields;
	unsigned long t_conn_up_aborts;
	unsigned long t_conn_up_finishes;
};
118
/*
 * States for the FPM state machine.
 *
 * Transitions are driven by zfpm_set_state(), which asserts the legal
 * predecessor state(s) for each target state.
 */
enum zfpm_state {

	/*
	 * In this state we are not yet ready to connect to the FPM. This
	 * can happen when this module is disabled, or if we're cleaning up
	 * after a connection has gone down.
	 */
	ZFPM_STATE_IDLE,

	/*
	 * Ready to talk to the FPM and periodically trying to connect to
	 * it.
	 */
	ZFPM_STATE_ACTIVE,

	/*
	 * In the middle of bringing up a TCP connection. Specifically,
	 * waiting for a connect() call to complete asynchronously.
	 */
	ZFPM_STATE_CONNECTING,

	/*
	 * TCP connection to the FPM is up.
	 */
	ZFPM_STATE_ESTABLISHED

};
149
/*
 * Message format to be used to communicate with the FPM.
 */
enum zfpm_msg_format {
	ZFPM_MSG_FORMAT_NONE,		/* encoding disabled/unconfigured */
	ZFPM_MSG_FORMAT_NETLINK,	/* netlink-format payloads */
	ZFPM_MSG_FORMAT_PROTOBUF,	/* protobuf-format payloads */
};
158
/*
 * Globals.
 *
 * All module state lives in a single instance of this structure
 * (zfpm_glob_space / zfpm_g below).
 */
struct zfpm_glob {

	/*
	 * True if the FPM module has been enabled.
	 */
	int enabled;

	/*
	 * Message format to be used to communicate with the fpm.
	 */
	enum zfpm_msg_format message_format;

	/* Event loop that all our threads/timers are scheduled on. */
	struct thread_master *master;

	/* Current state-machine state (see enum zfpm_state). */
	enum zfpm_state state;

	/* FPM server address, network byte order; 0 means loopback. */
	in_addr_t fpm_server;
	/*
	 * Port on which the FPM is running.
	 */
	int fpm_port;

	/*
	 * List of rib_dest_t structures to be processed
	 */
	TAILQ_HEAD(zfpm_dest_q, rib_dest_t_) dest_q;

	/*
	 * List of fpm_mac_info structures to be processed
	 */
	TAILQ_HEAD(zfpm_mac_q, fpm_mac_info_t) mac_q;

	/*
	 * Hash table of fpm_mac_info_t entries
	 *
	 * While adding fpm_mac_info_t for a MAC to the mac_q,
	 * it is possible that another fpm_mac_info_t node for this MAC
	 * is already present in the queue.
	 * This is possible in the case of consecutive add->delete operations.
	 * To avoid such duplicate insertions in the mac_q,
	 * define a hash table for fpm_mac_info_t which can be looked up
	 * to see if an fpm_mac_info_t node for a MAC is already present
	 * in the mac_q.
	 */
	struct hash *fpm_mac_info_table;

	/*
	 * Stream socket to the FPM.
	 */
	int sock;

	/*
	 * Buffers for messages to/from the FPM.
	 */
	struct stream *obuf;
	struct stream *ibuf;

	/*
	 * Threads for I/O.
	 */
	struct thread *t_connect;
	struct thread *t_write;
	struct thread *t_read;

	/*
	 * Thread to clean up after the TCP connection to the FPM goes down
	 * and the state that belongs to it.
	 */
	struct thread *t_conn_down;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_down_state;

	/*
	 * Thread to take actions once the TCP conn to the FPM comes up, and
	 * the state that belongs to it.
	 */
	struct thread *t_conn_up;

	struct {
		struct zfpm_rnodes_iter iter;
	} t_conn_up_state;

	/* Lifetime connect() attempt count and timestamp of the last one;
	 * used to pace reconnects (see zfpm_calc_connect_delay()). */
	unsigned long connect_calls;
	time_t last_connect_call_time;

	/*
	 * Stats from the start of the current statistics interval up to
	 * now. These are the counters we typically update in the code.
	 */
	struct zfpm_stats stats;

	/*
	 * Statistics that were gathered in the last collection interval.
	 */
	struct zfpm_stats last_ivl_stats;

	/*
	 * Cumulative stats from the last clear to the start of the current
	 * statistics interval.
	 */
	struct zfpm_stats cumulative_stats;

	/*
	 * Stats interval timer.
	 */
	struct thread *t_stats;

	/*
	 * If non-zero, the last time when statistics were cleared.
	 */
	time_t last_stats_clear_time;

	/*
	 * Flag to track the MAC dump status to FPM
	 */
	bool fpm_mac_dump_done;
};
281
282 static struct zfpm_glob zfpm_glob_space;
283 static struct zfpm_glob *zfpm_g = &zfpm_glob_space;
284
285 static int zfpm_trigger_update(struct route_node *rn, const char *reason);
286
287 static int zfpm_read_cb(struct thread *thread);
288 static int zfpm_write_cb(struct thread *thread);
289
290 static void zfpm_set_state(enum zfpm_state state, const char *reason);
291 static void zfpm_start_connect_timer(const char *reason);
292 static void zfpm_start_stats_timer(void);
293 static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
294
295 static const char ipv4_ll_buf[16] = "169.254.0.1";
296 union g_addr ipv4ll_gateway;
297
/*
 * zfpm_thread_should_yield
 *
 * Thin wrapper around the event library's yield check; returns non-zero
 * when the given thread has used up its time slice.
 */
static inline int zfpm_thread_should_yield(struct thread *t)
{
	int yield = thread_should_yield(t);

	return yield;
}
305
306 /*
307 * zfpm_state_to_str
308 */
309 static const char *zfpm_state_to_str(enum zfpm_state state)
310 {
311 switch (state) {
312
313 case ZFPM_STATE_IDLE:
314 return "idle";
315
316 case ZFPM_STATE_ACTIVE:
317 return "active";
318
319 case ZFPM_STATE_CONNECTING:
320 return "connecting";
321
322 case ZFPM_STATE_ESTABLISHED:
323 return "established";
324
325 default:
326 return "unknown";
327 }
328 }
329
/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given monotonic
 * timestamp. Time is not expected to go backwards; if it appears to,
 * we assert in debug builds and return 0 otherwise.
 */
static time_t zfpm_get_elapsed_time(time_t reference)
{
	time_t now = monotime(NULL);

	if (now < reference) {
		assert(0);
		return 0;
	}

	return now - reference;
}
348
/*
 * zfpm_rnodes_iter_init
 *
 * Prepare 'iter' to walk every route_node of every RIB table from the
 * beginning.
 */
static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	rib_tables_iter_init(&iter->tables_iter);

	/*
	 * This is a hack, but it makes implementing 'next' easier by
	 * ensuring that route_table_iter_next() will return NULL the first
	 * time we call it.
	 */
	route_table_iter_init(&iter->iter, NULL);
	route_table_iter_cleanup(&iter->iter);
}
365
/*
 * zfpm_rnodes_iter_next
 *
 * Return the next route_node in the all-tables walk, advancing to the
 * next RIB table when the current one is exhausted. Returns NULL when
 * every table has been visited.
 */
static inline struct route_node *
zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
{
	struct route_node *rn;
	struct route_table *table;

	while (1) {
		rn = route_table_iter_next(&iter->iter);
		if (rn)
			return rn;

		/*
		 * We've made our way through this table, go to the next one.
		 */
		route_table_iter_cleanup(&iter->iter);

		table = rib_tables_iter_next(&iter->tables_iter);

		if (!table)
			return NULL;

		route_table_iter_init(&iter->iter, table);
	}

	/* Not reached: the loop above only exits via return. */
	return NULL;
}
395
/*
 * zfpm_rnodes_iter_pause
 *
 * Suspend the walk so the caller can yield; resume later by simply
 * calling zfpm_rnodes_iter_next() again.
 */
static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_pause(&iter->iter);
}
403
/*
 * zfpm_rnodes_iter_cleanup
 *
 * Release any resources held by the iterator; call when the walk is
 * finished or abandoned.
 */
static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
{
	route_table_iter_cleanup(&iter->iter);
	rib_tables_iter_cleanup(&iter->tables_iter);
}
412
413 /*
414 * zfpm_stats_init
415 *
416 * Initialize a statistics block.
417 */
418 static inline void zfpm_stats_init(struct zfpm_stats *stats)
419 {
420 memset(stats, 0, sizeof(*stats));
421 }
422
423 /*
424 * zfpm_stats_reset
425 */
426 static inline void zfpm_stats_reset(struct zfpm_stats *stats)
427 {
428 zfpm_stats_init(stats);
429 }
430
431 /*
432 * zfpm_stats_copy
433 */
434 static inline void zfpm_stats_copy(const struct zfpm_stats *src,
435 struct zfpm_stats *dest)
436 {
437 memcpy(dest, src, sizeof(*dest));
438 }
439
440 /*
441 * zfpm_stats_compose
442 *
443 * Total up the statistics in two stats structures ('s1 and 's2') and
444 * return the result in the third argument, 'result'. Note that the
445 * pointer 'result' may be the same as 's1' or 's2'.
446 *
447 * For simplicity, the implementation below assumes that the stats
448 * structure is composed entirely of counters. This can easily be
449 * changed when necessary.
450 */
451 static void zfpm_stats_compose(const struct zfpm_stats *s1,
452 const struct zfpm_stats *s2,
453 struct zfpm_stats *result)
454 {
455 const unsigned long *p1, *p2;
456 unsigned long *result_p;
457 int i, num_counters;
458
459 p1 = (const unsigned long *)s1;
460 p2 = (const unsigned long *)s2;
461 result_p = (unsigned long *)result;
462
463 num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
464
465 for (i = 0; i < num_counters; i++) {
466 result_p[i] = p1[i] + p2[i];
467 }
468 }
469
/*
 * zfpm_read_on
 *
 * Arm the read callback on the FPM socket. Must not already be armed,
 * and the socket must be open.
 */
static inline void zfpm_read_on(void)
{
	assert(!zfpm_g->t_read);
	assert(zfpm_g->sock >= 0);

	thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
			&zfpm_g->t_read);
}
481
/*
 * zfpm_write_on
 *
 * Arm the write callback on the FPM socket. Must not already be armed,
 * and the socket must be open.
 */
static inline void zfpm_write_on(void)
{
	assert(!zfpm_g->t_write);
	assert(zfpm_g->sock >= 0);

	thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
			 &zfpm_g->t_write);
}
493
/*
 * zfpm_read_off
 *
 * Cancel the pending read callback, if any.
 */
static inline void zfpm_read_off(void)
{
	thread_cancel(&zfpm_g->t_read);
}
501
/*
 * zfpm_write_off
 *
 * Cancel the pending write callback, if any.
 */
static inline void zfpm_write_off(void)
{
	thread_cancel(&zfpm_g->t_write);
}
509
/*
 * zfpm_connect_off
 *
 * Cancel the pending connect timer, if any.
 */
static inline void zfpm_connect_off(void)
{
	thread_cancel(&zfpm_g->t_connect);
}
514
/*
 * zfpm_conn_up_thread_cb
 *
 * Callback for actions to be taken when the connection to the FPM
 * comes up: dump all RMAC entries once, then walk every route_node and
 * enqueue an FPM update for each dest. Yields and re-schedules itself
 * when it has run long enough; aborts if the connection drops mid-walk.
 *
 * Always returns 0 (thread callback convention).
 */
static int zfpm_conn_up_thread_cb(struct thread *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;

	iter = &zfpm_g->t_conn_up_state.iter;

	if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) {
		zfpm_debug(
			"Connection not up anymore, conn_up thread aborting");
		zfpm_g->stats.t_conn_up_aborts++;
		goto done;
	}

	if (!zfpm_g->fpm_mac_dump_done) {
		/* Enqueue FPM updates for all the RMAC entries */
		hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table,
			     NULL);
		/* mark dump done so that it is not repeated after a yield */
		zfpm_g->fpm_mac_dump_done = true;
	}

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			zfpm_g->stats.t_conn_up_dests_processed++;
			zfpm_trigger_update(rnode, NULL);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		/* Pause the iterator and reschedule ourselves; the walk
		 * resumes where it left off. */
		zfpm_g->stats.t_conn_up_yields++;
		zfpm_rnodes_iter_pause(iter);
		thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
				      NULL, 0, &zfpm_g->t_conn_up);
		return 0;
	}

	zfpm_g->stats.t_conn_up_finishes++;

done:
	zfpm_rnodes_iter_cleanup(iter);
	return 0;
}
571
/*
 * zfpm_connection_up
 *
 * Called when the connection to the FPM comes up: arm I/O callbacks,
 * move to ESTABLISHED, and start the thread that pushes all existing
 * routes (and RMAC entries) to the FPM.
 *
 * 'detail' is a human-readable reason recorded in the state transition.
 */
static void zfpm_connection_up(const char *detail)
{
	assert(zfpm_g->sock >= 0);
	zfpm_read_on();
	zfpm_write_on();
	zfpm_set_state(ZFPM_STATE_ESTABLISHED, detail);

	/*
	 * Start thread to push existing routes to the FPM.
	 */
	thread_cancel(&zfpm_g->t_conn_up);

	zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
	/* Force a fresh RMAC dump on every new connection. */
	zfpm_g->fpm_mac_dump_done = false;

	zfpm_debug("Starting conn_up thread");

	thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
			      &zfpm_g->t_conn_up);
	zfpm_g->stats.t_conn_up_starts++;
}
598
/*
 * zfpm_connect_check
 *
 * Check if an asynchronous connect() to the FPM is complete. On
 * success move to ESTABLISHED; on failure close the socket and restart
 * the connect timer.
 */
static void zfpm_connect_check(void)
{
	int status;
	socklen_t slen;
	int ret;

	/* Both read and write callbacks fire on connect completion;
	 * disarm them before deciding the outcome. */
	zfpm_read_off();
	zfpm_write_off();

	slen = sizeof(status);
	ret = getsockopt(zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *)&status,
			 &slen);

	if (ret >= 0 && status == 0) {
		zfpm_connection_up("async connect complete");
		return;
	}

	/*
	 * getsockopt() failed or indicated an error on the socket.
	 */
	close(zfpm_g->sock);
	zfpm_g->sock = -1;

	zfpm_start_connect_timer("getsockopt() after async connect failed");
	return;
}
631
/*
 * zfpm_conn_down_thread_cb
 *
 * Callback that is invoked to clean up state after the TCP connection
 * to the FPM goes down: frees all queued MAC updates, then walks every
 * route_node clearing FPM bookkeeping flags and dequeuing pending
 * dests. Yields and re-schedules itself when needed; when the walk
 * completes, kicks off the reconnect timer.
 *
 * Always returns 0 (thread callback convention).
 */
static int zfpm_conn_down_thread_cb(struct thread *thread)
{
	struct route_node *rnode;
	struct zfpm_rnodes_iter *iter;
	rib_dest_t *dest;
	struct fpm_mac_info_t *mac = NULL;

	assert(zfpm_g->state == ZFPM_STATE_IDLE);

	/*
	 * Delink and free all fpm_mac_info_t nodes
	 * in the mac_q and fpm_mac_info_hash
	 */
	while ((mac = TAILQ_FIRST(&zfpm_g->mac_q)) != NULL)
		zfpm_mac_info_del(mac);

	zfpm_g->t_conn_down = NULL;

	iter = &zfpm_g->t_conn_down_state.iter;

	while ((rnode = zfpm_rnodes_iter_next(iter))) {
		dest = rib_dest_from_rnode(rnode);

		if (dest) {
			/* A dest still flagged for update is on dest_q;
			 * take it off before clearing the flag. */
			if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
				TAILQ_REMOVE(&zfpm_g->dest_q, dest,
					     fpm_q_entries);
			}

			UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);

			zfpm_g->stats.t_conn_down_dests_processed++;

			/*
			 * Check if the dest should be deleted.
			 */
			rib_gc_dest(rnode);
		}

		/*
		 * Yield if need be.
		 */
		if (!zfpm_thread_should_yield(thread))
			continue;

		zfpm_g->stats.t_conn_down_yields++;
		zfpm_rnodes_iter_pause(iter);
		zfpm_g->t_conn_down = NULL;
		thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
				      NULL, 0, &zfpm_g->t_conn_down);
		return 0;
	}

	zfpm_g->stats.t_conn_down_finishes++;
	zfpm_rnodes_iter_cleanup(iter);

	/*
	 * Start the process of connecting to the FPM again.
	 */
	zfpm_start_connect_timer("cleanup complete");
	return 0;
}
701
/*
 * zfpm_connection_down
 *
 * Called when the connection to the FPM has gone down: disarm I/O,
 * reset the buffers, close the socket, start the cleanup thread and
 * transition to IDLE.
 *
 * 'detail' is a human-readable reason (may be NULL).
 */
static void zfpm_connection_down(const char *detail)
{
	if (!detail)
		detail = "unknown";

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);

	zlog_info("connection to the FPM has gone down: %s", detail);

	zfpm_read_off();
	zfpm_write_off();

	stream_reset(zfpm_g->ibuf);
	stream_reset(zfpm_g->obuf);

	if (zfpm_g->sock >= 0) {
		close(zfpm_g->sock);
		zfpm_g->sock = -1;
	}

	/*
	 * Start thread to clean up state after the connection goes down.
	 */
	assert(!zfpm_g->t_conn_down);
	zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
	zfpm_g->t_conn_down = NULL;
	thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
			      &zfpm_g->t_conn_down);
	zfpm_g->stats.t_conn_down_starts++;

	zfpm_set_state(ZFPM_STATE_IDLE, detail);
}
739
/*
 * zfpm_read_cb
 *
 * Read callback on the FPM socket. Accumulates a complete FPM message
 * (header first, then payload) across possibly-partial reads using
 * ibuf's endp as the resume point, then discards the message (incoming
 * messages are not processed yet). Any read error or EOF tears the
 * connection down.
 *
 * Always returns 0 (thread callback convention).
 */
static int zfpm_read_cb(struct thread *thread)
{
	size_t already;
	struct stream *ibuf;
	uint16_t msg_len;
	fpm_msg_hdr_t *hdr;

	zfpm_g->stats.read_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return 0;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	ibuf = zfpm_g->ibuf;

	/* 'already' = bytes of this message buffered from prior reads. */
	already = stream_get_endp(ibuf);
	if (already < FPM_MSG_HDR_LEN) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock,
					FPM_MSG_HDR_LEN - already);
		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "closed socket in read(%d): %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("closed socket in read");
			return 0;
		}

		/* Partial header: wait for more data. */
		if (nbyte != (ssize_t)(FPM_MSG_HDR_LEN - already))
			goto done;

		already = FPM_MSG_HDR_LEN;
	}

	stream_set_getp(ibuf, 0);

	hdr = (fpm_msg_hdr_t *)stream_pnt(ibuf);

	if (!fpm_msg_hdr_ok(hdr)) {
		zfpm_connection_down("invalid message header");
		return 0;
	}

	msg_len = fpm_msg_len(hdr);

	/*
	 * Read out the rest of the packet.
	 */
	if (already < msg_len) {
		ssize_t nbyte;

		nbyte = stream_read_try(ibuf, zfpm_g->sock, msg_len - already);

		if (nbyte == 0 || nbyte == -1) {
			if (nbyte == -1) {
				char buffer[1024];

				snprintf(buffer, sizeof(buffer),
					 "failed to read message(%d) %s", errno,
					 safe_strerror(errno));
				zfpm_connection_down(buffer);
			} else
				zfpm_connection_down("failed to read message");
			return 0;
		}

		/* Partial payload: wait for more data. */
		if (nbyte != (ssize_t)(msg_len - already))
			goto done;
	}

	/*
	 * Just throw it away for now.
	 */
	stream_reset(ibuf);

done:
	zfpm_read_on();
	return 0;
}
835
836 static bool zfpm_updates_pending(void)
837 {
838 if (!(TAILQ_EMPTY(&zfpm_g->dest_q)) || !(TAILQ_EMPTY(&zfpm_g->mac_q)))
839 return true;
840
841 return false;
842 }
843
844 /*
845 * zfpm_writes_pending
846 *
847 * Returns true if we may have something to write to the FPM.
848 */
849 static int zfpm_writes_pending(void)
850 {
851
852 /*
853 * Check if there is any data in the outbound buffer that has not
854 * been written to the socket yet.
855 */
856 if (stream_get_endp(zfpm_g->obuf) - stream_get_getp(zfpm_g->obuf))
857 return 1;
858
859 /*
860 * Check if there are any updates scheduled on the outbound queues.
861 */
862 if (zfpm_updates_pending())
863 return 1;
864
865 return 0;
866 }
867
868 /*
869 * zfpm_encode_route
870 *
871 * Encode a message to the FPM with information about the given route.
872 *
873 * Returns the number of bytes written to the buffer. 0 or a negative
874 * value indicates an error.
875 */
876 static inline int zfpm_encode_route(rib_dest_t *dest, struct route_entry *re,
877 char *in_buf, size_t in_buf_len,
878 fpm_msg_type_e *msg_type)
879 {
880 size_t len;
881 #ifdef HAVE_NETLINK
882 int cmd;
883 #endif
884 len = 0;
885
886 *msg_type = FPM_MSG_TYPE_NONE;
887
888 switch (zfpm_g->message_format) {
889
890 case ZFPM_MSG_FORMAT_PROTOBUF:
891 #ifdef HAVE_PROTOBUF
892 len = zfpm_protobuf_encode_route(dest, re, (uint8_t *)in_buf,
893 in_buf_len);
894 *msg_type = FPM_MSG_TYPE_PROTOBUF;
895 #endif
896 break;
897
898 case ZFPM_MSG_FORMAT_NETLINK:
899 #ifdef HAVE_NETLINK
900 *msg_type = FPM_MSG_TYPE_NETLINK;
901 cmd = re ? RTM_NEWROUTE : RTM_DELROUTE;
902 len = zfpm_netlink_encode_route(cmd, dest, re, in_buf,
903 in_buf_len);
904 assert(fpm_msg_align(len) == len);
905 *msg_type = FPM_MSG_TYPE_NETLINK;
906 #endif /* HAVE_NETLINK */
907 break;
908
909 default:
910 break;
911 }
912
913 return len;
914 }
915
/*
 * zfpm_route_for_update
 *
 * Returns the re that is to be sent to the FPM for a given dest: the
 * currently selected FIB entry, or NULL if there is none (which the
 * caller treats as a route deletion).
 */
struct route_entry *zfpm_route_for_update(rib_dest_t *dest)
{
	return dest->selected_fib;
}
925
/*
 * Define an enum for return codes for queue processing functions
 *
 * FPM_WRITE_STOP: This return code indicates that the write buffer is full.
 * Stop processing all the queues and empty the buffer by writing its content
 * to the socket.
 *
 * FPM_GOTO_NEXT_Q: This return code indicates that either this queue is
 * empty or we have processed enough updates from this queue.
 * So, move on to the next queue.
 */
enum {
	FPM_WRITE_STOP = 0,	/* obuf full: flush to the socket first */
	FPM_GOTO_NEXT_Q = 1	/* queue drained or limit hit: next queue */
};
941
942 #define FPM_QUEUE_PROCESS_LIMIT 10000
943
/*
 * zfpm_build_route_updates
 *
 * Process the dest_q queue and write FPM messages to the outbound buffer.
 *
 * Returns FPM_WRITE_STOP when the outbound buffer cannot hold another
 * message, or FPM_GOTO_NEXT_Q when the queue is drained or the
 * per-queue processing limit is reached.
 */
static int zfpm_build_route_updates(void)
{
	struct stream *s;
	rib_dest_t *dest;
	unsigned char *buf, *data, *buf_end;
	size_t msg_len;
	size_t data_len;
	fpm_msg_hdr_t *hdr;
	struct route_entry *re;
	int is_add, write_msg;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->dest_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/*
		 * Make sure there is enough space to write another message.
		 */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		dest = TAILQ_FIRST(&zfpm_g->dest_q);
		if (!dest)
			return FPM_GOTO_NEXT_Q;

		assert(CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM));

		/* Build the FPM header in place in the stream. */
		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);

		/* NULL re means this is a deletion. */
		re = zfpm_route_for_update(dest);
		is_add = re ? 1 : 0;

		write_msg = 1;

		/*
		 * If this is a route deletion, and we have not sent the
		 * route to the FPM previously, skip it.
		 */
		if (!is_add && !CHECK_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM)) {
			write_msg = 0;
			zfpm_g->stats.nop_deletes_skipped++;
		}

		if (write_msg) {
			data_len = zfpm_encode_route(dest, re, (char *)data,
						     buf_end - data, &msg_type);

			if (data_len) {
				hdr->msg_type = msg_type;
				msg_len = fpm_data_len_to_msg_len(data_len);
				hdr->msg_len = htons(msg_len);
				stream_forward_endp(s, msg_len);

				if (is_add)
					zfpm_g->stats.route_adds++;
				else
					zfpm_g->stats.route_dels++;
			} else {
				zlog_err("%s: Encoding Prefix: %pRN No valid nexthops",
					 __func__, dest->rnode);
			}
		}

		/*
		 * Remove the dest from the queue, and reset the flag.
		 */
		UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
		TAILQ_REMOVE(&zfpm_g->dest_q, dest, fpm_q_entries);

		/* Track whether the FPM currently knows this route, so a
		 * later delete can be recognized as a no-op. */
		if (is_add) {
			SET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		} else {
			UNSET_FLAG(dest->flags, RIB_DEST_SENT_TO_FPM);
		}

		/*
		 * Delete the destination if necessary.
		 */
		if (rib_gc_dest(dest->rnode))
			zfpm_g->stats.dests_del_after_update++;

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (true);
}
1052
/*
 * zfpm_encode_mac
 *
 * Encode a message to FPM with information about the given MAC.
 *
 * On return '*msg_type' holds the FPM message type encoded, or
 * FPM_MSG_TYPE_NONE when the configured format cannot carry MAC
 * updates (only netlink is supported).
 *
 * Returns the number of bytes written to the buffer.
 */
static inline int zfpm_encode_mac(struct fpm_mac_info_t *mac, char *in_buf,
				  size_t in_buf_len, fpm_msg_type_e *msg_type)
{
	size_t len = 0;

	*msg_type = FPM_MSG_TYPE_NONE;

	switch (zfpm_g->message_format) {

	case ZFPM_MSG_FORMAT_NONE:
		break;
	case ZFPM_MSG_FORMAT_NETLINK:
#ifdef HAVE_NETLINK
		len = zfpm_netlink_encode_mac(mac, in_buf, in_buf_len);
		/* FPM netlink payloads must be naturally aligned. */
		assert(fpm_msg_align(len) == len);
		*msg_type = FPM_MSG_TYPE_NETLINK;
#endif /* HAVE_NETLINK */
		break;
	case ZFPM_MSG_FORMAT_PROTOBUF:
		/* MAC updates are not supported in protobuf format. */
		break;
	}
	return len;
}
1083
/*
 * zfpm_build_mac_updates
 *
 * Process the mac_q queue and write FPM MAC messages to the outbound
 * buffer.
 *
 * Returns FPM_WRITE_STOP when the outbound buffer cannot hold another
 * message, or FPM_GOTO_NEXT_Q when the queue is drained or the
 * per-queue processing limit is reached.
 */
static int zfpm_build_mac_updates(void)
{
	struct stream *s;
	struct fpm_mac_info_t *mac;
	unsigned char *buf, *data, *buf_end;
	fpm_msg_hdr_t *hdr;
	size_t data_len, msg_len;
	fpm_msg_type_e msg_type;
	uint16_t q_limit;

	if (TAILQ_EMPTY(&zfpm_g->mac_q))
		return FPM_GOTO_NEXT_Q;

	s = zfpm_g->obuf;
	q_limit = FPM_QUEUE_PROCESS_LIMIT;

	do {
		/* Make sure there is enough space to write another message. */
		if (STREAM_WRITEABLE(s) < FPM_MAX_MAC_MSG_LEN)
			return FPM_WRITE_STOP;

		buf = STREAM_DATA(s) + stream_get_endp(s);
		buf_end = buf + STREAM_WRITEABLE(s);

		mac = TAILQ_FIRST(&zfpm_g->mac_q);
		if (!mac)
			return FPM_GOTO_NEXT_Q;

		/* Check for no-op: entry no longer needs an FPM update
		 * (e.g. add followed by delete before it was sent). */
		if (!CHECK_FLAG(mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM)) {
			zfpm_g->stats.nop_deletes_skipped++;
			zfpm_mac_info_del(mac);
			continue;
		}

		/* Build the FPM header in place in the stream. */
		hdr = (fpm_msg_hdr_t *)buf;
		hdr->version = FPM_PROTO_VERSION;

		data = fpm_msg_data(hdr);
		data_len = zfpm_encode_mac(mac, (char *)data, buf_end - data,
					   &msg_type);
		assert(data_len);

		hdr->msg_type = msg_type;
		msg_len = fpm_data_len_to_msg_len(data_len);
		hdr->msg_len = htons(msg_len);
		stream_forward_endp(s, msg_len);

		/* Remove the MAC from the queue, and delete it. */
		zfpm_mac_info_del(mac);

		q_limit--;
		if (q_limit == 0) {
			/*
			 * We have processed enough updates in this queue.
			 * Now yield for other queues.
			 */
			return FPM_GOTO_NEXT_Q;
		}
	} while (1);
}
1145
1146 /*
1147 * zfpm_build_updates
1148 *
1149 * Process the outgoing queues and write messages to the outbound
1150 * buffer.
1151 */
1152 static void zfpm_build_updates(void)
1153 {
1154 struct stream *s;
1155
1156 s = zfpm_g->obuf;
1157 assert(stream_empty(s));
1158
1159 do {
1160 /*
1161 * Stop processing the queues if zfpm_g->obuf is full
1162 * or we do not have more updates to process
1163 */
1164 if (zfpm_build_mac_updates() == FPM_WRITE_STOP)
1165 break;
1166 if (zfpm_build_route_updates() == FPM_WRITE_STOP)
1167 break;
1168 } while (zfpm_updates_pending());
1169 }
1170
/*
 * zfpm_write_cb
 *
 * Write callback on the FPM socket. Repeatedly fills the outbound
 * buffer from the queues and writes it to the socket, stopping on
 * partial write, EWOULDBLOCK-style errno, write-count limit, or thread
 * yield; re-arms itself if work remains. A hard write error tears the
 * connection down.
 *
 * Always returns 0 (thread callback convention).
 */
static int zfpm_write_cb(struct thread *thread)
{
	struct stream *s;
	int num_writes;

	zfpm_g->stats.write_cb_calls++;

	/*
	 * Check if async connect is now done.
	 */
	if (zfpm_g->state == ZFPM_STATE_CONNECTING) {
		zfpm_connect_check();
		return 0;
	}

	assert(zfpm_g->state == ZFPM_STATE_ESTABLISHED);
	assert(zfpm_g->sock >= 0);

	num_writes = 0;

	do {
		int bytes_to_write, bytes_written;

		s = zfpm_g->obuf;

		/*
		 * If the stream is empty, try fill it up with data.
		 */
		if (stream_empty(s)) {
			zfpm_build_updates();
		}

		bytes_to_write = stream_get_endp(s) - stream_get_getp(s);
		if (!bytes_to_write)
			break;

		bytes_written =
			write(zfpm_g->sock, stream_pnt(s), bytes_to_write);
		zfpm_g->stats.write_calls++;
		num_writes++;

		if (bytes_written < 0) {
			/* Retryable errno (EAGAIN/EINTR): try again on the
			 * next write callback. */
			if (ERRNO_IO_RETRY(errno))
				break;

			zfpm_connection_down("failed to write to socket");
			return 0;
		}

		if (bytes_written != bytes_to_write) {

			/*
			 * Partial write.
			 */
			stream_forward_getp(s, bytes_written);
			zfpm_g->stats.partial_writes++;
			break;
		}

		/*
		 * We've written out the entire contents of the stream.
		 */
		stream_reset(s);

		if (num_writes >= ZFPM_MAX_WRITES_PER_RUN) {
			zfpm_g->stats.max_writes_hit++;
			break;
		}

		if (zfpm_thread_should_yield(thread)) {
			zfpm_g->stats.t_write_yields++;
			break;
		}
	} while (1);

	/* Re-arm if there is still data or queued updates to send. */
	if (zfpm_writes_pending())
		zfpm_write_on();

	return 0;
}
1254
1255 /*
1256 * zfpm_connect_cb
1257 */
1258 static int zfpm_connect_cb(struct thread *t)
1259 {
1260 int sock, ret;
1261 struct sockaddr_in serv;
1262
1263 assert(zfpm_g->state == ZFPM_STATE_ACTIVE);
1264
1265 sock = socket(AF_INET, SOCK_STREAM, 0);
1266 if (sock < 0) {
1267 zlog_err("Failed to create socket for connect(): %s",
1268 strerror(errno));
1269 zfpm_g->stats.connect_no_sock++;
1270 return 0;
1271 }
1272
1273 set_nonblocking(sock);
1274
1275 /* Make server socket. */
1276 memset(&serv, 0, sizeof(serv));
1277 serv.sin_family = AF_INET;
1278 serv.sin_port = htons(zfpm_g->fpm_port);
1279 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
1280 serv.sin_len = sizeof(struct sockaddr_in);
1281 #endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
1282 if (!zfpm_g->fpm_server)
1283 serv.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1284 else
1285 serv.sin_addr.s_addr = (zfpm_g->fpm_server);
1286
1287 /*
1288 * Connect to the FPM.
1289 */
1290 zfpm_g->connect_calls++;
1291 zfpm_g->stats.connect_calls++;
1292 zfpm_g->last_connect_call_time = monotime(NULL);
1293
1294 ret = connect(sock, (struct sockaddr *)&serv, sizeof(serv));
1295 if (ret >= 0) {
1296 zfpm_g->sock = sock;
1297 zfpm_connection_up("connect succeeded");
1298 return 1;
1299 }
1300
1301 if (errno == EINPROGRESS) {
1302 zfpm_g->sock = sock;
1303 zfpm_read_on();
1304 zfpm_write_on();
1305 zfpm_set_state(ZFPM_STATE_CONNECTING,
1306 "async connect in progress");
1307 return 0;
1308 }
1309
1310 zlog_info("can't connect to FPM %d: %s", sock, safe_strerror(errno));
1311 close(sock);
1312
1313 /*
1314 * Restart timer for retrying connection.
1315 */
1316 zfpm_start_connect_timer("connect() failed");
1317 return 0;
1318 }
1319
/*
 * zfpm_set_state
 *
 * Move state machine into the given state, logging the transition.
 * Asserts that the transition is legal from the current state and that
 * the resources the target state depends on (socket, threads) exist.
 *
 * 'reason' is a human-readable explanation (may be NULL).
 */
static void zfpm_set_state(enum zfpm_state state, const char *reason)
{
	enum zfpm_state cur_state = zfpm_g->state;

	if (!reason)
		reason = "Unknown";

	/* No-op transition: nothing to do or log. */
	if (state == cur_state)
		return;

	zfpm_debug("beginning state transition %s -> %s. Reason: %s",
		   zfpm_state_to_str(cur_state), zfpm_state_to_str(state),
		   reason);

	switch (state) {

	case ZFPM_STATE_IDLE:
		assert(cur_state == ZFPM_STATE_ESTABLISHED);
		break;

	case ZFPM_STATE_ACTIVE:
		assert(cur_state == ZFPM_STATE_IDLE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->t_connect);
		break;

	case ZFPM_STATE_CONNECTING:
		assert(zfpm_g->sock);
		assert(cur_state == ZFPM_STATE_ACTIVE);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;

	case ZFPM_STATE_ESTABLISHED:
		assert(cur_state == ZFPM_STATE_ACTIVE
		       || cur_state == ZFPM_STATE_CONNECTING);
		assert(zfpm_g->sock);
		assert(zfpm_g->t_read);
		assert(zfpm_g->t_write);
		break;
	}

	zfpm_g->state = state;
}
1369
1370 /*
1371 * zfpm_calc_connect_delay
1372 *
1373 * Returns the number of seconds after which we should attempt to
1374 * reconnect to the FPM.
1375 */
1376 static long zfpm_calc_connect_delay(void)
1377 {
1378 time_t elapsed;
1379
1380 /*
1381 * Return 0 if this is our first attempt to connect.
1382 */
1383 if (zfpm_g->connect_calls == 0) {
1384 return 0;
1385 }
1386
1387 elapsed = zfpm_get_elapsed_time(zfpm_g->last_connect_call_time);
1388
1389 if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
1390 return 0;
1391 }
1392
1393 return ZFPM_CONNECT_RETRY_IVL - elapsed;
1394 }
1395
/*
 * zfpm_start_connect_timer
 *
 * Schedule the next connect attempt (after the backoff computed by
 * zfpm_calc_connect_delay()) and move the state machine to ACTIVE.
 */
static void zfpm_start_connect_timer(const char *reason)
{
	long delay_secs;

	assert(!zfpm_g->t_connect);
	/* The socket must be closed before attempting to reconnect. */
	assert(zfpm_g->sock < 0);

	assert(zfpm_g->state == ZFPM_STATE_IDLE
	       || zfpm_g->state == ZFPM_STATE_ACTIVE
	       || zfpm_g->state == ZFPM_STATE_CONNECTING);

	delay_secs = zfpm_calc_connect_delay();
	zfpm_debug("scheduling connect in %ld seconds", delay_secs);

	/* Schedule the timer before changing state: ZFPM_STATE_ACTIVE
	 * asserts that t_connect is set. */
	thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
			 &zfpm_g->t_connect);
	zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
}
1417
1418 /*
1419 * zfpm_is_enabled
1420 *
1421 * Returns true if the zebra FPM module has been enabled.
1422 */
1423 static inline int zfpm_is_enabled(void)
1424 {
1425 return zfpm_g->enabled;
1426 }
1427
1428 /*
1429 * zfpm_conn_is_up
1430 *
1431 * Returns true if the connection to the FPM is up.
1432 */
1433 static inline int zfpm_conn_is_up(void)
1434 {
1435 if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
1436 return 0;
1437
1438 assert(zfpm_g->sock >= 0);
1439
1440 return 1;
1441 }
1442
1443 /*
1444 * zfpm_trigger_update
1445 *
1446 * The zebra code invokes this function to indicate that we should
1447 * send an update to the FPM about the given route_node.
1448 */
1449 static int zfpm_trigger_update(struct route_node *rn, const char *reason)
1450 {
1451 rib_dest_t *dest;
1452
1453 /*
1454 * Ignore if the connection is down. We will update the FPM about
1455 * all destinations once the connection comes up.
1456 */
1457 if (!zfpm_conn_is_up())
1458 return 0;
1459
1460 dest = rib_dest_from_rnode(rn);
1461
1462 if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) {
1463 zfpm_g->stats.redundant_triggers++;
1464 return 0;
1465 }
1466
1467 if (reason) {
1468 zfpm_debug("%pFX triggering update to FPM - Reason: %s", &rn->p,
1469 reason);
1470 }
1471
1472 SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM);
1473 TAILQ_INSERT_TAIL(&zfpm_g->dest_q, dest, fpm_q_entries);
1474 zfpm_g->stats.updates_triggered++;
1475
1476 /*
1477 * Make sure that writes are enabled.
1478 */
1479 if (zfpm_g->t_write)
1480 return 0;
1481
1482 zfpm_write_on();
1483 return 0;
1484 }
1485
1486 /*
1487 * Generate Key for FPM MAC info hash entry
1488 */
1489 static unsigned int zfpm_mac_info_hash_keymake(const void *p)
1490 {
1491 struct fpm_mac_info_t *fpm_mac = (struct fpm_mac_info_t *)p;
1492 uint32_t mac_key;
1493
1494 mac_key = jhash(fpm_mac->macaddr.octet, ETH_ALEN, 0xa5a5a55a);
1495
1496 return jhash_2words(mac_key, fpm_mac->vni, 0);
1497 }
1498
1499 /*
1500 * Compare function for FPM MAC info hash lookup
1501 */
1502 static bool zfpm_mac_info_cmp(const void *p1, const void *p2)
1503 {
1504 const struct fpm_mac_info_t *fpm_mac1 = p1;
1505 const struct fpm_mac_info_t *fpm_mac2 = p2;
1506
1507 if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN)
1508 != 0)
1509 return false;
1510 if (fpm_mac1->vni != fpm_mac2->vni)
1511 return false;
1512
1513 return true;
1514 }
1515
/*
 * Lookup FPM MAC info hash entry.
 *
 * Thin wrapper over the generic hash lookup; 'key' only needs its
 * macaddr and vni fields populated.  Returns NULL if not found.
 */
static struct fpm_mac_info_t *zfpm_mac_info_lookup(struct fpm_mac_info_t *key)
{
	return hash_lookup(zfpm_g->fpm_mac_info_table, key);
}
1523
1524 /*
1525 * Callback to allocate fpm_mac_info_t structure.
1526 */
1527 static void *zfpm_mac_info_alloc(void *p)
1528 {
1529 const struct fpm_mac_info_t *key = p;
1530 struct fpm_mac_info_t *fpm_mac;
1531
1532 fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t));
1533
1534 memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN);
1535 fpm_mac->vni = key->vni;
1536
1537 return (void *)fpm_mac;
1538 }
1539
/*
 * Delink and free fpm_mac_info_t.
 *
 * Order matters: remove the entry from the hash table and the pending
 * queue before releasing the memory.
 */
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac)
{
	hash_release(zfpm_g->fpm_mac_info_table, fpm_mac);
	TAILQ_REMOVE(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);
	XFREE(MTYPE_FPM_MAC_INFO, fpm_mac);
}
1549
/*
 * zfpm_trigger_rmac_update
 *
 * Zebra code invokes this function to indicate that we should
 * send an update to FPM for given MAC entry.
 *
 * This function checks if we already have enqueued an update for this RMAC,
 * If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
 *
 * Returns 0 in all cases (hook-style callback).
 */
static int zfpm_trigger_rmac_update(struct zebra_mac *rmac,
				    struct zebra_l3vni *zl3vni, bool delete,
				    const char *reason)
{
	struct fpm_mac_info_t *fpm_mac, key;
	struct interface *vxlan_if, *svi_if;
	bool mac_found = false;

	/*
	 * Ignore if the connection is down. We will update the FPM about
	 * all destinations once the connection comes up.
	 */
	if (!zfpm_conn_is_up())
		return 0;

	if (reason) {
		zfpm_debug("triggering update to FPM - Reason: %s - %pEA",
			   reason, &rmac->macaddr);
	}

	vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
	svi_if = zl3vni_map_to_svi_if(zl3vni);

	/* Build the hash key: MAC address + VNI. */
	memset(&key, 0, sizeof(struct fpm_mac_info_t));

	memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
	key.vni = zl3vni->vni;

	/* Check if this MAC is already present in the queue. */
	fpm_mac = zfpm_mac_info_lookup(&key);

	if (fpm_mac) {
		mac_found = true;

		/*
		 * If the enqueued op is "add" and current op is "delete",
		 * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag.
		 * While processing FPM queue, we will silently delete this
		 * MAC entry without sending any update for this MAC.
		 */
		if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) &&
		    delete == 1) {
			SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
			UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
			return 0;
		}
	} else {
		/* Not queued yet: allocate and insert a new entry. */
		fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
				   zfpm_mac_info_alloc);
		if (!fpm_mac)
			return 0;
	}

	/* Refresh the entry with the latest state of the RMAC. */
	fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
	fpm_mac->zebra_flags = rmac->flags;
	fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0;
	fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0;

	SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
	if (delete)
		SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);
	else
		UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM);

	/* Only enqueue entries that are not already on the queue. */
	if (!mac_found)
		TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries);

	zfpm_g->stats.updates_triggered++;

	/* If writes are already enabled, return. */
	if (zfpm_g->t_write)
		return 0;

	zfpm_write_on();
	return 0;
}
1635
1636 /*
1637 * This function is called when the FPM connections is established.
1638 * Iterate over all the RMAC entries for the given L3VNI
1639 * and enqueue the RMAC for FPM processing.
1640 */
1641 static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
1642 void *args)
1643 {
1644 struct zebra_mac *zrmac = (struct zebra_mac *)bucket->data;
1645 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)args;
1646
1647 zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
1648 }
1649
1650 /*
1651 * This function is called when the FPM connections is established.
1652 * This function iterates over all the L3VNIs to trigger
1653 * FPM updates for RMACs currently available.
1654 */
1655 static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
1656 {
1657 struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)bucket->data;
1658
1659 hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
1660 (void *)zl3vni);
1661 }
1662
1663 /*
1664 * struct zfpm_statsimer_cb
1665 */
1666 static int zfpm_stats_timer_cb(struct thread *t)
1667 {
1668 zfpm_g->t_stats = NULL;
1669
1670 /*
1671 * Remember the stats collected in the last interval for display
1672 * purposes.
1673 */
1674 zfpm_stats_copy(&zfpm_g->stats, &zfpm_g->last_ivl_stats);
1675
1676 /*
1677 * Add the current set of stats into the cumulative statistics.
1678 */
1679 zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
1680 &zfpm_g->cumulative_stats);
1681
1682 /*
1683 * Start collecting stats afresh over the next interval.
1684 */
1685 zfpm_stats_reset(&zfpm_g->stats);
1686
1687 zfpm_start_stats_timer();
1688
1689 return 0;
1690 }
1691
1692 /*
1693 * zfpm_stop_stats_timer
1694 */
1695 static void zfpm_stop_stats_timer(void)
1696 {
1697 if (!zfpm_g->t_stats)
1698 return;
1699
1700 zfpm_debug("Stopping existing stats timer");
1701 thread_cancel(&zfpm_g->t_stats);
1702 }
1703
/*
 * zfpm_start_stats_timer
 *
 * Arm the periodic stats timer (ZFPM_STATS_IVL_SECS).  Must not be
 * called while a stats timer is already pending.
 */
void zfpm_start_stats_timer(void)
{
	assert(!zfpm_g->t_stats);

	thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
			 ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
1714
/*
 * Helper macro for zfpm_show_stats() below: print one counter's name
 * followed by its cumulative total and its value over the last stats
 * interval.  Relies on 'vty' and 'total_stats' being in scope.
 */
#define ZFPM_SHOW_STAT(counter)                                                \
	do {                                                                   \
		vty_out(vty, "%-40s %10lu %16lu\n", #counter,                  \
			total_stats.counter, zfpm_g->last_ivl_stats.counter);  \
	} while (0)
1723
/*
 * zfpm_show_stats
 *
 * Emit all FPM counters to the given vty: cumulative totals alongside
 * the values from the last collection interval.
 */
static void zfpm_show_stats(struct vty *vty)
{
	struct zfpm_stats total_stats;
	time_t elapsed;

	vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
		ZFPM_STATS_IVL_SECS);

	/*
	 * Compute the total stats up to this instant.
	 */
	zfpm_stats_compose(&zfpm_g->cumulative_stats, &zfpm_g->stats,
			   &total_stats);

	ZFPM_SHOW_STAT(connect_calls);
	ZFPM_SHOW_STAT(connect_no_sock);
	ZFPM_SHOW_STAT(read_cb_calls);
	ZFPM_SHOW_STAT(write_cb_calls);
	ZFPM_SHOW_STAT(write_calls);
	ZFPM_SHOW_STAT(partial_writes);
	ZFPM_SHOW_STAT(max_writes_hit);
	ZFPM_SHOW_STAT(t_write_yields);
	ZFPM_SHOW_STAT(nop_deletes_skipped);
	ZFPM_SHOW_STAT(route_adds);
	ZFPM_SHOW_STAT(route_dels);
	ZFPM_SHOW_STAT(updates_triggered);
	ZFPM_SHOW_STAT(redundant_triggers);
	ZFPM_SHOW_STAT(dests_del_after_update);
	ZFPM_SHOW_STAT(t_conn_down_starts);
	ZFPM_SHOW_STAT(t_conn_down_dests_processed);
	ZFPM_SHOW_STAT(t_conn_down_yields);
	ZFPM_SHOW_STAT(t_conn_down_finishes);
	ZFPM_SHOW_STAT(t_conn_up_starts);
	ZFPM_SHOW_STAT(t_conn_up_dests_processed);
	ZFPM_SHOW_STAT(t_conn_up_yields);
	ZFPM_SHOW_STAT(t_conn_up_aborts);
	ZFPM_SHOW_STAT(t_conn_up_finishes);

	/* Only report the clear time if stats were ever cleared. */
	if (!zfpm_g->last_stats_clear_time)
		return;

	elapsed = zfpm_get_elapsed_time(zfpm_g->last_stats_clear_time);

	vty_out(vty, "\nStats were cleared %lu seconds ago\n",
		(unsigned long)elapsed);
}
1773
/*
 * zfpm_clear_stats
 *
 * Reset all FPM counters (live, last-interval and cumulative) and
 * restart the collection interval from now.
 */
static void zfpm_clear_stats(struct vty *vty)
{
	if (!zfpm_is_enabled()) {
		vty_out(vty, "The FPM module is not enabled...\n");
		return;
	}

	zfpm_stats_reset(&zfpm_g->stats);
	zfpm_stats_reset(&zfpm_g->last_ivl_stats);
	zfpm_stats_reset(&zfpm_g->cumulative_stats);

	/* Restart the interval timer so the next rollover is a full
	 * interval away. */
	zfpm_stop_stats_timer();
	zfpm_start_stats_timer();

	zfpm_g->last_stats_clear_time = monotime(NULL);

	vty_out(vty, "Cleared FPM stats\n");
}
1795
/*
 * show_zebra_fpm_stats
 *
 * CLI handler for "show zebra fpm stats".
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       ZEBRA_STR
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_show_stats(vty);
	return CMD_SUCCESS;
}
1810
/*
 * clear_zebra_fpm_stats
 *
 * CLI handler for "clear zebra fpm stats".
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       ZEBRA_STR
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
	zfpm_clear_stats(vty);
	return CMD_SUCCESS;
}
1825
1826 /*
1827 * update fpm connection information
1828 */
1829 DEFUN ( fpm_remote_ip,
1830 fpm_remote_ip_cmd,
1831 "fpm connection ip A.B.C.D port (1-65535)",
1832 "fpm connection remote ip and port\n"
1833 "Remote fpm server ip A.B.C.D\n"
1834 "Enter ip ")
1835 {
1836
1837 in_addr_t fpm_server;
1838 uint32_t port_no;
1839
1840 fpm_server = inet_addr(argv[3]->arg);
1841 if (fpm_server == INADDR_NONE)
1842 return CMD_ERR_INCOMPLETE;
1843
1844 port_no = atoi(argv[5]->arg);
1845 if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
1846 return CMD_ERR_INCOMPLETE;
1847
1848 zfpm_g->fpm_server = fpm_server;
1849 zfpm_g->fpm_port = port_no;
1850
1851
1852 return CMD_SUCCESS;
1853 }
1854
1855 DEFUN ( no_fpm_remote_ip,
1856 no_fpm_remote_ip_cmd,
1857 "no fpm connection ip A.B.C.D port (1-65535)",
1858 "fpm connection remote ip and port\n"
1859 "Connection\n"
1860 "Remote fpm server ip A.B.C.D\n"
1861 "Enter ip ")
1862 {
1863 if (zfpm_g->fpm_server != inet_addr(argv[4]->arg)
1864 || zfpm_g->fpm_port != atoi(argv[6]->arg))
1865 return CMD_ERR_NO_MATCH;
1866
1867 zfpm_g->fpm_server = FPM_DEFAULT_IP;
1868 zfpm_g->fpm_port = FPM_DEFAULT_PORT;
1869
1870 return CMD_SUCCESS;
1871 }
1872
1873 /*
1874 * zfpm_init_message_format
1875 */
1876 static inline void zfpm_init_message_format(const char *format)
1877 {
1878 int have_netlink, have_protobuf;
1879
1880 #ifdef HAVE_NETLINK
1881 have_netlink = 1;
1882 #else
1883 have_netlink = 0;
1884 #endif
1885
1886 #ifdef HAVE_PROTOBUF
1887 have_protobuf = 1;
1888 #else
1889 have_protobuf = 0;
1890 #endif
1891
1892 zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE;
1893
1894 if (!format) {
1895 if (have_netlink) {
1896 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1897 } else if (have_protobuf) {
1898 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1899 }
1900 return;
1901 }
1902
1903 if (!strcmp("netlink", format)) {
1904 if (!have_netlink) {
1905 flog_err(EC_ZEBRA_NETLINK_NOT_AVAILABLE,
1906 "FPM netlink message format is not available");
1907 return;
1908 }
1909 zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK;
1910 return;
1911 }
1912
1913 if (!strcmp("protobuf", format)) {
1914 if (!have_protobuf) {
1915 flog_err(
1916 EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
1917 "FPM protobuf message format is not available");
1918 return;
1919 }
1920 flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
1921 "FPM protobuf message format is deprecated and scheduled to be removed. Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
1922 zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
1923 return;
1924 }
1925
1926 flog_warn(EC_ZEBRA_FPM_FORMAT_UNKNOWN, "Unknown fpm format '%s'",
1927 format);
1928 }
1929
1930 /**
1931 * fpm_remote_srv_write
1932 *
1933 * Module to write remote fpm connection
1934 *
1935 * Returns ZERO on success.
1936 */
1937
1938 static int fpm_remote_srv_write(struct vty *vty)
1939 {
1940 struct in_addr in;
1941
1942 in.s_addr = zfpm_g->fpm_server;
1943
1944 if ((zfpm_g->fpm_server != FPM_DEFAULT_IP
1945 && zfpm_g->fpm_server != INADDR_ANY)
1946 || (zfpm_g->fpm_port != FPM_DEFAULT_PORT && zfpm_g->fpm_port != 0))
1947 vty_out(vty, "fpm connection ip %pI4 port %d\n", &in,
1948 zfpm_g->fpm_port);
1949
1950 return 0;
1951 }
1952
1953
1954 static int fpm_remote_srv_write(struct vty *vty);
1955 /* Zebra node */
1956 static struct cmd_node zebra_node = {
1957 .name = "zebra",
1958 .node = ZEBRA_NODE,
1959 .parent_node = CONFIG_NODE,
1960 .prompt = "",
1961 .config_write = fpm_remote_srv_write,
1962 };
1963
1964
/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module: global state, the
 * MAC-info hash table, CLI nodes/commands, message format selection
 * ('netlink' or 'protobuf', taken from the module load arguments), and
 * the stats and connect timers.
 *
 * @param[in] master thread master on which FPM tasks are scheduled.
 *
 * Returns 0 on success.
 */
static int zfpm_init(struct thread_master *master)
{
	int enable = 1;
	uint16_t port = 0;
	const char *format = THIS_MODULE->load_args;

	memset(zfpm_g, 0, sizeof(*zfpm_g));
	zfpm_g->master = master;
	TAILQ_INIT(&zfpm_g->dest_q);
	TAILQ_INIT(&zfpm_g->mac_q);

	/* Create hash table for fpm_mac_info_t entries */
	zfpm_g->fpm_mac_info_table = hash_create(zfpm_mac_info_hash_keymake,
						 zfpm_mac_info_cmp,
						 "FPM MAC info hash table");

	zfpm_g->sock = -1;
	zfpm_g->state = ZFPM_STATE_IDLE;

	zfpm_stats_init(&zfpm_g->stats);
	zfpm_stats_init(&zfpm_g->last_ivl_stats);
	zfpm_stats_init(&zfpm_g->cumulative_stats);

	memset(&ipv4ll_gateway, 0, sizeof(ipv4ll_gateway));
	if (inet_pton(AF_INET, ipv4_ll_buf, &ipv4ll_gateway.ipv4) != 1)
		zlog_warn("inet_pton failed for %s", ipv4_ll_buf);

	install_node(&zebra_node);
	install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
	install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
	install_element(CONFIG_NODE, &fpm_remote_ip_cmd);
	install_element(CONFIG_NODE, &no_fpm_remote_ip_cmd);

	zfpm_init_message_format(format);

	/*
	 * Disable FPM interface if no suitable format is available.
	 */
	if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE)
		enable = 0;

	zfpm_g->enabled = enable;

	/* Fall back to the defaults when no server/port is configured. */
	if (!zfpm_g->fpm_server)
		zfpm_g->fpm_server = FPM_DEFAULT_IP;

	if (!port)
		port = FPM_DEFAULT_PORT;

	zfpm_g->fpm_port = port;

	zfpm_g->obuf = stream_new(ZFPM_OBUF_SIZE);
	zfpm_g->ibuf = stream_new(ZFPM_IBUF_SIZE);

	zfpm_start_stats_timer();
	zfpm_start_connect_timer("initialized");
	return 0;
}
2034
/*
 * zfpm_fini
 *
 * Module teardown: cancel all scheduled FPM tasks (write, read,
 * connect, stats) and detach from the rib_update hook.
 */
static int zfpm_fini(void)
{
	zfpm_write_off();
	zfpm_read_off();
	zfpm_connect_off();

	zfpm_stop_stats_timer();

	hook_unregister(rib_update, zfpm_trigger_update);
	return 0;
}
2046
/*
 * Module entry point: register the hooks through which zebra drives
 * the FPM module (route updates, RMAC updates, late init, early fini).
 */
static int zebra_fpm_module_init(void)
{
	hook_register(rib_update, zfpm_trigger_update);
	hook_register(zebra_rmac_update, zfpm_trigger_rmac_update);
	hook_register(frr_late_init, zfpm_init);
	hook_register(frr_early_fini, zfpm_fini);
	return 0;
}
2055
/* Register this loadable module with the FRR module framework. */
FRR_MODULE_SETUP(.name = "zebra_fpm", .version = FRR_VERSION,
		 .description = "zebra FPM (Forwarding Plane Manager) module",
		 .init = zebra_fpm_module_init,
);