/* zebra/zserv.c — Zebra API server (source: mirror_frr.git) */
1 /*
2 * Zebra API server.
3 * Portions:
4 * Copyright (C) 1997-1999 Kunihiro Ishiguro
5 * Copyright (C) 2015-2018 Cumulus Networks, Inc.
6 * et al.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; see the file COPYING; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <zebra.h>
24
25 /* clang-format off */
26 #include <errno.h> /* for errno */
27 #include <netinet/in.h> /* for sockaddr_in */
28 #include <stdint.h> /* for uint8_t */
29 #include <stdio.h> /* for snprintf */
30 #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */
31 #include <sys/stat.h> /* for umask, mode_t */
32 #include <sys/un.h> /* for sockaddr_un */
33 #include <time.h> /* for NULL, tm, gmtime, time_t */
34 #include <unistd.h> /* for close, unlink, ssize_t */
35
36 #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
37 #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */
38 #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
39 #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
40 #include "lib/libfrr.h" /* for frr_zclient_addr */
41 #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */
42 #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */
43 #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
44 #include "lib/network.h" /* for set_nonblocking */
45 #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
46 #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
47 #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
48 #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
49 #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
50 #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
51 #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
52 #include "lib/vty.h" /* for vty_out, vty (ptr only) */
53 #include "lib/zassert.h" /* for assert */
54 #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
55 #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
56 #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */
57 #include "lib/lib_errors.h" /* for generic ferr ids */
58
59 #include "zebra/debug.h" /* for various debugging macros */
60 #include "zebra/rib.h" /* for rib_score_proto */
61 #include "zebra/zapi_msg.h" /* for zserv_handle_commands */
62 #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */
63 #include "zebra/zserv.h" /* for zserv */
64 /* clang-format on */
65
66 /* privileges */
67 extern struct zebra_privs_t zserv_privs;
68
69 /*
70 * Client thread events.
71 *
72 * These are used almost exclusively by client threads to drive their own event
73 * loops. The only exception is in zserv_client_create(), which pushes an
74 * initial ZSERV_CLIENT_READ event to start the API handler loop.
75 */
/* Events scheduled on a client's own pthread (its private event loop). */
enum zserv_client_event {
	/* Schedule a socket read on the client pthread */
	ZSERV_CLIENT_READ,
	/* Schedule a buffer write on the client pthread */
	ZSERV_CLIENT_WRITE,
};
82
83 /*
84 * Main thread events.
85 *
86 * These are used by client threads to notify the main thread about various
87 * events and to make processing requests.
88 */
/* Events scheduled on Zebra's main pthread, usually by client pthreads. */
enum zserv_event {
	/* Schedule listen job on Zebra API socket */
	ZSERV_ACCEPT,
	/* The calling client has packets on its input buffer */
	ZSERV_PROCESS_MESSAGES,
	/* The calling client wishes to be killed */
	ZSERV_HANDLE_CLIENT_FAIL,
};
97
98 /*
99 * Zebra server event driver for all client threads.
100 *
101 * This is essentially a wrapper around thread_add_event() that centralizes
102 * those scheduling calls into one place.
103 *
104 * All calls to this function schedule an event on the pthread running the
105 * provided client.
106 *
107 * client
108 * the client in question, and thread target
109 *
110 * event
111 * the event to notify them about
112 */
113 static void zserv_client_event(struct zserv *client,
114 enum zserv_client_event event);
115
116 /*
117 * Zebra server event driver for the main thread.
118 *
119 * This is essentially a wrapper around thread_add_event() that centralizes
120 * those scheduling calls into one place.
121 *
122 * All calls to this function schedule an event on Zebra's main pthread.
123 *
124 * client
125 * the client in question
126 *
127 * event
128 * the event to notify the main thread about
129 */
130 static void zserv_event(struct zserv *client, enum zserv_event event);
131
132
133 /* Client thread lifecycle -------------------------------------------------- */
134
135 /*
136 * Log zapi message to zlog.
137 *
138 * errmsg (optional)
139 * Debugging message
140 *
141 * msg
142 * The message
143 *
144 * hdr (optional)
145 * The message header
146 */
147 static void zserv_log_message(const char *errmsg, struct stream *msg,
148 struct zmsghdr *hdr)
149 {
150 zlog_debug("Rx'd ZAPI message");
151 if (errmsg)
152 zlog_debug("%s", errmsg);
153 if (hdr) {
154 zlog_debug(" Length: %d", hdr->length);
155 zlog_debug("Command: %s", zserv_command_string(hdr->command));
156 zlog_debug(" VRF: %u", hdr->vrf_id);
157 }
158 zlog_hexdump(msg->data, STREAM_READABLE(msg));
159 }
160
161 /*
162 * Gracefully shut down a client connection.
163 *
164 * Cancel any pending tasks for the client's thread. Then schedule a task on
165 * the main thread to shut down the calling thread.
166 *
167 * It is not safe to close the client socket in this function. The socket is
168 * owned by the main thread.
169 *
170 * Must be called from the client pthread, never the main thread.
171 */
static void zserv_client_fail(struct zserv *client)
{
	zlog_warn("Client '%s' encountered an error and is shutting down.",
		  zebra_route_string(client->proto));

	/* Mark the client pthread as no longer running so its event loop
	 * will exit; relaxed ordering is sufficient for a one-way flag. */
	atomic_store_explicit(&client->pthread->running, false,
			      memory_order_relaxed);

	/* Cancel any pending I/O tasks on this (the client's) pthread
	 * before asking the main thread to tear us down. */
	THREAD_OFF(client->t_read);
	THREAD_OFF(client->t_write);
	/* The main thread owns the socket and the client struct; it will do
	 * the actual close/free in zserv_handle_client_fail(). */
	zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL);
}
184
185 /*
186 * Write all pending messages to client socket.
187 *
188 * This function first attempts to flush any buffered data. If unsuccessful,
189 * the function reschedules itself and returns. If successful, it pops all
190 * available messages from the output queue and continues to write data
191 * directly to the socket until the socket would block. If the socket never
192 * blocks and all data is written, the function returns without rescheduling
193 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
194 * buffered and the function reschedules itself.
195 *
196 * The utility of the buffer is that it allows us to vastly reduce lock
197 * contention by allowing us to pop *all* messages off the output queue at once
198 * instead of locking and unlocking each time we want to pop a single message
199 * off the queue. The same thing could arguably be accomplished faster by
200 * allowing the main thread to write directly into the buffer instead of
201 * enqueuing packets onto an intermediary queue, but the intermediary queue
202 * allows us to expose information about input and output queues to the user in
203 * terms of number of packets rather than size of data.
204 */
static int zserv_write(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
	uint32_t wcmd = 0;
	struct stream_fifo *cache;

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		/* Socket would block; note the attempt time and reschedule
		 * ourselves without touching the output queue. */
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	cache = stream_fifo_new();

	/* Drain the entire shared output queue into a private fifo in one
	 * critical section — this is the lock-contention optimization
	 * described in the comment above this function. */
	pthread_mutex_lock(&client->obuf_mtx);
	{
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	/* Remember the command of the newest (tail) message so we can
	 * publish it as last_write_cmd once everything is flushed.
	 * Offset 6 is the command field in the ZAPI header. */
	if (cache->tail) {
		msg = cache->tail;
		stream_set_getp(msg, 0);
		wcmd = stream_getw_from(msg, 6);
	}

	/* Copy every message into the write buffer and free the streams;
	 * the buffer layer handles partial writes from here on. */
	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
		stream_free(msg);
	}

	stream_fifo_free(cache);

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		/* Partial write: record the time and come back later. */
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	/* Everything written: publish session statistics for the vty
	 * "show zebra client" display. */
	atomic_store_explicit(&client->last_write_cmd, wcmd,
			      memory_order_relaxed);

	atomic_store_explicit(&client->last_write_time,
			      (uint32_t)monotime(NULL), memory_order_relaxed);

	return 0;

zwrite_fail:
	zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
		  zebra_route_string(client->proto), client->sock);
	zserv_client_fail(client);
	return 0;
}
278
279 /*
280 * Read and process data from a client socket.
281 *
282 * The responsibilities here are to read raw data from the client socket,
283 * validate the header, encapsulate it into a single stream object, push it
284 * onto the input queue and then notify the main thread that there is new data
285 * available.
286 *
287 * This function first looks for any data in the client structure's working
288 * input buffer. If data is present, it is assumed that reading stopped in a
289 * previous invocation of this task and needs to be resumed to finish a message.
290 * Otherwise, the socket data stream is assumed to be at the beginning of a new
291 * ZAPI message (specifically at the header). The header is read and validated.
292 * If the header passed validation then the length field found in the header is
293 * used to compute the total length of the message. That much data is read (but
294 * not inspected), appended to the header, placed into a stream and pushed onto
295 * the client's input queue. A task is then scheduled on the main thread to
296 * process the client's input queue. Finally, if all of this was successful,
297 * this task reschedules itself.
298 *
299 * Any failure in any of these actions is handled by terminating the client.
300 */
static int zserv_read(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	int sock;
	size_t already;
	struct stream_fifo *cache;
	uint32_t p2p_orig;

	uint32_t p2p;
	struct zmsghdr hdr;

	/* Snapshot the per-wakeup packet budget; it may be changed at
	 * runtime by the main thread. */
	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
					memory_order_relaxed);
	cache = stream_fifo_new();
	p2p = p2p_orig;
	sock = THREAD_FD(thread);

	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];

		/* Nonzero endp means a partially-read message was left in
		 * ibuf_work by a previous invocation; resume from there. */
		already = stream_get_endp(client->ibuf_work);

		/* Read length and command (if we don't have it already). */
		if (already < ZEBRA_HEADER_SIZE) {
			nb = stream_read_try(client->ibuf_work, sock,
					     ZEBRA_HEADER_SIZE - already);
			if ((nb == 0 || nb == -1)) {
				/* Peer closed or hard read error. */
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug("connection closed socket [%d]",
						   sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
				/* Try again later. */
				break;
			}
			already = ZEBRA_HEADER_SIZE;
		}

		/* Reset to read from the beginning of the incoming packet. */
		stream_set_getp(client->ibuf_work, 0);

		/* Fetch header values */
		hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);

		if (!hdrvalid) {
			snprintf(errmsg, sizeof(errmsg),
				 "%s: Message has corrupt header", __func__);
			zserv_log_message(errmsg, client->ibuf_work, NULL);
			goto zread_fail;
		}

		/* Validate header: version/marker mismatch, a length smaller
		 * than the header itself, or a length that would overflow
		 * the working buffer all terminate the client. */
		if (hdr.marker != ZEBRA_HEADER_MARKER
		    || hdr.version != ZSERV_VERSION) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
				__func__, sock, hdr.marker, hdr.version);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length < ZEBRA_HEADER_SIZE) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
				__func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}

		/* Read rest of data. */
		if (already < hdr.length) {
			nb = stream_read_try(client->ibuf_work, sock,
					     hdr.length - already);
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug(
						"connection closed [%d] when reading zebra data",
						sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(hdr.length - already)) {
				/* Try again later. */
				break;
			}
		}

		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("zebra message comes from socket [%d]",
				   sock);

		if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
			zserv_log_message(NULL, client->ibuf_work, &hdr);

		/* Duplicate the complete message into its own stream and
		 * reset ibuf_work for the next packet. */
		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

		stream_fifo_push(cache, msg);
		stream_reset(client->ibuf_work);
		p2p--;
	}

	if (p2p < p2p_orig) {
		/* At least one full message was read, so hdr holds the
		 * header of the last complete message. */
		/* update session statistics */
		atomic_store_explicit(&client->last_read_time, monotime(NULL),
				      memory_order_relaxed);
		atomic_store_explicit(&client->last_read_cmd, hdr.command,
				      memory_order_relaxed);

		/* publish read packets on client's input queue */
		pthread_mutex_lock(&client->ibuf_mtx);
		{
			while (cache->head)
				stream_fifo_push(client->ibuf_fifo,
						 stream_fifo_pop(cache));
		}
		pthread_mutex_unlock(&client->ibuf_mtx);

		/* Schedule job to process those packets */
		zserv_event(client, ZSERV_PROCESS_MESSAGES);

	}

	if (IS_ZEBRA_DEBUG_PACKET)
		zlog_debug("Read %d packets", p2p_orig - p2p);

	/* Reschedule ourselves */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	stream_fifo_free(cache);

	return 0;

zread_fail:
	stream_fifo_free(cache);
	zserv_client_fail(client);
	return -1;
}
452
453 static void zserv_client_event(struct zserv *client,
454 enum zserv_client_event event)
455 {
456 switch (event) {
457 case ZSERV_CLIENT_READ:
458 thread_add_read(client->pthread->master, zserv_read, client,
459 client->sock, &client->t_read);
460 break;
461 case ZSERV_CLIENT_WRITE:
462 thread_add_write(client->pthread->master, zserv_write, client,
463 client->sock, &client->t_write);
464 break;
465 }
466 }
467
468 /* Main thread lifecycle ---------------------------------------------------- */
469
470 /*
471 * Read and process messages from a client.
472 *
473 * This task runs on the main pthread. It is scheduled by client pthreads when
474 * they have new messages available on their input queues. The client is passed
475 * as the task argument.
476 *
477 * Each message is popped off the client's input queue and the action associated
478 * with the message is executed. This proceeds until there are no more messages,
479 * an error occurs, or the processing limit is reached.
480 *
481 * The client's I/O thread can push at most zebrad.packets_to_process messages
482 * onto the input buffer before notifying us there are packets to read. As long
483 * as we always process zebrad.packets_to_process messages here, then we can
484 * rely on the read thread to handle queuing this task enough times to process
485 * everything on the input queue.
486 */
487 static int zserv_process_messages(struct thread *thread)
488 {
489 struct zserv *client = THREAD_ARG(thread);
490 struct stream *msg;
491 struct stream_fifo *cache = stream_fifo_new();
492
493 uint32_t p2p = zebrad.packets_to_process;
494
495 pthread_mutex_lock(&client->ibuf_mtx);
496 {
497 uint32_t i;
498 for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
499 ++i) {
500 msg = stream_fifo_pop(client->ibuf_fifo);
501 stream_fifo_push(cache, msg);
502 }
503
504 msg = NULL;
505 }
506 pthread_mutex_unlock(&client->ibuf_mtx);
507
508 while (stream_fifo_head(cache)) {
509 msg = stream_fifo_pop(cache);
510 zserv_handle_commands(client, msg);
511 stream_free(msg);
512 }
513
514 stream_fifo_free(cache);
515
516 return 0;
517 }
518
519 int zserv_send_message(struct zserv *client, struct stream *msg)
520 {
521 /*
522 * This is a somewhat poorly named variable added with Zebra's portion
523 * of the label manager. That component does not use the regular
524 * zserv/zapi_msg interface for handling its messages, as the client
525 * itself runs in-process. Instead it uses synchronous writes on the
526 * zserv client's socket directly in the zread* handlers for its
527 * message types. Furthermore, it cannot handle the usual messages
528 * Zebra sends (such as those for interface changes) and so has added
529 * this flag and check here as a hack to suppress all messages that it
530 * does not explicitly know about.
531 *
532 * In any case this needs to be cleaned up at some point.
533 *
534 * See also:
535 * zread_label_manager_request
536 * zsend_label_manager_connect_response
537 * zsend_assign_label_chunk_response
538 * ...
539 */
540 if (client->is_synchronous)
541 return 0;
542
543 pthread_mutex_lock(&client->obuf_mtx);
544 {
545 stream_fifo_push(client->obuf_fifo, msg);
546 }
547 pthread_mutex_unlock(&client->obuf_mtx);
548
549 zserv_client_event(client, ZSERV_CLIENT_WRITE);
550
551 return 0;
552 }
553
554
555 /* Hooks for client connect / disconnect */
556 DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
557 DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
558
559 /*
560 * Deinitialize zebra client.
561 *
562 * - Deregister and deinitialize related internal resources
563 * - Gracefully close socket
564 * - Free associated resources
565 * - Free client structure
566 *
567 * This does *not* take any action on the struct thread * fields. These are
568 * managed by the owning pthread and any tasks associated with them must have
569 * been stopped prior to invoking this function.
570 */
static void zserv_client_free(struct zserv *client)
{
	/* Let subscribers react to the close before resources disappear. */
	hook_call(zserv_client_close, client);

	/* Close file descriptor. */
	/* NOTE(review): this guard treats fd 0 as "no socket", so a client
	 * that somehow held descriptor 0 would never be closed here —
	 * presumably unreachable in practice, but confirm. */
	if (client->sock) {
		unsigned long nroutes;

		close(client->sock);

		/* Sweep this client's routes out of the RIB and report the
		 * count. */
		nroutes = rib_score_proto(client->proto, client->instance);
		zlog_notice(
			"client %d disconnected. %lu %s routes removed from the rib",
			client->sock, nroutes,
			zebra_route_string(client->proto));
		client->sock = -1;
	}

	/* Free stream buffers. */
	if (client->ibuf_work)
		stream_free(client->ibuf_work);
	if (client->obuf_work)
		stream_free(client->obuf_work);
	if (client->ibuf_fifo)
		stream_fifo_free(client->ibuf_fifo);
	if (client->obuf_fifo)
		stream_fifo_free(client->obuf_fifo);
	if (client->wb)
		buffer_free(client->wb);

	/* Free buffer mutexes */
	pthread_mutex_destroy(&client->obuf_mtx);
	pthread_mutex_destroy(&client->ibuf_mtx);

	/* Free bitmaps. */
	for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
		for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
			vrf_bitmap_free(client->redist[afi][i]);

	vrf_bitmap_free(client->redist_default);
	vrf_bitmap_free(client->ifinfo);
	vrf_bitmap_free(client->ridinfo);

	XFREE(MTYPE_TMP, client);
}
616
/*
 * Fully tear down a client: stop and destroy its pthread, cancel any tasks
 * it scheduled on the main thread, remove it from the client list, and free
 * it. Runs on the main pthread. The ordering below matters: the client
 * pthread must be joined before its events are cancelled and its memory
 * released.
 */
void zserv_close_client(struct zserv *client)
{
	/* synchronously stop and join pthread */
	frr_pthread_stop(client->pthread, NULL);

	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("Closing client '%s'",
			   zebra_route_string(client->proto));

	/* Drop any main-thread events that still reference this client. */
	thread_cancel_event(zebrad.master, client);
	THREAD_OFF(client->t_cleanup);

	/* destroy pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	/* remove from client list */
	listnode_delete(zebrad.client_list, client);

	/* delete client */
	zserv_client_free(client);
}
639
640 /*
641 * This task is scheduled by a ZAPI client pthread on the main pthread when it
642 * wants to stop itself. When this executes, the client connection should
643 * already have been closed and the thread will most likely have died, but its
644 * resources still need to be cleaned up.
645 */
/* Main-thread task: perform the actual teardown requested by a failing
 * client pthread (scheduled from zserv_client_fail()). */
static int zserv_handle_client_fail(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);

	zserv_close_client(client);
	return 0;
}
653
654 /*
655 * Create a new client.
656 *
657 * This is called when a new connection is accept()'d on the ZAPI socket. It
658 * initializes new client structure, notifies any subscribers of the connection
659 * event and spawns the client's thread.
660 *
661 * sock
662 * client's socket file descriptor
663 */
static struct zserv *zserv_client_create(int sock)
{
	struct zserv *client;
	int i;
	afi_t afi;

	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	client->wb = buffer_new(0);

	/* Set table number. */
	client->rtm_table = zebrad.rtm_table_default;

	/* Record connect time for the vty statistics display. */
	atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
			      memory_order_relaxed);

	/* Initialize flags */
	for (afi = AFI_IP; afi < AFI_MAX; afi++)
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
	client->redist_default = vrf_bitmap_init();
	client->ifinfo = vrf_bitmap_init();
	client->ridinfo = vrf_bitmap_init();

	/* by default, it's not a synchronous client */
	client->is_synchronous = 0;

	/* Add this client to linked list. */
	listnode_add(zebrad.client_list, client);

	/* Dedicated pthread that will run this client's read/write loop. */
	struct frr_pthread_attr zclient_pthr_attrs = {
		.id = frr_pthread_get_id(),
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread =
		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread",
				"zebra_apic");

	zebra_vrf_update_all(client);

	/* start read loop — the initial ZSERV_CLIENT_READ event primes the
	 * client pthread's event loop before the pthread is launched. */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
	frr_pthread_run(client->pthread, NULL);

	return client;
}
724
725 /*
726 * Accept socket connection.
727 */
728 static int zserv_accept(struct thread *thread)
729 {
730 int accept_sock;
731 int client_sock;
732 struct sockaddr_in client;
733 socklen_t len;
734
735 accept_sock = THREAD_FD(thread);
736
737 /* Reregister myself. */
738 zserv_event(NULL, ZSERV_ACCEPT);
739
740 len = sizeof(struct sockaddr_in);
741 client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
742
743 if (client_sock < 0) {
744 zlog_warn("Can't accept zebra socket: %s",
745 safe_strerror(errno));
746 return -1;
747 }
748
749 /* Make client socket non-blocking. */
750 set_nonblocking(client_sock);
751
752 /* Create new zebra client. */
753 zserv_client_create(client_sock);
754
755 return 0;
756 }
757
758 void zserv_start(char *path)
759 {
760 int ret;
761 mode_t old_mask;
762 struct sockaddr_storage sa;
763 socklen_t sa_len;
764
765 if (!frr_zclient_addr(&sa, &sa_len, path))
766 /* should be caught in zebra main() */
767 return;
768
769 /* Set umask */
770 old_mask = umask(0077);
771
772 /* Make UNIX domain socket. */
773 zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
774 if (zebrad.sock < 0) {
775 zlog_warn("Can't create zserv socket: %s",
776 safe_strerror(errno));
777 zlog_warn(
778 "zebra can't provide full functionality due to above error");
779 return;
780 }
781
782 if (sa.ss_family != AF_UNIX) {
783 sockopt_reuseaddr(zebrad.sock);
784 sockopt_reuseport(zebrad.sock);
785 } else {
786 struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
787 if (suna->sun_path[0])
788 unlink(suna->sun_path);
789 }
790
791 frr_elevate_privs(&zserv_privs) {
792 setsockopt_so_recvbuf(zebrad.sock, 1048576);
793 setsockopt_so_sendbuf(zebrad.sock, 1048576);
794 }
795
796 frr_elevate_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
797 ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
798 }
799 if (ret < 0) {
800 zlog_warn("Can't bind zserv socket on %s: %s", path,
801 safe_strerror(errno));
802 zlog_warn(
803 "zebra can't provide full functionality due to above error");
804 close(zebrad.sock);
805 zebrad.sock = -1;
806 return;
807 }
808
809 ret = listen(zebrad.sock, 5);
810 if (ret < 0) {
811 zlog_warn("Can't listen to zserv socket %s: %s", path,
812 safe_strerror(errno));
813 zlog_warn(
814 "zebra can't provide full functionality due to above error");
815 close(zebrad.sock);
816 zebrad.sock = -1;
817 return;
818 }
819
820 umask(old_mask);
821
822 zserv_event(NULL, ZSERV_ACCEPT);
823 }
824
825 void zserv_event(struct zserv *client, enum zserv_event event)
826 {
827 switch (event) {
828 case ZSERV_ACCEPT:
829 thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
830 NULL);
831 break;
832 case ZSERV_PROCESS_MESSAGES:
833 thread_add_event(zebrad.master, zserv_process_messages, client,
834 0, NULL);
835 break;
836 case ZSERV_HANDLE_CLIENT_FAIL:
837 thread_add_event(zebrad.master, zserv_handle_client_fail,
838 client, 0, &client->t_cleanup);
839 }
840 }
841
842
843 /* General purpose ---------------------------------------------------------- */
844
845 #define ZEBRA_TIME_BUF 32
846 static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
847 {
848 struct tm *tm;
849 time_t now;
850
851 assert(buf != NULL);
852 assert(buflen >= ZEBRA_TIME_BUF);
853 assert(time1 != NULL);
854
855 if (!*time1) {
856 snprintf(buf, buflen, "never ");
857 return (buf);
858 }
859
860 now = monotime(NULL);
861 now -= *time1;
862 tm = gmtime(&now);
863
864 if (now < ONE_DAY_SECOND)
865 snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
866 tm->tm_sec);
867 else if (now < ONE_WEEK_SECOND)
868 snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour,
869 tm->tm_min);
870 else
871 snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7,
872 tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour);
873 return buf;
874 }
875
/* Print the full per-client statistics block for "show zebra client". */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint16_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	/* These counters are written by the client pthread; load them
	 * atomically before formatting. */
	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/* Route and redistribution counters, per address family. */
	vty_out(vty, "Type Add Update Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4 %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6 %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4 %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6 %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

#if defined DEV_BUILD
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
	return;
}
963
964 static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
965 {
966 char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
967 char wbuf[ZEBRA_TIME_BUF];
968 time_t connect_time, last_read_time, last_write_time;
969
970 connect_time = (time_t)atomic_load_explicit(&client->connect_time,
971 memory_order_relaxed);
972 last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
973 memory_order_relaxed);
974 last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
975 memory_order_relaxed);
976
977 vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
978 zebra_route_string(client->proto),
979 zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
980 zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
981 zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
982 client->v4_route_add_cnt + client->v4_route_upd8_cnt,
983 client->v4_route_del_cnt,
984 client->v6_route_add_cnt + client->v6_route_upd8_cnt,
985 client->v6_route_del_cnt);
986 }
987
988 struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
989 {
990 struct listnode *node, *nnode;
991 struct zserv *client;
992
993 for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
994 if (client->proto == proto && client->instance == instance)
995 return client;
996 }
997
998 return NULL;
999 }
1000
1001 /* This command is for debugging purpose. */
/* This command is for debugging purpose: dump detailed statistics for
 * every connected ZAPI client. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_detail(vty, client);

	return CMD_SUCCESS;
}
1017
1018 /* This command is for debugging purpose. */
/* This command is for debugging purpose: one summary row per connected
 * ZAPI client. */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	vty_out(vty,
		"Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}
1041
1042 #if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing helper: open a capture file of ZAPI messages and feed its
 * descriptor through the normal client-creation path, so the file contents
 * are parsed as if they arrived from a live client.
 *
 * Fixes: the open() return value is now checked (previously a failed open
 * handed fd -1 straight to zserv_client_create()), and the dead local
 * 'struct thread t' / 't.u.fd' assignment has been removed.
 */
void zserv_read_file(char *input)
{
	int fd;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return;

	zserv_client_create(fd);
}
1053 #endif
1054
/* One-time module initialization: set up global state and register the
 * debugging CLI commands. The listening socket itself is opened later by
 * zserv_start(). */
void zserv_init(void)
{
	/* Client list init. */
	zebrad.client_list = list_new();

	/* Misc init: -1 marks "no listening socket yet". */
	zebrad.sock = -1;

	install_element(ENABLE_NODE, &show_zebra_client_cmd);
	install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
}