]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zserv.c
zebra: don't delete pthreads from under themselves
[mirror_frr.git] / zebra / zserv.c
1 /*
2 * Zebra API server.
3 * Portions:
4 * Copyright (C) 1997-1999 Kunihiro Ishiguro
5 * Copyright (C) 2015-2018 Cumulus Networks, Inc.
6 * et al.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; see the file COPYING; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <zebra.h>
24
25 /* clang-format off */
26 #include <errno.h> /* for errno */
27 #include <netinet/in.h> /* for sockaddr_in */
28 #include <stdint.h> /* for uint8_t */
29 #include <stdio.h> /* for snprintf */
30 #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */
31 #include <sys/stat.h> /* for umask, mode_t */
32 #include <sys/un.h> /* for sockaddr_un */
33 #include <time.h> /* for NULL, tm, gmtime, time_t */
34 #include <unistd.h> /* for close, unlink, ssize_t */
35
36 #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
37 #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */
38 #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
39 #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
40 #include "lib/libfrr.h" /* for frr_zclient_addr */
41 #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */
42 #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */
43 #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
44 #include "lib/network.h" /* for set_nonblocking */
45 #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
46 #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
47 #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
48 #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
49 #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
50 #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
51 #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
52 #include "lib/vty.h" /* for vty_out, vty (ptr only) */
53 #include "lib/zassert.h" /* for assert */
54 #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
55 #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
56 #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */
57
58 #include "zebra/debug.h" /* for various debugging macros */
59 #include "zebra/rib.h" /* for rib_score_proto */
60 #include "zebra/zapi_msg.h" /* for zserv_handle_commands */
61 #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */
62 #include "zebra/zserv.h" /* for zserv */
63 /* clang-format on */
64
65 /* privileges */
66 extern struct zebra_privs_t zserv_privs;
67
68 /*
69 * Client thread events.
70 *
71 * These are used almost exclusively by client threads to drive their own event
72 * loops. The only exception is in zserv_client_create(), which pushes an
73 * initial ZSERV_CLIENT_READ event to start the API handler loop.
74 */
75 enum zserv_client_event {
76 /* Schedule a socket read */
77 ZSERV_CLIENT_READ,
78 /* Schedule a buffer write */
79 ZSERV_CLIENT_WRITE,
80 };
81
82 /*
83 * Main thread events.
84 *
85 * These are used by client threads to notify the main thread about various
86 * events and to make processing requests.
87 */
88 enum zserv_event {
89 /* Schedule listen job on Zebra API socket */
90 ZSERV_ACCEPT,
91 /* The calling client has packets on its input buffer */
92 ZSERV_PROCESS_MESSAGES,
93 /* The calling client wishes to be killed */
94 ZSERV_HANDLE_CLIENT_FAIL,
95 };
96
97 /*
98 * Zebra server event driver for all client threads.
99 *
100 * This is essentially a wrapper around thread_add_event() that centralizes
101 * those scheduling calls into one place.
102 *
103 * All calls to this function schedule an event on the pthread running the
104 * provided client.
105 *
106 * client
107 * the client in question, and thread target
108 *
109 * event
110 * the event to notify them about
111 */
112 static void zserv_client_event(struct zserv *client,
113 enum zserv_client_event event);
114
115 /*
116 * Zebra server event driver for the main thread.
117 *
118 * This is essentially a wrapper around thread_add_event() that centralizes
119 * those scheduling calls into one place.
120 *
121 * All calls to this function schedule an event on Zebra's main pthread.
122 *
123 * client
124 * the client in question
125 *
126 * event
127 * the event to notify the main thread about
128 */
129 static void zserv_event(struct zserv *client, enum zserv_event event);
130
131
132 /* Client thread lifecycle -------------------------------------------------- */
133
134 /*
135 * Log zapi message to zlog.
136 *
137 * errmsg (optional)
138 * Debugging message
139 *
140 * msg
141 * The message
142 *
143 * hdr (optional)
144 * The message header
145 */
146 static void zserv_log_message(const char *errmsg, struct stream *msg,
147 struct zmsghdr *hdr)
148 {
149 zlog_debug("Rx'd ZAPI message");
150 if (errmsg)
151 zlog_debug("%s", errmsg);
152 if (hdr) {
153 zlog_debug(" Length: %d", hdr->length);
154 zlog_debug("Command: %s", zserv_command_string(hdr->command));
155 zlog_debug(" VRF: %u", hdr->vrf_id);
156 }
157 zlog_hexdump(msg->data, STREAM_READABLE(msg));
158 }
159
160 /*
161 * Gracefully shut down a client connection.
162 *
163 * Cancel any pending tasks for the client's thread. Then schedule a task on
164 * the main thread to shut down the calling thread.
165 *
166 * Must be called from the client pthread, never the main thread.
167 */
static void zserv_client_fail(struct zserv *client)
{
	zlog_warn("Client '%s' encountered an error and is shutting down.",
		  zebra_route_string(client->proto));

	/* Mark this pthread as not running *before* tearing anything down so
	 * the frr_pthread machinery stops scheduling it. */
	atomic_store_explicit(&client->pthread->running, false,
			      memory_order_relaxed);
	if (client->sock > 0) {
		close(client->sock);
		/* -1 signals "already closed" to later cleanup stages. */
		client->sock = -1;
	}
	/* Cancel our own pending read/write tasks; they run on this pthread,
	 * so cancelling from here is safe. */
	THREAD_OFF(client->t_read);
	THREAD_OFF(client->t_write);
	/* Hand the join/destroy/free work to the main thread -- a pthread
	 * must never delete itself. */
	zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL);
}
183
184 /*
185 * Write all pending messages to client socket.
186 *
187 * This function first attempts to flush any buffered data. If unsuccessful,
188 * the function reschedules itself and returns. If successful, it pops all
189 * available messages from the output queue and continues to write data
190 * directly to the socket until the socket would block. If the socket never
191 * blocks and all data is written, the function returns without rescheduling
192 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
193 * buffered and the function reschedules itself.
194 *
195 * The utility of the buffer is that it allows us to vastly reduce lock
196 * contention by allowing us to pop *all* messages off the output queue at once
197 * instead of locking and unlocking each time we want to pop a single message
198 * off the queue. The same thing could arguably be accomplished faster by
199 * allowing the main thread to write directly into the buffer instead of
200 * enqueuing packets onto an intermediary queue, but the intermediary queue
201 * allows us to expose information about input and output queues to the user in
202 * terms of number of packets rather than size of data.
203 */
static int zserv_write(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
	/* Command code of the last message written; 0 if none. */
	uint32_t wcmd = 0;
	/* Private staging fifo so the obuf_mtx critical section stays short. */
	struct stream_fifo *cache;

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		/* Socket would block; record the attempt and retry later. */
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	cache = stream_fifo_new();

	/* Pop *all* queued messages in one locked pass to minimize
	 * contention with the main thread enqueuing output. */
	pthread_mutex_lock(&client->obuf_mtx);
	{
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	/* Remember the command of the newest message (fifo tail) for the
	 * "Last Sent Cmd" statistic; offset 6 is the command field. */
	if (cache->tail) {
		msg = cache->tail;
		stream_set_getp(msg, 0);
		wcmd = stream_getw_from(msg, 6);
	}

	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		/* NOTE(review): buffer_put()'s return value is ignored here;
		 * confirm it cannot fail in a way that silently drops data. */
		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
		stream_free(msg);
	}

	stream_fifo_free(cache);

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	/* Everything was written; update session statistics and do NOT
	 * reschedule -- the next enqueue will schedule us again. */
	atomic_store_explicit(&client->last_write_cmd, wcmd,
			      memory_order_relaxed);

	atomic_store_explicit(&client->last_write_time,
			      (uint32_t)monotime(NULL), memory_order_relaxed);

	return 0;

zwrite_fail:
	zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
		  zebra_route_string(client->proto), client->sock);
	zserv_client_fail(client);
	return 0;
}
277
278 /*
279 * Read and process data from a client socket.
280 *
281 * The responsibilities here are to read raw data from the client socket,
282 * validate the header, encapsulate it into a single stream object, push it
283 * onto the input queue and then notify the main thread that there is new data
284 * available.
285 *
286 * This function first looks for any data in the client structure's working
287 * input buffer. If data is present, it is assumed that reading stopped in a
288 * previous invocation of this task and needs to be resumed to finish a message.
289 * Otherwise, the socket data stream is assumed to be at the beginning of a new
290 * ZAPI message (specifically at the header). The header is read and validated.
291 * If the header passed validation then the length field found in the header is
292 * used to compute the total length of the message. That much data is read (but
293 * not inspected), appended to the header, placed into a stream and pushed onto
294 * the client's input queue. A task is then scheduled on the main thread to
295 * process the client's input queue. Finally, if all of this was successful,
296 * this task reschedules itself.
297 *
298 * Any failure in any of these actions is handled by terminating the client.
299 */
static int zserv_read(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	int sock;
	/* Bytes already present in the working buffer (resume point). */
	size_t already;
	/* Completed messages are staged here and published to the shared
	 * ibuf_fifo in a single locked pass at the end. */
	struct stream_fifo *cache;
	/* Per-invocation message budget, and the countdown from it. */
	uint32_t p2p_orig;

	uint32_t p2p;
	struct zmsghdr hdr;

	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
					memory_order_relaxed);
	cache = stream_fifo_new();
	p2p = p2p_orig;
	sock = THREAD_FD(thread);

	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];

		/* Non-zero endp means a previous invocation stopped partway
		 * through a message; resume from there. */
		already = stream_get_endp(client->ibuf_work);

		/* Read length and command (if we don't have it already). */
		if (already < ZEBRA_HEADER_SIZE) {
			nb = stream_read_try(client->ibuf_work, sock,
					     ZEBRA_HEADER_SIZE - already);
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug("connection closed socket [%d]",
						   sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
				/* Try again later. */
				break;
			}
			already = ZEBRA_HEADER_SIZE;
		}

		/* Reset to read from the beginning of the incoming packet. */
		stream_set_getp(client->ibuf_work, 0);

		/* Fetch header values */
		hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);

		if (!hdrvalid) {
			snprintf(errmsg, sizeof(errmsg),
				 "%s: Message has corrupt header", __func__);
			zserv_log_message(errmsg, client->ibuf_work, NULL);
			goto zread_fail;
		}

		/* Validate header: marker/version, then sane length bounds. */
		if (hdr.marker != ZEBRA_HEADER_MARKER
		    || hdr.version != ZSERV_VERSION) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
				__func__, sock, hdr.marker, hdr.version);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length < ZEBRA_HEADER_SIZE) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
				__func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}

		/* Read rest of data. */
		if (already < hdr.length) {
			nb = stream_read_try(client->ibuf_work, sock,
					     hdr.length - already);
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug(
						"connection closed [%d] when reading zebra data",
						sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(hdr.length - already)) {
				/* Try again later. */
				break;
			}
		}

		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("zebra message comes from socket [%d]",
				   sock);

		if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
			zserv_log_message(NULL, client->ibuf_work, &hdr);

		/* Copy the completed message out of the working buffer and
		 * reset it for the next message. */
		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

		stream_fifo_push(cache, msg);
		stream_reset(client->ibuf_work);
		p2p--;
	}

	/* p2p < p2p_orig iff at least one complete message was read, so 'hdr'
	 * is guaranteed initialized (it holds the last message's header). */
	if (p2p < p2p_orig) {
		/* update session statistics */
		atomic_store_explicit(&client->last_read_time, monotime(NULL),
				      memory_order_relaxed);
		atomic_store_explicit(&client->last_read_cmd, hdr.command,
				      memory_order_relaxed);

		/* publish read packets on client's input queue */
		pthread_mutex_lock(&client->ibuf_mtx);
		{
			while (cache->head)
				stream_fifo_push(client->ibuf_fifo,
						 stream_fifo_pop(cache));
		}
		pthread_mutex_unlock(&client->ibuf_mtx);

		/* Schedule job to process those packets */
		zserv_event(client, ZSERV_PROCESS_MESSAGES);

	}

	if (IS_ZEBRA_DEBUG_PACKET)
		zlog_debug("Read %d packets", p2p_orig - p2p);

	/* Reschedule ourselves */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	stream_fifo_free(cache);

	return 0;

zread_fail:
	stream_fifo_free(cache);
	zserv_client_fail(client);
	return -1;
}
451
452 static void zserv_client_event(struct zserv *client,
453 enum zserv_client_event event)
454 {
455 switch (event) {
456 case ZSERV_CLIENT_READ:
457 thread_add_read(client->pthread->master, zserv_read, client,
458 client->sock, &client->t_read);
459 break;
460 case ZSERV_CLIENT_WRITE:
461 thread_add_write(client->pthread->master, zserv_write, client,
462 client->sock, &client->t_write);
463 break;
464 }
465 }
466
467 /* Main thread lifecycle ---------------------------------------------------- */
468
469 /*
470 * Read and process messages from a client.
471 *
472 * This task runs on the main pthread. It is scheduled by client pthreads when
473 * they have new messages available on their input queues. The client is passed
474 * as the task argument.
475 *
476 * Each message is popped off the client's input queue and the action associated
477 * with the message is executed. This proceeds until there are no more messages,
478 * an error occurs, or the processing limit is reached.
479 *
480 * The client's I/O thread can push at most zebrad.packets_to_process messages
481 * onto the input buffer before notifying us there are packets to read. As long
482 * as we always process zebrad.packets_to_process messages here, then we can
483 * rely on the read thread to handle queuing this task enough times to process
484 * everything on the input queue.
485 */
486 static int zserv_process_messages(struct thread *thread)
487 {
488 struct zserv *client = THREAD_ARG(thread);
489 struct stream *msg;
490 struct stream_fifo *cache = stream_fifo_new();
491
492 uint32_t p2p = zebrad.packets_to_process;
493
494 pthread_mutex_lock(&client->ibuf_mtx);
495 {
496 uint32_t i;
497 for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
498 ++i) {
499 msg = stream_fifo_pop(client->ibuf_fifo);
500 stream_fifo_push(cache, msg);
501 }
502
503 msg = NULL;
504 }
505 pthread_mutex_unlock(&client->ibuf_mtx);
506
507 while (stream_fifo_head(cache)) {
508 msg = stream_fifo_pop(cache);
509 zserv_handle_commands(client, msg);
510 stream_free(msg);
511 }
512
513 stream_fifo_free(cache);
514
515 return 0;
516 }
517
518 int zserv_send_message(struct zserv *client, struct stream *msg)
519 {
520 /*
521 * This is a somewhat poorly named variable added with Zebra's portion
522 * of the label manager. That component does not use the regular
523 * zserv/zapi_msg interface for handling its messages, as the client
524 * itself runs in-process. Instead it uses synchronous writes on the
525 * zserv client's socket directly in the zread* handlers for its
526 * message types. Furthermore, it cannot handle the usual messages
527 * Zebra sends (such as those for interface changes) and so has added
528 * this flag and check here as a hack to suppress all messages that it
529 * does not explicitly know about.
530 *
531 * In any case this needs to be cleaned up at some point.
532 *
533 * See also:
534 * zread_label_manager_request
535 * zsend_label_manager_connect_response
536 * zsend_assign_label_chunk_response
537 * ...
538 */
539 if (client->is_synchronous)
540 return 0;
541
542 pthread_mutex_lock(&client->obuf_mtx);
543 {
544 stream_fifo_push(client->obuf_fifo, msg);
545 }
546 pthread_mutex_unlock(&client->obuf_mtx);
547
548 zserv_client_event(client, ZSERV_CLIENT_WRITE);
549
550 return 0;
551 }
552
553
554 /* Hooks for client connect / disconnect */
555 DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
556 DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
557
558 /*
559 * Deinitialize zebra client.
560 *
561 * - Deregister and deinitialize related internal resources
562 * - Gracefully close socket
563 * - Free associated resources
564 * - Free client structure
565 *
566 * This does *not* take any action on the struct thread * fields. These are
567 * managed by the owning pthread and any tasks associated with them must have
568 * been stopped prior to invoking this function.
569 */
570 static void zserv_client_free(struct zserv *client)
571 {
572 hook_call(zserv_client_close, client);
573
574 /* Close file descriptor. */
575 if (client->sock) {
576 unsigned long nroutes;
577
578 close(client->sock);
579 nroutes = rib_score_proto(client->proto, client->instance);
580 zlog_notice(
581 "client %d disconnected. %lu %s routes removed from the rib",
582 client->sock, nroutes,
583 zebra_route_string(client->proto));
584 client->sock = -1;
585 }
586
587 /* Free stream buffers. */
588 if (client->ibuf_work)
589 stream_free(client->ibuf_work);
590 if (client->obuf_work)
591 stream_free(client->obuf_work);
592 if (client->ibuf_fifo)
593 stream_fifo_free(client->ibuf_fifo);
594 if (client->obuf_fifo)
595 stream_fifo_free(client->obuf_fifo);
596 if (client->wb)
597 buffer_free(client->wb);
598
599 /* Free buffer mutexes */
600 pthread_mutex_destroy(&client->obuf_mtx);
601 pthread_mutex_destroy(&client->ibuf_mtx);
602
603 /* Free bitmaps. */
604 for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
605 for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
606 vrf_bitmap_free(client->redist[afi][i]);
607
608 vrf_bitmap_free(client->redist_default);
609 vrf_bitmap_free(client->ifinfo);
610 vrf_bitmap_free(client->ridinfo);
611
612 XFREE(MTYPE_TMP, client);
613 }
614
/*
 * Fully tear down a client: stop and join its pthread, cancel its tasks,
 * then free all its resources. Runs on the main thread.
 */
void zserv_close_client(struct zserv *client)
{
	/* synchronously stop and join pthread */
	frr_pthread_stop(client->pthread, NULL);

	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("Closing client '%s'",
			   zebra_route_string(client->proto));

	/* if file descriptor is still open, close it */
	if (client->sock > 0) {
		close(client->sock);
		client->sock = -1;
	}

	/* Drop any main-thread tasks still pending for this client (e.g. a
	 * queued ZSERV_PROCESS_MESSAGES) before freeing it. */
	thread_cancel_event(zebrad.master, client);
	THREAD_OFF(client->t_cleanup);

	/* destroy pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	/* remove from client list */
	listnode_delete(zebrad.client_list, client);

	/* delete client */
	zserv_client_free(client);
}
643
644 /*
645 * This task is scheduled by a ZAPI client pthread on the main pthread when it
646 * wants to stop itself. When this executes, the client connection should
647 * already have been closed and the thread will most likely have died, but its
648 * resources still need to be cleaned up.
649 */
static int zserv_handle_client_fail(struct thread *thread)
{
	/* Runs on the main thread; finish tearing down the failed client. */
	zserv_close_client(THREAD_ARG(thread));

	return 0;
}
657
658 /*
659 * Create a new client.
660 *
661 * This is called when a new connection is accept()'d on the ZAPI socket. It
662 * initializes new client structure, notifies any subscribers of the connection
663 * event and spawns the client's thread.
664 *
665 * sock
666 * client's socket file descriptor
667 */
static struct zserv *zserv_client_create(int sock)
{
	struct zserv *client;
	int i;
	afi_t afi;

	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	client->wb = buffer_new(0);

	/* Set table number. */
	client->rtm_table = zebrad.rtm_table_default;

	atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
			      memory_order_relaxed);

	/* Initialize flags */
	for (afi = AFI_IP; afi < AFI_MAX; afi++)
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
	client->redist_default = vrf_bitmap_init();
	client->ifinfo = vrf_bitmap_init();
	client->ridinfo = vrf_bitmap_init();

	/* by default, it's not a synchronous client */
	client->is_synchronous = 0;

	/* Add this client to linked list. */
	listnode_add(zebrad.client_list, client);

	/* Each API client gets its own pthread with a dedicated event loop
	 * for socket I/O. */
	struct frr_pthread_attr zclient_pthr_attrs = {
		.id = frr_pthread_get_id(),
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread =
		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread");

	zebra_vrf_update_all(client);

	/* start read loop -- queued before the pthread runs so its event
	 * loop has work the moment it starts */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
	frr_pthread_run(client->pthread, NULL);

	return client;
}
727
728 /*
729 * Accept socket connection.
730 */
731 static int zserv_accept(struct thread *thread)
732 {
733 int accept_sock;
734 int client_sock;
735 struct sockaddr_in client;
736 socklen_t len;
737
738 accept_sock = THREAD_FD(thread);
739
740 /* Reregister myself. */
741 zserv_event(NULL, ZSERV_ACCEPT);
742
743 len = sizeof(struct sockaddr_in);
744 client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
745
746 if (client_sock < 0) {
747 zlog_warn("Can't accept zebra socket: %s",
748 safe_strerror(errno));
749 return -1;
750 }
751
752 /* Make client socket non-blocking. */
753 set_nonblocking(client_sock);
754
755 /* Create new zebra client. */
756 zserv_client_create(client_sock);
757
758 return 0;
759 }
760
761 void zserv_start(char *path)
762 {
763 int ret;
764 mode_t old_mask;
765 struct sockaddr_storage sa;
766 socklen_t sa_len;
767
768 if (!frr_zclient_addr(&sa, &sa_len, path))
769 /* should be caught in zebra main() */
770 return;
771
772 /* Set umask */
773 old_mask = umask(0077);
774
775 /* Make UNIX domain socket. */
776 zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
777 if (zebrad.sock < 0) {
778 zlog_warn("Can't create zserv socket: %s",
779 safe_strerror(errno));
780 zlog_warn(
781 "zebra can't provide full functionality due to above error");
782 return;
783 }
784
785 if (sa.ss_family != AF_UNIX) {
786 sockopt_reuseaddr(zebrad.sock);
787 sockopt_reuseport(zebrad.sock);
788 } else {
789 struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
790 if (suna->sun_path[0])
791 unlink(suna->sun_path);
792 }
793
794 zserv_privs.change(ZPRIVS_RAISE);
795 setsockopt_so_recvbuf(zebrad.sock, 1048576);
796 setsockopt_so_sendbuf(zebrad.sock, 1048576);
797 zserv_privs.change(ZPRIVS_LOWER);
798
799 if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_RAISE))
800 zlog_err("Can't raise privileges");
801
802 ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
803 if (ret < 0) {
804 zlog_warn("Can't bind zserv socket on %s: %s", path,
805 safe_strerror(errno));
806 zlog_warn(
807 "zebra can't provide full functionality due to above error");
808 close(zebrad.sock);
809 zebrad.sock = -1;
810 return;
811 }
812 if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_LOWER))
813 zlog_err("Can't lower privileges");
814
815 ret = listen(zebrad.sock, 5);
816 if (ret < 0) {
817 zlog_warn("Can't listen to zserv socket %s: %s", path,
818 safe_strerror(errno));
819 zlog_warn(
820 "zebra can't provide full functionality due to above error");
821 close(zebrad.sock);
822 zebrad.sock = -1;
823 return;
824 }
825
826 umask(old_mask);
827
828 zserv_event(NULL, ZSERV_ACCEPT);
829 }
830
831 void zserv_event(struct zserv *client, enum zserv_event event)
832 {
833 switch (event) {
834 case ZSERV_ACCEPT:
835 thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
836 NULL);
837 break;
838 case ZSERV_PROCESS_MESSAGES:
839 thread_add_event(zebrad.master, zserv_process_messages, client,
840 0, NULL);
841 break;
842 case ZSERV_HANDLE_CLIENT_FAIL:
843 thread_add_event(zebrad.master, zserv_handle_client_fail,
844 client, 0, &client->t_cleanup);
845 }
846 }
847
848
849 /* General purpose ---------------------------------------------------------- */
850
851 #define ZEBRA_TIME_BUF 32
852 static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
853 {
854 struct tm *tm;
855 time_t now;
856
857 assert(buf != NULL);
858 assert(buflen >= ZEBRA_TIME_BUF);
859 assert(time1 != NULL);
860
861 if (!*time1) {
862 snprintf(buf, buflen, "never ");
863 return (buf);
864 }
865
866 now = monotime(NULL);
867 now -= *time1;
868 tm = gmtime(&now);
869
870 if (now < ONE_DAY_SECOND)
871 snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
872 tm->tm_sec);
873 else if (now < ONE_WEEK_SECOND)
874 snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour,
875 tm->tm_min);
876 else
877 snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7,
878 tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour);
879 return buf;
880 }
881
/*
 * Print the full per-client statistics block for "show zebra client".
 * Statistics shared with the client's I/O pthread are read with relaxed
 * atomic loads.
 */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint16_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	/* Read/write statistics are updated by the client's I/O pthread. */
	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	vty_out(vty, "Type Add Update Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4 %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6 %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4 %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6 %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

#if defined DEV_BUILD
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
	return;
}
969
/*
 * Print the one-line per-client summary row for "show zebra client summary".
 * Route columns show (added+updated)/deleted per address family.
 */
static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;

	/* These counters are written by the client's I/O pthread. */
	connect_time = (time_t)atomic_load_explicit(&client->connect_time,
						    memory_order_relaxed);
	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
		zebra_route_string(client->proto),
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
		client->v4_route_add_cnt + client->v4_route_upd8_cnt,
		client->v4_route_del_cnt,
		client->v6_route_add_cnt + client->v6_route_upd8_cnt,
		client->v6_route_del_cnt);
}
993
994 struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
995 {
996 struct listnode *node, *nnode;
997 struct zserv *client;
998
999 for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
1000 if (client->proto == proto && client->instance == instance)
1001 return client;
1002 }
1003
1004 return NULL;
1005 }
1006
1007 /* This command is for debugging purpose. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Print the full detail block for every connected API client. */
	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_detail(vty, client);

	return CMD_SUCCESS;
}
1023
1024 /* This command is for debugging purpose. */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Table header, then one summary row per connected client. */
	vty_out(vty,
		"Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}
1047
1048 #if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing entry point: open a corpus file and hand the descriptor to
 * zserv_client_create(), which drives the normal client read path over it.
 *
 * input
 *    path to the corpus file
 */
void zserv_read_file(char *input)
{
	int fd;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	/* Previously the fd was passed along unchecked (and a dead local
	 * 'struct thread t' was populated but never used). */
	if (fd < 0) {
		zlog_warn("%s: failed to open %s: %s", __func__, input,
			  safe_strerror(errno));
		return;
	}

	zserv_client_create(fd);
}
1059 #endif
1060
1061 void zserv_init(void)
1062 {
1063 /* Client list init. */
1064 zebrad.client_list = list_new();
1065
1066 /* Misc init. */
1067 zebrad.sock = -1;
1068
1069 install_element(ENABLE_NODE, &show_zebra_client_cmd);
1070 install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
1071 }