]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zserv.c
740926e1afa853792851c654e92752eab44f6475
[mirror_frr.git] / zebra / zserv.c
1 /*
2 * Zebra API server.
3 * Portions:
4 * Copyright (C) 1997-1999 Kunihiro Ishiguro
5 * Copyright (C) 2015-2018 Cumulus Networks, Inc.
6 * et al.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; see the file COPYING; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <zebra.h>
24
25 /* clang-format off */
26 #include <errno.h> /* for errno */
27 #include <netinet/in.h> /* for sockaddr_in */
28 #include <stdint.h> /* for uint8_t */
29 #include <stdio.h> /* for snprintf */
30 #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */
31 #include <sys/stat.h> /* for umask, mode_t */
32 #include <sys/un.h> /* for sockaddr_un */
33 #include <time.h> /* for NULL, tm, gmtime, time_t */
34 #include <unistd.h> /* for close, unlink, ssize_t */
35
36 #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
37 #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */
38 #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
39 #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
40 #include "lib/libfrr.h" /* for frr_zclient_addr */
41 #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */
42 #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */
43 #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
44 #include "lib/network.h" /* for set_nonblocking */
45 #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
46 #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
47 #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
48 #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
49 #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
50 #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
51 #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
52 #include "lib/vty.h" /* for vty_out, vty (ptr only) */
53 #include "lib/zassert.h" /* for assert */
54 #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
55 #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
56
57 #include "zebra/debug.h" /* for various debugging macros */
58 #include "zebra/rib.h" /* for rib_score_proto */
59 #include "zebra/zapi_msg.h" /* for zserv_handle_commands */
60 #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */
61 #include "zebra/zserv.h" /* for zserv */
62 /* clang-format on */
63
64 /* privileges */
65 extern struct zebra_privs_t zserv_privs;
66
67 /*
68 * Client thread events.
69 *
70 * These are used almost exclusively by client threads to drive their own event
71 * loops. The only exception is in zebra_client_create(), which pushes an
72 * initial ZSERV_CLIENT_READ event to start the API handler loop.
73 */
74 enum zserv_client_event {
75 /* Schedule a socket read */
76 ZSERV_CLIENT_READ,
77 /* Schedule a buffer write */
78 ZSERV_CLIENT_WRITE,
79 };
80
81 /*
82 * Main thread events.
83 *
84 * These are used by client threads to notify the main thread about various
85 * events and to make processing requests.
86 */
87 enum zserv_event {
88 /* Schedule listen job on Zebra API socket */
89 ZSERV_ACCEPT,
90 /* The calling client has packets on its input buffer */
91 ZSERV_PROCESS_MESSAGES,
92 /* The calling client wishes to be killed */
93 ZSERV_HANDLE_CLOSE,
94 };
95
96 /*
97 * Zebra server event driver for all client threads.
98 *
99 * This is essentially a wrapper around thread_add_event() that centralizes
100 * those scheduling calls into one place.
101 *
102 * All calls to this function schedule an event on the pthread running the
103 * provided client.
104 *
105 * client
106 * the client in question, and thread target
107 *
108 * event
109 * the event to notify them about
110 */
111 static void zserv_client_event(struct zserv *client,
112 enum zserv_client_event event);
113
114 /*
115 * Zebra server event driver for the main thread.
116 *
117 * This is essentially a wrapper around thread_add_event() that centralizes
118 * those scheduling calls into one place.
119 *
120 * All calls to this function schedule an event on Zebra's main pthread.
121 *
122 * client
123 * the client in question
124 *
125 * event
126 * the event to notify the main thread about
127 */
128 static void zserv_event(struct zserv *client, enum zserv_event event);
129
130
131 /* Client thread lifecycle -------------------------------------------------- */
132
133 /*
134 * Log zapi message to zlog.
135 *
136 * errmsg (optional)
137 * Debugging message
138 *
139 * msg
140 * The message
141 *
142 * hdr (optional)
143 * The message header
144 */
145 static void zserv_log_message(const char *errmsg, struct stream *msg,
146 struct zmsghdr *hdr)
147 {
148 zlog_debug("Rx'd ZAPI message");
149 if (errmsg)
150 zlog_debug("%s", errmsg);
151 if (hdr) {
152 zlog_debug(" Length: %d", hdr->length);
153 zlog_debug("Command: %s", zserv_command_string(hdr->command));
154 zlog_debug(" VRF: %u", hdr->vrf_id);
155 }
156 zlog_hexdump(msg->data, STREAM_READABLE(msg));
157 }
158
/*
 * Gracefully shut down a client connection.
 *
 * Cancel any pending tasks for the client's thread. Then schedule a task on the
 * main thread to shut down the calling thread.
 *
 * Must be called from the client pthread, never the main thread.
 */
static void zserv_client_close(struct zserv *client)
{
	/* Stop any scheduled I/O on this client's own threadmaster first, so
	 * no further read/write task can run once teardown has begun. */
	THREAD_OFF(client->t_read);
	THREAD_OFF(client->t_write);
	/* Hand the rest of the teardown to the main thread; it will stop and
	 * destroy this pthread (see zserv_handle_client_close). */
	zserv_event(client, ZSERV_HANDLE_CLOSE);
}
173
174 /*
175 * Write all pending messages to client socket.
176 *
177 * This function first attempts to flush any buffered data. If unsuccessful,
178 * the function reschedules itself and returns. If successful, it pops all
179 * available messages from the output queue and continues to write data
180 * directly to the socket until the socket would block. If the socket never
181 * blocks and all data is written, the function returns without rescheduling
182 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
183 * buffered and the function reschedules itself.
184 *
185 * The utility of the buffer is that it allows us to vastly reduce lock
186 * contention by allowing us to pop *all* messages off the output queue at once
187 * instead of locking and unlocking each time we want to pop a single message
188 * off the queue. The same thing could arguably be accomplished faster by
189 * allowing the main thread to write directly into the buffer instead of
190 * enqueuing packets onto an intermediary queue, but the intermediary queue
191 * allows us to expose information about input and output queues to the user in
192 * terms of number of packets rather than size of data.
193 */
194 static int zserv_write(struct thread *thread)
195 {
196 struct zserv *client = THREAD_ARG(thread);
197 struct stream *msg;
198 uint32_t wcmd;
199 int writerv;
200 struct stream_fifo *cache;
201
202 /* If we have any data pending, try to flush it first */
203 switch (buffer_flush_available(client->wb, client->sock)) {
204 case BUFFER_ERROR:
205 goto zwrite_fail;
206 case BUFFER_PENDING:
207 client->last_write_time = monotime(NULL);
208 zserv_client_event(client, ZSERV_CLIENT_WRITE);
209 return 0;
210 case BUFFER_EMPTY:
211 break;
212 }
213
214 cache = stream_fifo_new();
215
216 pthread_mutex_lock(&client->obuf_mtx);
217 {
218 while (client->obuf_fifo->head)
219 stream_fifo_push(cache,
220 stream_fifo_pop(client->obuf_fifo));
221 }
222 pthread_mutex_unlock(&client->obuf_mtx);
223
224 while (stream_fifo_head(cache)) {
225 msg = stream_fifo_pop(cache);
226 stream_set_getp(msg, 0);
227
228 wcmd = stream_getw_from(msg, 6);
229 writerv = buffer_write(client->wb, client->sock,
230 STREAM_DATA(msg), stream_get_endp(msg));
231
232 switch (writerv) {
233 case BUFFER_ERROR:
234 stream_free(msg);
235 stream_fifo_free(cache);
236 goto zwrite_fail;
237 case BUFFER_PENDING:
238 case BUFFER_EMPTY:
239 break;
240 }
241
242 stream_free(msg);
243 }
244
245 if (!buffer_empty(client->wb))
246 zserv_client_event(client, ZSERV_CLIENT_WRITE);
247
248 stream_fifo_free(cache);
249
250 atomic_store_explicit(&client->last_write_cmd, wcmd,
251 memory_order_relaxed);
252
253 atomic_store_explicit(&client->last_write_time,
254 (uint32_t)monotime(NULL), memory_order_relaxed);
255
256 return 0;
257
258 zwrite_fail:
259 zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
260 zebra_route_string(client->proto), client->sock);
261 zserv_client_close(client);
262 return 0;
263 }
264
265 /*
266 * Read and process data from a client socket.
267 *
268 * The responsibilities here are to read raw data from the client socket,
269 * validate the header, encapsulate it into a single stream object, push it
270 * onto the input queue and then notify the main thread that there is new data
271 * available.
272 *
273 * This function first looks for any data in the client structure's working
274 * input buffer. If data is present, it is assumed that reading stopped in a
275 * previous invocation of this task and needs to be resumed to finish a message.
276 * Otherwise, the socket data stream is assumed to be at the beginning of a new
277 * ZAPI message (specifically at the header). The header is read and validated.
278 * If the header passed validation then the length field found in the header is
279 * used to compute the total length of the message. That much data is read (but
280 * not inspected), appended to the header, placed into a stream and pushed onto
281 * the client's input queue. A task is then scheduled on the main thread to
282 * process the client's input queue. Finally, if all of this was successful,
283 * this task reschedules itself.
284 *
285 * Any failure in any of these actions is handled by terminating the client.
286 */
287 static int zserv_read(struct thread *thread)
288 {
289 int sock;
290 struct zserv *client;
291 size_t already;
292 struct stream_fifo *cache = stream_fifo_new();
293 uint32_t p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
294 memory_order_relaxed);
295 uint32_t p2p;
296 struct zmsghdr hdr;
297
298 p2p = p2p_orig;
299 sock = THREAD_FD(thread);
300 client = THREAD_ARG(thread);
301
302 while (p2p--) {
303 ssize_t nb;
304 bool hdrvalid;
305 char errmsg[256];
306
307 already = stream_get_endp(client->ibuf_work);
308
309 /* Read length and command (if we don't have it already). */
310 if (already < ZEBRA_HEADER_SIZE) {
311 nb = stream_read_try(client->ibuf_work, sock,
312 ZEBRA_HEADER_SIZE - already);
313 if ((nb == 0 || nb == -1) && IS_ZEBRA_DEBUG_EVENT)
314 zlog_debug("connection closed socket [%d]",
315 sock);
316 if ((nb == 0 || nb == -1))
317 goto zread_fail;
318 if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
319 /* Try again later. */
320 break;
321 }
322 already = ZEBRA_HEADER_SIZE;
323 }
324
325 /* Reset to read from the beginning of the incoming packet. */
326 stream_set_getp(client->ibuf_work, 0);
327
328 /* Fetch header values */
329 hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);
330
331 if (!hdrvalid) {
332 snprintf(errmsg, sizeof(errmsg),
333 "%s: Message has corrupt header", __func__);
334 zserv_log_message(errmsg, client->ibuf_work, NULL);
335 goto zread_fail;
336 }
337
338 /* Validate header */
339 if (hdr.marker != ZEBRA_HEADER_MARKER
340 || hdr.version != ZSERV_VERSION) {
341 snprintf(
342 errmsg, sizeof(errmsg),
343 "Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
344 __func__, sock, hdr.marker, hdr.version);
345 zserv_log_message(errmsg, client->ibuf_work, &hdr);
346 goto zread_fail;
347 }
348 if (hdr.length < ZEBRA_HEADER_SIZE) {
349 snprintf(
350 errmsg, sizeof(errmsg),
351 "Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
352 __func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
353 zserv_log_message(errmsg, client->ibuf_work, &hdr);
354 goto zread_fail;
355 }
356 if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
357 snprintf(
358 errmsg, sizeof(errmsg),
359 "Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
360 __func__, sock, hdr.length,
361 (unsigned long)STREAM_SIZE(client->ibuf_work));
362 zserv_log_message(errmsg, client->ibuf_work, &hdr);
363 goto zread_fail;
364 }
365
366 /* Read rest of data. */
367 if (already < hdr.length) {
368 nb = stream_read_try(client->ibuf_work, sock,
369 hdr.length - already);
370 if ((nb == 0 || nb == -1) && IS_ZEBRA_DEBUG_EVENT)
371 zlog_debug(
372 "connection closed [%d] when reading zebra data",
373 sock);
374 if ((nb == 0 || nb == -1))
375 goto zread_fail;
376 if (nb != (ssize_t)(hdr.length - already)) {
377 /* Try again later. */
378 break;
379 }
380 }
381
382 /* Debug packet information. */
383 if (IS_ZEBRA_DEBUG_EVENT)
384 zlog_debug("zebra message comes from socket [%d]",
385 sock);
386
387 if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
388 zserv_log_message(NULL, client->ibuf_work, &hdr);
389
390 stream_set_getp(client->ibuf_work, 0);
391 struct stream *msg = stream_dup(client->ibuf_work);
392
393 stream_fifo_push(cache, msg);
394 stream_reset(client->ibuf_work);
395 }
396
397 if (p2p < p2p_orig) {
398 /* update session statistics */
399 atomic_store_explicit(&client->last_read_time, monotime(NULL),
400 memory_order_relaxed);
401 atomic_store_explicit(&client->last_read_cmd, hdr.command,
402 memory_order_relaxed);
403
404 /* publish read packets on client's input queue */
405 pthread_mutex_lock(&client->ibuf_mtx);
406 {
407 while (cache->head)
408 stream_fifo_push(client->ibuf_fifo,
409 stream_fifo_pop(cache));
410 }
411 pthread_mutex_unlock(&client->ibuf_mtx);
412
413 /* Schedule job to process those packets */
414 zserv_event(client, ZSERV_PROCESS_MESSAGES);
415
416 }
417
418 if (IS_ZEBRA_DEBUG_PACKET)
419 zlog_debug("Read %d packets", p2p_orig - p2p);
420
421 /* Reschedule ourselves */
422 zserv_client_event(client, ZSERV_CLIENT_READ);
423
424 stream_fifo_free(cache);
425
426 return 0;
427
428 zread_fail:
429 stream_fifo_free(cache);
430 zserv_client_close(client);
431 return -1;
432 }
433
434 static void zserv_client_event(struct zserv *client,
435 enum zserv_client_event event)
436 {
437 switch (event) {
438 case ZSERV_CLIENT_READ:
439 thread_add_read(client->pthread->master, zserv_read, client,
440 client->sock, &client->t_read);
441 break;
442 case ZSERV_CLIENT_WRITE:
443 thread_add_write(client->pthread->master, zserv_write, client,
444 client->sock, &client->t_write);
445 break;
446 }
447 }
448
449 /* Main thread lifecycle ---------------------------------------------------- */
450
451 /*
452 * Read and process messages from a client.
453 *
454 * This task runs on the main pthread. It is scheduled by client pthreads when
455 * they have new messages available on their input queues. The client is passed
456 * as the task argument.
457 *
458 * Each message is popped off the client's input queue and the action associated
459 * with the message is executed. This proceeds until there are no more messages,
460 * an error occurs, or the processing limit is reached.
461 *
462 * The client's I/O thread can push at most zebrad.packets_to_process messages
463 * onto the input buffer before notifying us there are packets to read. As long
464 * as we always process zebrad.packets_to_process messages here, then we can
465 * rely on the read thread to handle queuing this task enough times to process
466 * everything on the input queue.
467 */
468 static int zserv_process_messages(struct thread *thread)
469 {
470 struct zserv *client = THREAD_ARG(thread);
471 struct stream *msg;
472 struct stream_fifo *cache = stream_fifo_new();
473
474 uint32_t p2p = zebrad.packets_to_process;
475
476 pthread_mutex_lock(&client->ibuf_mtx);
477 {
478 uint32_t i;
479 for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
480 ++i) {
481 msg = stream_fifo_pop(client->ibuf_fifo);
482 stream_fifo_push(cache, msg);
483 }
484
485 msg = NULL;
486 }
487 pthread_mutex_unlock(&client->ibuf_mtx);
488
489 while (stream_fifo_head(cache)) {
490 msg = stream_fifo_pop(cache);
491 zserv_handle_commands(client, msg);
492 stream_free(msg);
493 }
494
495 stream_fifo_free(cache);
496
497 return 0;
498 }
499
500 int zserv_send_message(struct zserv *client, struct stream *msg)
501 {
502 /*
503 * This is a somewhat poorly named variable added with Zebra's portion
504 * of the label manager. That component does not use the regular
505 * zserv/zapi_msg interface for handling its messages, as the client
506 * itself runs in-process. Instead it uses synchronous writes on the
507 * zserv client's socket directly in the zread* handlers for its
508 * message types. Furthermore, it cannot handle the usual messages
509 * Zebra sends (such as those for interface changes) and so has added
510 * this flag and check here as a hack to suppress all messages that it
511 * does not explicitly know about.
512 *
513 * In any case this needs to be cleaned up at some point.
514 *
515 * See also:
516 * zread_label_manager_request
517 * zsend_label_manager_connect_response
518 * zsend_assign_label_chunk_response
519 * ...
520 */
521 if (client->is_synchronous)
522 return 0;
523
524 pthread_mutex_lock(&client->obuf_mtx);
525 {
526 stream_fifo_push(client->obuf_fifo, msg);
527 zserv_client_event(client, ZSERV_CLIENT_WRITE);
528 }
529 pthread_mutex_unlock(&client->obuf_mtx);
530 return 0;
531 }
532
533
/* Hooks for client connect / disconnect */
DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
/* KOOH variant: per lib/hook.h this is the reverse-ordered hook form, used
 * here so close handlers can unwind in the opposite order of connect. */
DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
537
538 /*
539 * Deinitialize zebra client.
540 *
541 * - Deregister and deinitialize related internal resources
542 * - Gracefully close socket
543 * - Free associated resources
544 * - Free client structure
545 *
546 * This does *not* take any action on the struct thread * fields. These are
547 * managed by the owning pthread and any tasks associated with them must have
548 * been stopped prior to invoking this function.
549 */
550 static void zserv_client_free(struct zserv *client)
551 {
552 hook_call(zserv_client_close, client);
553
554 /* Close file descriptor. */
555 if (client->sock) {
556 unsigned long nroutes;
557
558 close(client->sock);
559 nroutes = rib_score_proto(client->proto, client->instance);
560 zlog_notice(
561 "client %d disconnected. %lu %s routes removed from the rib",
562 client->sock, nroutes,
563 zebra_route_string(client->proto));
564 client->sock = -1;
565 }
566
567 /* Free stream buffers. */
568 if (client->ibuf_work)
569 stream_free(client->ibuf_work);
570 if (client->obuf_work)
571 stream_free(client->obuf_work);
572 if (client->ibuf_fifo)
573 stream_fifo_free(client->ibuf_fifo);
574 if (client->obuf_fifo)
575 stream_fifo_free(client->obuf_fifo);
576 if (client->wb)
577 buffer_free(client->wb);
578
579 /* Free buffer mutexes */
580 pthread_mutex_destroy(&client->obuf_mtx);
581 pthread_mutex_destroy(&client->ibuf_mtx);
582
583 /* Free bitmaps. */
584 for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
585 for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
586 vrf_bitmap_free(client->redist[afi][i]);
587
588 vrf_bitmap_free(client->redist_default);
589 vrf_bitmap_free(client->ifinfo);
590 vrf_bitmap_free(client->ridinfo);
591
592 XFREE(MTYPE_TMP, client);
593 }
594
/*
 * Finish closing a client.
 *
 * This task is scheduled by a ZAPI client pthread on the main pthread when it
 * wants to stop itself. When this executes, the client connection should
 * already have been closed. This task's responsibility is to gracefully
 * terminate the client thread, update relevant internal datastructures and
 * free any resources allocated by the main thread.
 */
static int zserv_handle_client_close(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);

	/*
	 * Ensure these have been nulled. This does not equate to the
	 * associated task(s) being scheduled or unscheduled on the client
	 * pthread's threadmaster.
	 */
	assert(!client->t_read);
	assert(!client->t_write);

	/* synchronously stop thread */
	frr_pthread_stop(client->pthread, NULL);

	/* destroy frr_pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	/* Remove from the global list before freeing so no other main-thread
	 * task can find a half-destroyed client. */
	listnode_delete(zebrad.client_list, client);
	zserv_client_free(client);
	return 0;
}
627
/*
 * Create a new client.
 *
 * This is called when a new connection is accept()'d on the ZAPI socket. It
 * initializes new client structure, notifies any subscribers of the connection
 * event and spawns the client's thread.
 *
 * sock
 *    client's socket file descriptor
 */
static void zserv_client_create(int sock)
{
	struct zserv *client;
	int i;
	afi_t afi;

	/* XCALLOC zeroes the struct, so all flags/counters start at 0. */
	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	client->wb = buffer_new(0);

	/* Set table number. */
	client->rtm_table = zebrad.rtm_table_default;

	atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
			      memory_order_relaxed);

	/* Initialize flags */
	for (afi = AFI_IP; afi < AFI_MAX; afi++)
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
	client->redist_default = vrf_bitmap_init();
	client->ifinfo = vrf_bitmap_init();
	client->ridinfo = vrf_bitmap_init();

	/* by default, it's not a synchronous client */
	client->is_synchronous = 0;

	/* Add this client to linked list. */
	listnode_add(zebrad.client_list, client);

	struct frr_pthread_attr zclient_pthr_attrs = {
		.id = frr_pthread_get_id(),
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread =
		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread");

	zebra_vrf_update_all(client);

	/* start read loop: queued on the client's (not yet running) pthread,
	 * so it first fires once the pthread starts below */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
	frr_pthread_run(client->pthread, NULL);
}
695
696 /*
697 * Accept socket connection.
698 */
699 static int zserv_accept(struct thread *thread)
700 {
701 int accept_sock;
702 int client_sock;
703 struct sockaddr_in client;
704 socklen_t len;
705
706 accept_sock = THREAD_FD(thread);
707
708 /* Reregister myself. */
709 zserv_event(NULL, ZSERV_ACCEPT);
710
711 len = sizeof(struct sockaddr_in);
712 client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
713
714 if (client_sock < 0) {
715 zlog_warn("Can't accept zebra socket: %s",
716 safe_strerror(errno));
717 return -1;
718 }
719
720 /* Make client socket non-blocking. */
721 set_nonblocking(client_sock);
722
723 /* Create new zebra client. */
724 zserv_client_create(client_sock);
725
726 return 0;
727 }
728
/*
 * Open, bind and listen on the ZAPI socket given by 'path'.
 *
 * 'path' is parsed by frr_zclient_addr(); it may describe a UNIX-domain
 * socket or a TCP endpoint. On any failure zebrad.sock is left at -1 and a
 * warning is logged; zebra keeps running with reduced functionality.
 */
void zserv_start(char *path)
{
	int ret;
	mode_t old_mask;
	struct sockaddr_storage sa;
	socklen_t sa_len;

	if (!frr_zclient_addr(&sa, &sa_len, path))
		/* should be caught in zebra main() */
		return;

	/* Set umask so a UNIX socket created below is not world-accessible;
	 * restored before returning. */
	old_mask = umask(0077);

	/* Make UNIX domain socket. */
	zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
	if (zebrad.sock < 0) {
		zlog_warn("Can't create zserv socket: %s",
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		return;
	}

	if (sa.ss_family != AF_UNIX) {
		sockopt_reuseaddr(zebrad.sock);
		sockopt_reuseport(zebrad.sock);
	} else {
		/* Remove a stale socket file so bind() below can succeed. */
		struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
		if (suna->sun_path[0])
			unlink(suna->sun_path);
	}

	/* Buffer sizes are best-effort; setsockopt failures are logged by the
	 * helpers and otherwise ignored. */
	zserv_privs.change(ZPRIVS_RAISE);
	setsockopt_so_recvbuf(zebrad.sock, 1048576);
	setsockopt_so_sendbuf(zebrad.sock, 1048576);
	zserv_privs.change(ZPRIVS_LOWER);

	/* Privileges are raised around bind() only for network sockets
	 * (binding privileged ports); AF_UNIX needs no elevation here. */
	if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_RAISE))
		zlog_err("Can't raise privileges");

	ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
	if (ret < 0) {
		zlog_warn("Can't bind zserv socket on %s: %s", path,
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		close(zebrad.sock);
		zebrad.sock = -1;
		return;
	}
	if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_LOWER))
		zlog_err("Can't lower privileges");

	ret = listen(zebrad.sock, 5);
	if (ret < 0) {
		zlog_warn("Can't listen to zserv socket %s: %s", path,
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		close(zebrad.sock);
		zebrad.sock = -1;
		return;
	}

	umask(old_mask);

	/* Kick off the accept loop on the main threadmaster. */
	zserv_event(NULL, ZSERV_ACCEPT);
}
798
799 void zserv_event(struct zserv *client, enum zserv_event event)
800 {
801 switch (event) {
802 case ZSERV_ACCEPT:
803 thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
804 NULL);
805 break;
806 case ZSERV_PROCESS_MESSAGES:
807 thread_add_event(zebrad.master, zserv_process_messages, client,
808 0, NULL);
809 break;
810 case ZSERV_HANDLE_CLOSE:
811 thread_add_event(zebrad.master, zserv_handle_client_close,
812 client, 0, NULL);
813 }
814 }
815
816
817 /* General purpose ---------------------------------------------------------- */
818
819 #define ZEBRA_TIME_BUF 32
820 static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
821 {
822 struct tm *tm;
823 time_t now;
824
825 assert(buf != NULL);
826 assert(buflen >= ZEBRA_TIME_BUF);
827 assert(time1 != NULL);
828
829 if (!*time1) {
830 snprintf(buf, buflen, "never ");
831 return (buf);
832 }
833
834 now = monotime(NULL);
835 now -= *time1;
836 tm = gmtime(&now);
837
838 if (now < ONE_DAY_SECOND)
839 snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
840 tm->tm_sec);
841 else if (now < ONE_WEEK_SECOND)
842 snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour,
843 tm->tm_min);
844 else
845 snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7,
846 tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour);
847 return buf;
848 }
849
/*
 * Print a detailed report for one client to the given vty: identity,
 * connection/activity timestamps, last commands, and per-category counters.
 */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint16_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	/* Timestamps are written by the client's I/O pthread; load them
	 * atomically to get a consistent snapshot. */
	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	/* Command 0 means "nothing recorded yet"; skip the line then. */
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/* Counter table; the hard-coded 0 columns are categories with no
	 * "update" counter. */
	vty_out(vty, "Type Add Update Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4 %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6 %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4 %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6 %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

	vty_out(vty, "\n");
	return;
}
932
/*
 * Print a one-line summary for a client: name, connect/read/write times and
 * IPv4/IPv6 route counters as (added+updated)/deleted. Column layout must
 * match the header printed by show_zebra_client_summary.
 */
static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;

	/* Snapshot timestamps written by the client's I/O pthread. */
	connect_time = (time_t)atomic_load_explicit(&client->connect_time,
						    memory_order_relaxed);
	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
		zebra_route_string(client->proto),
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
		client->v4_route_add_cnt + client->v4_route_upd8_cnt,
		client->v4_route_del_cnt,
		client->v6_route_add_cnt + client->v6_route_upd8_cnt,
		client->v6_route_del_cnt);
}
956
957 struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
958 {
959 struct listnode *node, *nnode;
960 struct zserv *client;
961
962 for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
963 if (client->proto == proto && client->instance == instance)
964 return client;
965 }
966
967 return NULL;
968 }
969
/* This command is for debugging purpose: dump the detailed report for every
 * connected ZAPI client. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_detail(vty, client);

	return CMD_SUCCESS;
}
986
/* This command is for debugging purpose: one summary line per connected ZAPI
 * client, under a fixed-width header matching zebra_show_client_brief(). */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	vty_out(vty,
		"Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}
1010
#if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing entry point: feed a file through the ZAPI read path.
 *
 * Fixes two defects in the previous version: it called the nonexistent
 * zebra_client_create() (the function in this file is zserv_client_create),
 * and it dereferenced 'client' while it was still NULL. zserv_client_create()
 * returns void but appends the new client to zebrad.client_list, so the
 * client is fetched from the list tail. The client pthread is stopped so
 * zserv_read() runs synchronously here.
 *
 * NOTE(review): open() flags assume <fcntl.h> is pulled in via zebra.h —
 * confirm when building with HANDLE_ZAPI_FUZZING.
 */
void zserv_read_file(char *input)
{
	int fd;
	struct zserv *client;
	struct thread t;

	zserv_client_create(-1);
	client = listgetdata(listtail(zebrad.client_list));

	frr_pthread_stop(client->pthread, NULL);
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	t.arg = client;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return;
	t.u.fd = fd;

	zserv_read(&t);

	close(fd);
}
#endif
1034
1035 void zserv_init(void)
1036 {
1037 /* Client list init. */
1038 zebrad.client_list = list_new();
1039 zebrad.client_list->del = (void (*)(void *)) zserv_client_free;
1040
1041 /* Misc init. */
1042 zebrad.sock = -1;
1043
1044 install_element(ENABLE_NODE, &show_zebra_client_cmd);
1045 install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
1046 }