]> git.proxmox.com Git - mirror_frr.git/blob - zebra/zserv.c
zebra: delay default vrf name after vrf initialization
[mirror_frr.git] / zebra / zserv.c
1 /*
2 * Zebra API server.
3 * Portions:
4 * Copyright (C) 1997-1999 Kunihiro Ishiguro
5 * Copyright (C) 2015-2018 Cumulus Networks, Inc.
6 * et al.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; see the file COPYING; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <zebra.h>
24
25 /* clang-format off */
26 #include <errno.h> /* for errno */
27 #include <netinet/in.h> /* for sockaddr_in */
28 #include <stdint.h> /* for uint8_t */
29 #include <stdio.h> /* for snprintf */
30 #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */
31 #include <sys/stat.h> /* for umask, mode_t */
32 #include <sys/un.h> /* for sockaddr_un */
33 #include <time.h> /* for NULL, tm, gmtime, time_t */
34 #include <unistd.h> /* for close, unlink, ssize_t */
35
36 #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
37 #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */
38 #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
39 #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
40 #include "lib/libfrr.h" /* for frr_zclient_addr */
41 #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */
42 #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */
43 #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
44 #include "lib/network.h" /* for set_nonblocking */
45 #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
46 #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
47 #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
48 #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
49 #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
50 #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
51 #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
52 #include "lib/vty.h" /* for vty_out, vty (ptr only) */
53 #include "lib/zassert.h" /* for assert */
54 #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
55 #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
56 #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */
57 #include "lib/lib_errors.h" /* for generic ferr ids */
58
59 #include "zebra/debug.h" /* for various debugging macros */
60 #include "zebra/rib.h" /* for rib_score_proto */
61 #include "zebra/zapi_msg.h" /* for zserv_handle_commands */
62 #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */
63 #include "zebra/zserv.h" /* for zserv */
64 #include "zebra/zebra_errors.h" /* for error messages */
65 /* clang-format on */
66
67 /* privileges */
68 extern struct zebra_privs_t zserv_privs;
69
70 /*
71 * Client thread events.
72 *
73 * These are used almost exclusively by client threads to drive their own event
74 * loops. The only exception is in zserv_client_create(), which pushes an
75 * initial ZSERV_CLIENT_READ event to start the API handler loop.
76 */
77 enum zserv_client_event {
78 /* Schedule a socket read */
79 ZSERV_CLIENT_READ,
80 /* Schedule a buffer write */
81 ZSERV_CLIENT_WRITE,
82 };
83
84 /*
85 * Main thread events.
86 *
87 * These are used by client threads to notify the main thread about various
88 * events and to make processing requests.
89 */
90 enum zserv_event {
91 /* Schedule listen job on Zebra API socket */
92 ZSERV_ACCEPT,
93 /* The calling client has packets on its input buffer */
94 ZSERV_PROCESS_MESSAGES,
95 /* The calling client wishes to be killed */
96 ZSERV_HANDLE_CLIENT_FAIL,
97 };
98
99 /*
100 * Zebra server event driver for all client threads.
101 *
102 * This is essentially a wrapper around thread_add_event() that centralizes
103 * those scheduling calls into one place.
104 *
105 * All calls to this function schedule an event on the pthread running the
106 * provided client.
107 *
108 * client
109 * the client in question, and thread target
110 *
111 * event
112 * the event to notify them about
113 */
114 static void zserv_client_event(struct zserv *client,
115 enum zserv_client_event event);
116
117 /*
118 * Zebra server event driver for the main thread.
119 *
120 * This is essentially a wrapper around thread_add_event() that centralizes
121 * those scheduling calls into one place.
122 *
123 * All calls to this function schedule an event on Zebra's main pthread.
124 *
125 * client
126 * the client in question
127 *
128 * event
129 * the event to notify the main thread about
130 */
131 static void zserv_event(struct zserv *client, enum zserv_event event);
132
133
134 /* Client thread lifecycle -------------------------------------------------- */
135
136 /*
137 * Log zapi message to zlog.
138 *
139 * errmsg (optional)
140 * Debugging message
141 *
142 * msg
143 * The message
144 *
145 * hdr (optional)
146 * The message header
147 */
148 static void zserv_log_message(const char *errmsg, struct stream *msg,
149 struct zmsghdr *hdr)
150 {
151 zlog_debug("Rx'd ZAPI message");
152 if (errmsg)
153 zlog_debug("%s", errmsg);
154 if (hdr) {
155 zlog_debug(" Length: %d", hdr->length);
156 zlog_debug("Command: %s", zserv_command_string(hdr->command));
157 zlog_debug(" VRF: %u", hdr->vrf_id);
158 }
159 zlog_hexdump(msg->data, STREAM_READABLE(msg));
160 }
161
162 /*
163 * Gracefully shut down a client connection.
164 *
165 * Cancel any pending tasks for the client's thread. Then schedule a task on
166 * the main thread to shut down the calling thread.
167 *
168 * It is not safe to close the client socket in this function. The socket is
169 * owned by the main thread.
170 *
171 * Must be called from the client pthread, never the main thread.
172 */
static void zserv_client_fail(struct zserv *client)
{
	flog_warn(EC_ZEBRA_CLIENT_IO_ERROR,
		  "Client '%s' encountered an error and is shutting down.",
		  zebra_route_string(client->proto));

	/* Mark this client's pthread as no longer running so its event loop
	 * terminates after the current callback returns. */
	atomic_store_explicit(&client->pthread->running, false,
			      memory_order_relaxed);

	/* Cancel pending I/O tasks on the client pthread. The socket itself
	 * is owned by the main thread and must not be closed here. */
	THREAD_OFF(client->t_read);
	THREAD_OFF(client->t_write);

	/* Hand the actual teardown (join, free) off to the main thread. */
	zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL);
}
186
187 /*
188 * Write all pending messages to client socket.
189 *
190 * This function first attempts to flush any buffered data. If unsuccessful,
191 * the function reschedules itself and returns. If successful, it pops all
192 * available messages from the output queue and continues to write data
193 * directly to the socket until the socket would block. If the socket never
194 * blocks and all data is written, the function returns without rescheduling
195 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
196 * buffered and the function reschedules itself.
197 *
198 * The utility of the buffer is that it allows us to vastly reduce lock
199 * contention by allowing us to pop *all* messages off the output queue at once
200 * instead of locking and unlocking each time we want to pop a single message
201 * off the queue. The same thing could arguably be accomplished faster by
202 * allowing the main thread to write directly into the buffer instead of
203 * enqueuing packets onto an intermediary queue, but the intermediary queue
204 * allows us to expose information about input and output queues to the user in
205 * terms of number of packets rather than size of data.
206 */
static int zserv_write(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
	uint32_t wcmd = 0;
	struct stream_fifo *cache;

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		/* Socket would block; note the attempt and reschedule. */
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	cache = stream_fifo_new();

	/* Drain the shared output queue into a private fifo in one critical
	 * section, minimizing lock contention with the main thread. */
	pthread_mutex_lock(&client->obuf_mtx);
	{
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	/* Record the command of the newest queued message (fifo tail); it is
	 * published as last_write_cmd once the flush succeeds. The value is
	 * read from the message's serialized header. */
	if (cache->tail) {
		msg = cache->tail;
		stream_set_getp(msg, 0);
		wcmd = stream_getw_from(msg, 6);
	}

	/* Copy every dequeued message into the write buffer and release it. */
	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
		stream_free(msg);
	}

	stream_fifo_free(cache);

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	/* Everything was written; update session statistics. */
	atomic_store_explicit(&client->last_write_cmd, wcmd,
			      memory_order_relaxed);

	atomic_store_explicit(&client->last_write_time,
			      (uint32_t)monotime(NULL), memory_order_relaxed);

	return 0;

zwrite_fail:
	flog_warn(EC_ZEBRA_CLIENT_WRITE_FAILED,
		  "%s: could not write to %s [fd = %d], closing.", __func__,
		  zebra_route_string(client->proto), client->sock);
	zserv_client_fail(client);
	return 0;
}
281
282 /*
283 * Read and process data from a client socket.
284 *
285 * The responsibilities here are to read raw data from the client socket,
286 * validate the header, encapsulate it into a single stream object, push it
287 * onto the input queue and then notify the main thread that there is new data
288 * available.
289 *
290 * This function first looks for any data in the client structure's working
291 * input buffer. If data is present, it is assumed that reading stopped in a
292 * previous invocation of this task and needs to be resumed to finish a message.
293 * Otherwise, the socket data stream is assumed to be at the beginning of a new
294 * ZAPI message (specifically at the header). The header is read and validated.
295 * If the header passed validation then the length field found in the header is
296 * used to compute the total length of the message. That much data is read (but
297 * not inspected), appended to the header, placed into a stream and pushed onto
298 * the client's input queue. A task is then scheduled on the main thread to
299 * process the client's input queue. Finally, if all of this was successful,
300 * this task reschedules itself.
301 *
302 * Any failure in any of these actions is handled by terminating the client.
303 */
static int zserv_read(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	int sock;
	size_t already;
	struct stream_fifo *cache;
	uint32_t p2p_orig;

	uint32_t p2p;
	struct zmsghdr hdr;

	/* Snapshot the per-wakeup packet budget; the setting is read with an
	 * atomic load because it can be changed from another thread. */
	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
					memory_order_relaxed);
	cache = stream_fifo_new();
	p2p = p2p_orig;
	sock = THREAD_FD(thread);

	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];

		/* Bytes already accumulated in the working buffer; nonzero
		 * means a previous invocation stopped mid-message. */
		already = stream_get_endp(client->ibuf_work);

		/* Read length and command (if we don't have it already). */
		if (already < ZEBRA_HEADER_SIZE) {
			nb = stream_read_try(client->ibuf_work, sock,
					     ZEBRA_HEADER_SIZE - already);
			if ((nb == 0 || nb == -1)) {
				/* Peer closed the connection or hard error. */
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug("connection closed socket [%d]",
						   sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
				/* Try again later. */
				break;
			}
			already = ZEBRA_HEADER_SIZE;
		}

		/* Reset to read from the beginning of the incoming packet. */
		stream_set_getp(client->ibuf_work, 0);

		/* Fetch header values */
		hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);

		if (!hdrvalid) {
			snprintf(errmsg, sizeof(errmsg),
				 "%s: Message has corrupt header", __func__);
			zserv_log_message(errmsg, client->ibuf_work, NULL);
			goto zread_fail;
		}

		/* Validate header */
		if (hdr.marker != ZEBRA_HEADER_MARKER
		    || hdr.version != ZSERV_VERSION) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
				__func__, sock, hdr.marker, hdr.version);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length < ZEBRA_HEADER_SIZE) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
				__func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}

		/* Read rest of data. */
		if (already < hdr.length) {
			nb = stream_read_try(client->ibuf_work, sock,
					     hdr.length - already);
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug(
						"connection closed [%d] when reading zebra data",
						sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(hdr.length - already)) {
				/* Try again later. */
				break;
			}
		}

		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("zebra message comes from socket [%d]",
				   sock);

		if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
			zserv_log_message(NULL, client->ibuf_work, &hdr);

		/* Copy the complete message into its own stream and reset the
		 * working buffer for the next message. */
		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

		stream_fifo_push(cache, msg);
		stream_reset(client->ibuf_work);
		p2p--;
	}

	/* p2p < p2p_orig iff at least one complete message was cached, which
	 * also guarantees hdr holds that message's parsed header. */
	if (p2p < p2p_orig) {
		/* update session statistics */
		atomic_store_explicit(&client->last_read_time, monotime(NULL),
				      memory_order_relaxed);
		atomic_store_explicit(&client->last_read_cmd, hdr.command,
				      memory_order_relaxed);

		/* publish read packets on client's input queue */
		pthread_mutex_lock(&client->ibuf_mtx);
		{
			while (cache->head)
				stream_fifo_push(client->ibuf_fifo,
						 stream_fifo_pop(cache));
		}
		pthread_mutex_unlock(&client->ibuf_mtx);

		/* Schedule job to process those packets */
		zserv_event(client, ZSERV_PROCESS_MESSAGES);

	}

	if (IS_ZEBRA_DEBUG_PACKET)
		zlog_debug("Read %d packets", p2p_orig - p2p);

	/* Reschedule ourselves */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	stream_fifo_free(cache);

	return 0;

zread_fail:
	stream_fifo_free(cache);
	zserv_client_fail(client);
	return -1;
}
455
456 static void zserv_client_event(struct zserv *client,
457 enum zserv_client_event event)
458 {
459 switch (event) {
460 case ZSERV_CLIENT_READ:
461 thread_add_read(client->pthread->master, zserv_read, client,
462 client->sock, &client->t_read);
463 break;
464 case ZSERV_CLIENT_WRITE:
465 thread_add_write(client->pthread->master, zserv_write, client,
466 client->sock, &client->t_write);
467 break;
468 }
469 }
470
471 /* Main thread lifecycle ---------------------------------------------------- */
472
473 /*
474 * Read and process messages from a client.
475 *
476 * This task runs on the main pthread. It is scheduled by client pthreads when
477 * they have new messages available on their input queues. The client is passed
478 * as the task argument.
479 *
480 * Each message is popped off the client's input queue and the action associated
481 * with the message is executed. This proceeds until there are no more messages,
482 * an error occurs, or the processing limit is reached.
483 *
484 * The client's I/O thread can push at most zebrad.packets_to_process messages
485 * onto the input buffer before notifying us there are packets to read. As long
486 * as we always process zebrad.packets_to_process messages here, then we can
487 * rely on the read thread to handle queuing this task enough times to process
488 * everything on the input queue.
489 */
490 static int zserv_process_messages(struct thread *thread)
491 {
492 struct zserv *client = THREAD_ARG(thread);
493 struct stream *msg;
494 struct stream_fifo *cache = stream_fifo_new();
495
496 uint32_t p2p = zebrad.packets_to_process;
497
498 pthread_mutex_lock(&client->ibuf_mtx);
499 {
500 uint32_t i;
501 for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
502 ++i) {
503 msg = stream_fifo_pop(client->ibuf_fifo);
504 stream_fifo_push(cache, msg);
505 }
506
507 msg = NULL;
508 }
509 pthread_mutex_unlock(&client->ibuf_mtx);
510
511 while (stream_fifo_head(cache)) {
512 msg = stream_fifo_pop(cache);
513 zserv_handle_commands(client, msg);
514 stream_free(msg);
515 }
516
517 stream_fifo_free(cache);
518
519 return 0;
520 }
521
522 int zserv_send_message(struct zserv *client, struct stream *msg)
523 {
524 /*
525 * This is a somewhat poorly named variable added with Zebra's portion
526 * of the label manager. That component does not use the regular
527 * zserv/zapi_msg interface for handling its messages, as the client
528 * itself runs in-process. Instead it uses synchronous writes on the
529 * zserv client's socket directly in the zread* handlers for its
530 * message types. Furthermore, it cannot handle the usual messages
531 * Zebra sends (such as those for interface changes) and so has added
532 * this flag and check here as a hack to suppress all messages that it
533 * does not explicitly know about.
534 *
535 * In any case this needs to be cleaned up at some point.
536 *
537 * See also:
538 * zread_label_manager_request
539 * zsend_label_manager_connect_response
540 * zsend_assign_label_chunk_response
541 * ...
542 */
543 if (client->is_synchronous)
544 return 0;
545
546 pthread_mutex_lock(&client->obuf_mtx);
547 {
548 stream_fifo_push(client->obuf_fifo, msg);
549 }
550 pthread_mutex_unlock(&client->obuf_mtx);
551
552 zserv_client_event(client, ZSERV_CLIENT_WRITE);
553
554 return 0;
555 }
556
557
558 /* Hooks for client connect / disconnect */
559 DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
560 DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
561
562 /*
563 * Deinitialize zebra client.
564 *
565 * - Deregister and deinitialize related internal resources
566 * - Gracefully close socket
567 * - Free associated resources
568 * - Free client structure
569 *
570 * This does *not* take any action on the struct thread * fields. These are
571 * managed by the owning pthread and any tasks associated with them must have
572 * been stopped prior to invoking this function.
573 */
574 static void zserv_client_free(struct zserv *client)
575 {
576 hook_call(zserv_client_close, client);
577
578 /* Close file descriptor. */
579 if (client->sock) {
580 unsigned long nroutes;
581
582 close(client->sock);
583
584 nroutes = rib_score_proto(client->proto, client->instance);
585 zlog_notice(
586 "client %d disconnected. %lu %s routes removed from the rib",
587 client->sock, nroutes,
588 zebra_route_string(client->proto));
589 client->sock = -1;
590 }
591
592 /* Free stream buffers. */
593 if (client->ibuf_work)
594 stream_free(client->ibuf_work);
595 if (client->obuf_work)
596 stream_free(client->obuf_work);
597 if (client->ibuf_fifo)
598 stream_fifo_free(client->ibuf_fifo);
599 if (client->obuf_fifo)
600 stream_fifo_free(client->obuf_fifo);
601 if (client->wb)
602 buffer_free(client->wb);
603
604 /* Free buffer mutexes */
605 pthread_mutex_destroy(&client->obuf_mtx);
606 pthread_mutex_destroy(&client->ibuf_mtx);
607
608 /* Free bitmaps. */
609 for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
610 for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
611 vrf_bitmap_free(client->redist[afi][i]);
612
613 vrf_bitmap_free(client->redist_default);
614 vrf_bitmap_free(client->ifinfo);
615 vrf_bitmap_free(client->ridinfo);
616
617 XFREE(MTYPE_TMP, client);
618 }
619
void zserv_close_client(struct zserv *client)
{
	/* synchronously stop and join pthread */
	frr_pthread_stop(client->pthread, NULL);

	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("Closing client '%s'",
			   zebra_route_string(client->proto));

	/* Drop any tasks the client still has queued on the main thread so
	 * they cannot fire against freed memory. */
	thread_cancel_event(zebrad.master, client);
	THREAD_OFF(client->t_cleanup);

	/* destroy pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	/* remove from client list */
	listnode_delete(zebrad.client_list, client);

	/* delete client */
	zserv_client_free(client);
}
642
643 /*
644 * This task is scheduled by a ZAPI client pthread on the main pthread when it
645 * wants to stop itself. When this executes, the client connection should
646 * already have been closed and the thread will most likely have died, but its
647 * resources still need to be cleaned up.
648 */
/* Main-thread task: tear down a client whose pthread reported failure. */
static int zserv_handle_client_fail(struct thread *thread)
{
	zserv_close_client(THREAD_ARG(thread));

	return 0;
}
656
657 /*
658 * Create a new client.
659 *
660 * This is called when a new connection is accept()'d on the ZAPI socket. It
661 * initializes new client structure, notifies any subscribers of the connection
662 * event and spawns the client's thread.
663 *
664 * sock
665 * client's socket file descriptor
666 */
static struct zserv *zserv_client_create(int sock)
{
	struct zserv *client;
	int i;
	afi_t afi;

	/* XCALLOC zeroes the structure, so unset fields start at 0/NULL. */
	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	client->wb = buffer_new(0);

	/* Set table number. */
	client->rtm_table = zebrad.rtm_table_default;

	atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
			      memory_order_relaxed);

	/* Initialize flags: one redistribution bitmap per (AFI, route type). */
	for (afi = AFI_IP; afi < AFI_MAX; afi++)
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
	client->redist_default = vrf_bitmap_init();
	client->ifinfo = vrf_bitmap_init();
	client->ridinfo = vrf_bitmap_init();

	/* by default, it's not a synchronous client */
	client->is_synchronous = 0;

	/* Add this client to linked list. */
	listnode_add(zebrad.client_list, client);

	/* Create (but do not yet run) the pthread that will own this
	 * client's socket I/O. */
	struct frr_pthread_attr zclient_pthr_attrs = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread =
		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread",
				"zebra_apic");

	zebra_vrf_update_all(client);

	/* start read loop: queue the initial read event before the pthread
	 * starts so it has work as soon as it runs */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
	frr_pthread_run(client->pthread, NULL);

	return client;
}
726
727 /*
728 * Accept socket connection.
729 */
730 static int zserv_accept(struct thread *thread)
731 {
732 int accept_sock;
733 int client_sock;
734 struct sockaddr_in client;
735 socklen_t len;
736
737 accept_sock = THREAD_FD(thread);
738
739 /* Reregister myself. */
740 zserv_event(NULL, ZSERV_ACCEPT);
741
742 len = sizeof(struct sockaddr_in);
743 client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
744
745 if (client_sock < 0) {
746 flog_err_sys(EC_LIB_SOCKET, "Can't accept zebra socket: %s",
747 safe_strerror(errno));
748 return -1;
749 }
750
751 /* Make client socket non-blocking. */
752 set_nonblocking(client_sock);
753
754 /* Create new zebra client. */
755 zserv_client_create(client_sock);
756
757 return 0;
758 }
759
760 void zserv_start(char *path)
761 {
762 int ret;
763 mode_t old_mask;
764 struct sockaddr_storage sa;
765 socklen_t sa_len;
766
767 if (!frr_zclient_addr(&sa, &sa_len, path))
768 /* should be caught in zebra main() */
769 return;
770
771 /* Set umask */
772 old_mask = umask(0077);
773
774 /* Make UNIX domain socket. */
775 zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
776 if (zebrad.sock < 0) {
777 flog_err_sys(EC_LIB_SOCKET, "Can't create zserv socket: %s",
778 safe_strerror(errno));
779 return;
780 }
781
782 if (sa.ss_family != AF_UNIX) {
783 sockopt_reuseaddr(zebrad.sock);
784 sockopt_reuseport(zebrad.sock);
785 } else {
786 struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
787 if (suna->sun_path[0])
788 unlink(suna->sun_path);
789 }
790
791 frr_elevate_privs(&zserv_privs) {
792 setsockopt_so_recvbuf(zebrad.sock, 1048576);
793 setsockopt_so_sendbuf(zebrad.sock, 1048576);
794 }
795
796 frr_elevate_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
797 ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
798 }
799 if (ret < 0) {
800 flog_err_sys(EC_LIB_SOCKET, "Can't bind zserv socket on %s: %s",
801 path, safe_strerror(errno));
802 close(zebrad.sock);
803 zebrad.sock = -1;
804 return;
805 }
806
807 ret = listen(zebrad.sock, 5);
808 if (ret < 0) {
809 flog_err_sys(EC_LIB_SOCKET,
810 "Can't listen to zserv socket %s: %s", path,
811 safe_strerror(errno));
812 close(zebrad.sock);
813 zebrad.sock = -1;
814 return;
815 }
816
817 umask(old_mask);
818
819 zserv_event(NULL, ZSERV_ACCEPT);
820 }
821
822 void zserv_event(struct zserv *client, enum zserv_event event)
823 {
824 switch (event) {
825 case ZSERV_ACCEPT:
826 thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
827 NULL);
828 break;
829 case ZSERV_PROCESS_MESSAGES:
830 thread_add_event(zebrad.master, zserv_process_messages, client,
831 0, NULL);
832 break;
833 case ZSERV_HANDLE_CLIENT_FAIL:
834 thread_add_event(zebrad.master, zserv_handle_client_fail,
835 client, 0, &client->t_cleanup);
836 }
837 }
838
839
840 /* General purpose ---------------------------------------------------------- */
841
842 #define ZEBRA_TIME_BUF 32
843 static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
844 {
845 struct tm *tm;
846 time_t now;
847
848 assert(buf != NULL);
849 assert(buflen >= ZEBRA_TIME_BUF);
850 assert(time1 != NULL);
851
852 if (!*time1) {
853 snprintf(buf, buflen, "never ");
854 return (buf);
855 }
856
857 now = monotime(NULL);
858 now -= *time1;
859 tm = gmtime(&now);
860
861 if (now < ONE_DAY_SECOND)
862 snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
863 tm->tm_sec);
864 else if (now < ONE_WEEK_SECOND)
865 snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour,
866 tm->tm_min);
867 else
868 snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7,
869 tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour);
870 return buf;
871 }
872
/* Print one client's full connection state and message counters to the vty
 * ("show zebra client"). */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint32_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	/* These statistics are written from the client's I/O pthread, so
	 * read them with atomic loads. */
	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/* Per-message-type add/update/delete counters. */
	vty_out(vty, "Type Add Update Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4 %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6 %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4 %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6 %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "NHT v4 %-12d%-12d%-12d\n",
		client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt);
	vty_out(vty, "NHT v6 %-12d%-12d%-12d\n",
		client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

#if defined DEV_BUILD
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
	return;
}
964
/* Print one line of the "show zebra client summary" table for a client. */
static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;

	/* Timestamps are written from the client's I/O pthread; use atomic
	 * loads to read them here on the main thread. */
	connect_time = (time_t)atomic_load_explicit(&client->connect_time,
						    memory_order_relaxed);
	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	/* Route columns are formatted as (added+updated)/deleted. */
	vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
		zebra_route_string(client->proto),
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
		client->v4_route_add_cnt + client->v4_route_upd8_cnt,
		client->v4_route_del_cnt,
		client->v6_route_add_cnt + client->v6_route_upd8_cnt,
		client->v6_route_del_cnt);
}
988
989 struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
990 {
991 struct listnode *node, *nnode;
992 struct zserv *client;
993
994 for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
995 if (client->proto == proto && client->instance == instance)
996 return client;
997 }
998
999 return NULL;
1000 }
1001
1002 /* This command is for debugging purpose. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Print the full statistics block for every connected API client. */
	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_detail(vty, client);

	return CMD_SUCCESS;
}
1018
1019 /* This command is for debugging purpose. */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Table header, then one line per connected client. */
	vty_out(vty,
		"Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}
1042
#if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing entry point: create a zserv client whose "socket" is a plain file
 * descriptor, so recorded ZAPI messages in 'input' are fed through the
 * regular read path.
 *
 * Fixes: the open() result was previously passed to zserv_client_create()
 * unchecked (handing it -1 on failure); also removed the unused local
 * 'struct thread t' whose u.fd assignment was dead code.
 */
void zserv_read_file(char *input)
{
	int fd;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		zlog_warn("%s: cannot open %s: %s", __func__, input,
			  safe_strerror(errno));
		return;
	}

	zserv_client_create(fd);
}
#endif
1055
/* One-time module initialization: set up global state and register the
 * "show zebra client" vty commands. Called before zserv_start(). */
void zserv_init(void)
{
	/* Client list init. */
	zebrad.client_list = list_new();

	/* Misc init: -1 marks "no listen socket yet" (see zserv_start). */
	zebrad.sock = -1;

	install_element(ENABLE_NODE, &show_zebra_client_cmd);
	install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
}