/*
 * Zebra API server.
 * Portions:
 *   Copyright (C) 1997-1999  Kunihiro Ishiguro
 *   Copyright (C) 2015-2018  Cumulus Networks, Inc.
 *   et al.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

/* clang-format off */
#include <errno.h>                /* for errno */
#include <netinet/in.h>           /* for sockaddr_in */
#include <stdint.h>               /* for uint8_t */
#include <stdio.h>                /* for snprintf */
#include <sys/socket.h>           /* for sockaddr_storage, AF_UNIX, accept... */
#include <sys/stat.h>             /* for umask, mode_t */
#include <sys/un.h>               /* for sockaddr_un */
#include <time.h>                 /* for NULL, tm, gmtime, time_t */
#include <unistd.h>               /* for close, unlink, ssize_t */

#include "lib/buffer.h"           /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
#include "lib/command.h"          /* for vty, install_element, CMD_SUCCESS... */
#include "lib/hook.h"             /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
#include "lib/linklist.h"         /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
#include "lib/libfrr.h"           /* for frr_zclient_addr */
#include "lib/log.h"              /* for zlog_warn, zlog_debug, safe_strerror */
#include "lib/memory.h"           /* for MTYPE_TMP, XCALLOC, XFREE */
#include "lib/monotime.h"         /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
#include "lib/network.h"          /* for set_nonblocking */
#include "lib/privs.h"            /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
#include "lib/route_types.h"      /* for ZEBRA_ROUTE_MAX */
#include "lib/sockopt.h"          /* for setsockopt_so_recvbuf, setsockopt... */
#include "lib/sockunion.h"        /* for sockopt_reuseaddr, sockopt_reuseport */
#include "lib/stream.h"           /* for STREAM_SIZE, stream (ptr only), ... */
#include "lib/thread.h"           /* for thread (ptr only), THREAD_ARG, ... */
#include "lib/vrf.h"              /* for vrf_info_lookup, VRF_DEFAULT */
#include "lib/vty.h"              /* for vty_out, vty (ptr only) */
#include "lib/zassert.h"          /* for assert */
#include "lib/zclient.h"          /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
#include "lib/frr_pthread.h"      /* for frr_pthread_new, frr_pthread_stop... */
#include "lib/frratomic.h"        /* for atomic_load_explicit, atomic_stor... */

#include "zebra/debug.h"          /* for various debugging macros */
#include "zebra/rib.h"            /* for rib_score_proto */
#include "zebra/zapi_msg.h"       /* for zserv_handle_commands */
#include "zebra/zebra_vrf.h"      /* for zebra_vrf_lookup_by_id, zvrf */
#include "zebra/zserv.h"          /* for zserv */
/* clang-format on */
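
/*
 * Threading model, as described by the comments in this file: each accepted
 * ZAPI connection gets its own client pthread, which performs all socket I/O
 * and buffering for that session, while message processing and connection
 * teardown always happen on Zebra's main pthread, reached via the
 * ZSERV_PROCESS_MESSAGES and ZSERV_HANDLE_CLOSE events below.
 */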

/* privileges */
extern struct zebra_privs_t zserv_privs;

/*
 * Client thread events.
 *
 * These are used almost exclusively by client threads to drive their own
 * event loops. The only exception is in zserv_client_create(), which pushes
 * an initial ZSERV_CLIENT_READ event to start the API handler loop.
 */
enum zserv_client_event {
	/* Schedule a socket read */
	ZSERV_CLIENT_READ,
	/* Schedule a buffer write */
	ZSERV_CLIENT_WRITE,
};

/*
 * Main thread events.
 *
 * These are used by client threads to notify the main thread about various
 * events and to request message processing.
 */
enum zserv_event {
	/* Schedule listen job on Zebra API socket */
	ZSERV_ACCEPT,
	/* The calling client has packets on its input buffer */
	ZSERV_PROCESS_MESSAGES,
	/* The calling client wishes to be killed */
	ZSERV_HANDLE_CLOSE,
};

/*
 * Zebra server event driver for all client threads.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on the pthread running the
 * provided client.
 *
 * client
 *    the client in question, and thread target
 *
 * event
 *    the event to notify them about
 */
static void zserv_client_event(struct zserv *client,
			       enum zserv_client_event event);

/*
 * Zebra server event driver for the main thread.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on Zebra's main pthread.
 *
 * client
 *    the client in question
 *
 * event
 *    the event to notify the main thread about
 */
static void zserv_event(struct zserv *client, enum zserv_event event);


/* Client thread lifecycle -------------------------------------------------- */

/*
 * Log zapi message to zlog.
 *
 * errmsg (optional)
 *    Debugging message
 *
 * msg
 *    The message
 *
 * hdr (optional)
 *    The message header
 */
static void zserv_log_message(const char *errmsg, struct stream *msg,
			      struct zmsghdr *hdr)
{
	zlog_debug("Rx'd ZAPI message");
	if (errmsg)
		zlog_debug("%s", errmsg);
	if (hdr) {
		zlog_debug(" Length: %d", hdr->length);
		zlog_debug("Command: %s", zserv_command_string(hdr->command));
		zlog_debug("    VRF: %u", hdr->vrf_id);
	}
	zlog_hexdump(msg->data, STREAM_READABLE(msg));
}

/*
 * Gracefully shut down a client connection.
 *
 * Cancel any pending tasks for the client's thread. Then schedule a task on
 * the main thread to shut down the calling thread.
 *
 * Must be called from the client pthread, never the main thread.
 */
static void zserv_client_close(struct zserv *client)
{
	atomic_store_explicit(&client->dead, true, memory_order_seq_cst);
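	/*
	 * Once the dead flag is set, zserv_read(), zserv_write() and
	 * zserv_client_event() all bail out early, so no further I/O can be
	 * scheduled for this client while teardown is in flight.
	 */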
	THREAD_OFF(client->t_read);
	THREAD_OFF(client->t_write);
	zserv_event(client, ZSERV_HANDLE_CLOSE);
}

/*
 * Write all pending messages to client socket.
 *
 * This function first attempts to flush any buffered data. If unsuccessful,
 * the function reschedules itself and returns. If successful, it pops all
 * available messages from the output queue and continues to write data
 * directly to the socket until the socket would block. If the socket never
 * blocks and all data is written, the function returns without rescheduling
 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
 * buffered and the function reschedules itself.
 *
 * The utility of the buffer is that it allows us to vastly reduce lock
 * contention by allowing us to pop *all* messages off the output queue at
 * once instead of locking and unlocking each time we want to pop a single
 * message off the queue. The same thing could arguably be accomplished
 * faster by allowing the main thread to write directly into the buffer
 * instead of enqueuing packets onto an intermediary queue, but the
 * intermediary queue allows us to expose information about input and output
 * queues to the user in terms of number of packets rather than size of data.
 */
static int zserv_write(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
	uint32_t wcmd = 0;
	struct stream_fifo *cache;

	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
		return 0;

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	cache = stream_fifo_new();

	pthread_mutex_lock(&client->obuf_mtx);
	{
		while (client->obuf_fifo->head)
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	if (cache->tail) {
		msg = cache->tail;
		stream_set_getp(msg, 0);
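		/*
		 * Peek at the 16-bit command field in the last queued
		 * message's ZAPI header; it is stored as last_write_cmd
		 * below so "show zebra client" can report the most recent
		 * command sent to this client.
		 */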
		wcmd = stream_getw_from(msg, 6);
	}

	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
		stream_free(msg);
	}

	stream_fifo_free(cache);

	/* Flush whatever we just buffered, if anything */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		atomic_store_explicit(&client->last_write_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return 0;
	case BUFFER_EMPTY:
		break;
	}

	atomic_store_explicit(&client->last_write_cmd, wcmd,
			      memory_order_relaxed);

	atomic_store_explicit(&client->last_write_time,
			      (uint32_t)monotime(NULL), memory_order_relaxed);

	return 0;

zwrite_fail:
	zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
		  zebra_route_string(client->proto), client->sock);
	zserv_client_close(client);
	return 0;
}
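
/*
 * For reference, every ZAPI message begins with a fixed-size header
 * (struct zmsghdr, declared in lib/zclient.h) whose fields zserv_read()
 * parses and validates below:
 *
 *   length  - total message length, header included
 *   marker  - must equal ZEBRA_HEADER_MARKER
 *   version - must equal ZSERV_VERSION
 *   vrf_id  - VRF the message applies to
 *   command - the ZAPI message type
 */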

/*
 * Read and process data from a client socket.
 *
 * The responsibilities here are to read raw data from the client socket,
 * validate the header, encapsulate it into a single stream object, push it
 * onto the input queue and then notify the main thread that there is new
 * data available.
 *
 * This function first looks for any data in the client structure's working
 * input buffer. If data is present, it is assumed that reading stopped in a
 * previous invocation of this task and needs to be resumed to finish a
 * message. Otherwise, the socket data stream is assumed to be at the
 * beginning of a new ZAPI message (specifically at the header). The header
 * is read and validated. If the header passed validation then the length
 * field found in the header is used to compute the total length of the
 * message. That much data is read (but not inspected), appended to the
 * header, placed into a stream and pushed onto the client's input queue. A
 * task is then scheduled on the main thread to process the client's input
 * queue. Finally, if all of this was successful, this task reschedules
 * itself.
 *
 * Any failure in any of these actions is handled by terminating the client.
 */
static int zserv_read(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	int sock;
	size_t already;
	struct stream_fifo *cache;
	uint32_t p2p_orig;

	uint32_t p2p;
	struct zmsghdr hdr;

	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
		return 0;

	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
					memory_order_relaxed);
	cache = stream_fifo_new();
	p2p = p2p_orig;
	sock = THREAD_FD(thread);

	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];

		already = stream_get_endp(client->ibuf_work);

		/* Read length and command (if we don't have it already). */
		if (already < ZEBRA_HEADER_SIZE) {
			nb = stream_read_try(client->ibuf_work, sock,
					     ZEBRA_HEADER_SIZE - already);
			if ((nb == 0 || nb == -1) && IS_ZEBRA_DEBUG_EVENT)
				zlog_debug("connection closed socket [%d]",
					   sock);
			if ((nb == 0 || nb == -1))
				goto zread_fail;
			if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
				/* Try again later. */
				break;
			}
			already = ZEBRA_HEADER_SIZE;
		}

		/* Reset to read from the beginning of the incoming packet. */
		stream_set_getp(client->ibuf_work, 0);

		/* Fetch header values */
		hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);

		if (!hdrvalid) {
			snprintf(errmsg, sizeof(errmsg),
				 "%s: Message has corrupt header", __func__);
			zserv_log_message(errmsg, client->ibuf_work, NULL);
			goto zread_fail;
		}

		/* Validate header */
		if (hdr.marker != ZEBRA_HEADER_MARKER
		    || hdr.version != ZSERV_VERSION) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
				__func__, sock, hdr.marker, hdr.version);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length < ZEBRA_HEADER_SIZE) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
				__func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}

		/* Read rest of data. */
		if (already < hdr.length) {
			nb = stream_read_try(client->ibuf_work, sock,
					     hdr.length - already);
			if ((nb == 0 || nb == -1) && IS_ZEBRA_DEBUG_EVENT)
				zlog_debug(
					"connection closed [%d] when reading zebra data",
					sock);
			if ((nb == 0 || nb == -1))
				goto zread_fail;
			if (nb != (ssize_t)(hdr.length - already)) {
				/* Try again later. */
				break;
			}
		}

		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("zebra message comes from socket [%d]",
				   sock);

		if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
			zserv_log_message(NULL, client->ibuf_work, &hdr);

		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

		stream_fifo_push(cache, msg);
		stream_reset(client->ibuf_work);
		p2p--;
	}

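	/*
	 * p2p only drops below p2p_orig once a complete message has been
	 * cached, so inside this block hdr is guaranteed to hold the header
	 * of the last fully-read message.
	 */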
	if (p2p < p2p_orig) {
		/* update session statistics */
		atomic_store_explicit(&client->last_read_time,
				      (uint32_t)monotime(NULL),
				      memory_order_relaxed);
		atomic_store_explicit(&client->last_read_cmd, hdr.command,
				      memory_order_relaxed);

		/* publish read packets on client's input queue */
		pthread_mutex_lock(&client->ibuf_mtx);
		{
			while (cache->head)
				stream_fifo_push(client->ibuf_fifo,
						 stream_fifo_pop(cache));
		}
		pthread_mutex_unlock(&client->ibuf_mtx);

		/* Schedule job to process those packets */
		zserv_event(client, ZSERV_PROCESS_MESSAGES);
	}

	if (IS_ZEBRA_DEBUG_PACKET)
		zlog_debug("Read %u packets", p2p_orig - p2p);

	/* Reschedule ourselves */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	stream_fifo_free(cache);

	return 0;

zread_fail:
	stream_fifo_free(cache);
	zserv_client_close(client);
	return -1;
}

static void zserv_client_event(struct zserv *client,
			       enum zserv_client_event event)
{
	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
		return;

	switch (event) {
	case ZSERV_CLIENT_READ:
		thread_add_read(client->pthread->master, zserv_read, client,
				client->sock, &client->t_read);
		break;
	case ZSERV_CLIENT_WRITE:
		thread_add_write(client->pthread->master, zserv_write, client,
				 client->sock, &client->t_write);
		break;
	}
}

/* Main thread lifecycle ---------------------------------------------------- */

/*
 * Read and process messages from a client.
 *
 * This task runs on the main pthread. It is scheduled by client pthreads
 * when they have new messages available on their input queues. The client is
 * passed as the task argument.
 *
 * Each message is popped off the client's input queue and the action
 * associated with the message is executed. This proceeds until there are no
 * more messages, an error occurs, or the processing limit is reached.
 *
 * The client's I/O thread can push at most zebrad.packets_to_process
 * messages onto the input buffer before notifying us there are packets to
 * read. As long as we always process zebrad.packets_to_process messages
 * here, then we can rely on the read thread to handle queuing this task
 * enough times to process everything on the input queue.
 */
static int zserv_process_messages(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
	struct stream_fifo *cache = stream_fifo_new();

	uint32_t p2p = atomic_load_explicit(&zebrad.packets_to_process,
					    memory_order_relaxed);

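	/*
	 * As in zserv_write(), drain the shared fifo into a private cache
	 * under the mutex and do the actual processing outside of it, to
	 * keep the lock hold time short.
	 */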
	pthread_mutex_lock(&client->ibuf_mtx);
	{
		uint32_t i;
		for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
		     ++i) {
			msg = stream_fifo_pop(client->ibuf_fifo);
			stream_fifo_push(cache, msg);
		}

		msg = NULL;
	}
	pthread_mutex_unlock(&client->ibuf_mtx);

	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		zserv_handle_commands(client, msg);
		stream_free(msg);
	}

	stream_fifo_free(cache);

	return 0;
}

int zserv_send_message(struct zserv *client, struct stream *msg)
{
	/*
	 * This is a somewhat poorly named variable added with Zebra's portion
	 * of the label manager. That component does not use the regular
	 * zserv/zapi_msg interface for handling its messages, as the client
	 * itself runs in-process. Instead it uses synchronous writes on the
	 * zserv client's socket directly in the zread* handlers for its
	 * message types. Furthermore, it cannot handle the usual messages
	 * Zebra sends (such as those for interface changes) and so has added
	 * this flag and check here as a hack to suppress all messages that it
	 * does not explicitly know about.
	 *
	 * In any case this needs to be cleaned up at some point.
	 *
	 * See also:
	 *      zread_label_manager_request
	 *      zsend_label_manager_connect_response
	 *      zsend_assign_label_chunk_response
	 *      ...
	 */
	if (client->is_synchronous)
		return 0;

	pthread_mutex_lock(&client->obuf_mtx);
	{
		stream_fifo_push(client->obuf_fifo, msg);
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	zserv_client_event(client, ZSERV_CLIENT_WRITE);

	return 0;
}

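/*
 * A typical producer in zapi_msg.c hands a message off along these lines
 * (sketch only; see the zsend_* functions for the real call sites):
 *
 *	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
 *	zclient_create_header(s, ZEBRA_INTERFACE_UP, vrf_id);
 *	...message body...
 *	stream_putw_at(s, 0, stream_get_endp(s));
 *	zserv_send_message(client, s);
 *
 * Ownership of the stream passes to the client's output queue; it is freed
 * by zserv_write() after the data is buffered.
 */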

/* Hooks for client connect / disconnect */
DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
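/*
 * (A KOOH is a hook run backwards: close handlers are invoked in the
 * reverse of their registration order, so teardown mirrors setup.)
 */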

/*
 * Deinitialize zebra client.
 *
 * - Deregister and deinitialize related internal resources
 * - Gracefully close socket
 * - Free associated resources
 * - Free client structure
 *
 * This does *not* take any action on the struct thread * fields. These are
 * managed by the owning pthread and any tasks associated with them must have
 * been stopped prior to invoking this function.
 */
static void zserv_client_free(struct zserv *client)
{
	hook_call(zserv_client_close, client);

	/* Close file descriptor. */
	if (client->sock >= 0) {
		unsigned long nroutes;

		close(client->sock);
		nroutes = rib_score_proto(client->proto, client->instance);
		zlog_notice(
			"client %d disconnected. %lu %s routes removed from the rib",
			client->sock, nroutes,
			zebra_route_string(client->proto));
		client->sock = -1;
	}

	/* Free stream buffers. */
	if (client->ibuf_work)
		stream_free(client->ibuf_work);
	if (client->obuf_work)
		stream_free(client->obuf_work);
	if (client->ibuf_fifo)
		stream_fifo_free(client->ibuf_fifo);
	if (client->obuf_fifo)
		stream_fifo_free(client->obuf_fifo);
	if (client->wb)
		buffer_free(client->wb);

	/* Free buffer mutexes */
	pthread_mutex_destroy(&client->obuf_mtx);
	pthread_mutex_destroy(&client->ibuf_mtx);

	/* Free bitmaps. */
	for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
		for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
			vrf_bitmap_free(client->redist[afi][i]);

	vrf_bitmap_free(client->redist_default);
	vrf_bitmap_free(client->ifinfo);
	vrf_bitmap_free(client->ridinfo);

	XFREE(MTYPE_TMP, client);
}

/*
 * Finish closing a client.
 *
 * This task is scheduled by a ZAPI client pthread on the main pthread when
 * it wants to stop itself. When this executes, the client connection should
 * already have been closed. This task's responsibility is to gracefully
 * terminate the client thread, update relevant internal datastructures and
 * free any resources allocated by the main thread.
 */
static int zserv_handle_client_close(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);

	/*
	 * Ensure these have been nulled. This does not equate to the
	 * associated task(s) being scheduled or unscheduled on the client
	 * pthread's threadmaster.
	 */
	assert(!client->t_read);
	assert(!client->t_write);

	/* synchronously stop thread */
	frr_pthread_stop(client->pthread, NULL);

	/* destroy frr_pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	listnode_delete(zebrad.client_list, client);
	zserv_client_free(client);
	return 0;
}

/*
 * Create a new client.
 *
 * This is called when a new connection is accept()'d on the ZAPI socket. It
 * initializes a new client structure, notifies any subscribers of the
 * connection event and spawns the client's thread.
 *
 * sock
 *    client's socket file descriptor
 */
static void zserv_client_create(int sock)
{
	struct zserv *client;
	int i;
	afi_t afi;

	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	client->wb = buffer_new(0);

	/* Set table number. */
	client->rtm_table = zebrad.rtm_table_default;

	atomic_store_explicit(&client->connect_time, (uint32_t)monotime(NULL),
			      memory_order_relaxed);

	/* Initialize flags */
	for (afi = AFI_IP; afi < AFI_MAX; afi++)
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
	client->redist_default = vrf_bitmap_init();
	client->ifinfo = vrf_bitmap_init();
	client->ridinfo = vrf_bitmap_init();

	/* by default, it's not a synchronous client */
	client->is_synchronous = 0;

	/* Add this client to linked list. */
	listnode_add(zebrad.client_list, client);

	struct frr_pthread_attr zclient_pthr_attrs = {
		.id = frr_pthread_get_id(),
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread = frr_pthread_new(&zclient_pthr_attrs,
					  "Zebra API client thread");

	zebra_vrf_update_all(client);

	/* start read loop */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
	frr_pthread_run(client->pthread, NULL);
}
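
/*
 * The mirror image of this setup is split between zserv_client_close()
 * (runs on the client pthread) and zserv_handle_client_close() /
 * zserv_client_free() (run on the main pthread).
 */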

/*
 * Accept socket connection.
 */
static int zserv_accept(struct thread *thread)
{
	int accept_sock;
	int client_sock;
	struct sockaddr_in client;
	socklen_t len;

	accept_sock = THREAD_FD(thread);

	/* Reregister myself. */
	zserv_event(NULL, ZSERV_ACCEPT);

	len = sizeof(struct sockaddr_in);
	client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);

	if (client_sock < 0) {
		zlog_warn("Can't accept zebra socket: %s",
			  safe_strerror(errno));
		return -1;
	}

	/* Make client socket non-blocking. */
	set_nonblocking(client_sock);

	/* Create new zebra client. */
	zserv_client_create(client_sock);

	return 0;
}

void zserv_start(char *path)
{
	int ret;
	mode_t old_mask;
	struct sockaddr_storage sa;
	socklen_t sa_len;

	if (!frr_zclient_addr(&sa, &sa_len, path))
		/* should be caught in zebra main() */
		return;

	/* Set umask */
	old_mask = umask(0077);

	/* Make listen socket (UNIX domain or TCP, per the parsed address). */
	zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
	if (zebrad.sock < 0) {
		zlog_warn("Can't create zserv socket: %s",
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		return;
	}

	if (sa.ss_family != AF_UNIX) {
		sockopt_reuseaddr(zebrad.sock);
		sockopt_reuseport(zebrad.sock);
	} else {
		struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
		if (suna->sun_path[0])
			unlink(suna->sun_path);
	}

	zserv_privs.change(ZPRIVS_RAISE);
	setsockopt_so_recvbuf(zebrad.sock, 1048576);
	setsockopt_so_sendbuf(zebrad.sock, 1048576);
	zserv_privs.change(ZPRIVS_LOWER);

	if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_RAISE))
		zlog_err("Can't raise privileges");

	ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
	if (ret < 0) {
		zlog_warn("Can't bind zserv socket on %s: %s", path,
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		close(zebrad.sock);
		zebrad.sock = -1;
		return;
	}
	if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_LOWER))
		zlog_err("Can't lower privileges");

	ret = listen(zebrad.sock, 5);
	if (ret < 0) {
		zlog_warn("Can't listen to zserv socket %s: %s", path,
			  safe_strerror(errno));
		zlog_warn(
			"zebra can't provide full functionality due to above error");
		close(zebrad.sock);
		zebrad.sock = -1;
		return;
	}

	umask(old_mask);

	zserv_event(NULL, ZSERV_ACCEPT);
}

void zserv_event(struct zserv *client, enum zserv_event event)
{
	switch (event) {
	case ZSERV_ACCEPT:
		thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
				NULL);
		break;
	case ZSERV_PROCESS_MESSAGES:
		thread_add_event(zebrad.master, zserv_process_messages, client,
				 0, NULL);
		break;
	case ZSERV_HANDLE_CLOSE:
		thread_add_event(zebrad.master, zserv_handle_client_close,
				 client, 0, NULL);
		break;
	}
}


/* General purpose ---------------------------------------------------------- */

#define ZEBRA_TIME_BUF 32
static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
{
	struct tm *tm;
	time_t now;

	assert(buf != NULL);
	assert(buflen >= ZEBRA_TIME_BUF);
	assert(time1 != NULL);

	if (!*time1) {
		snprintf(buf, buflen, "never ");
		return buf;
	}

	now = monotime(NULL);
	now -= *time1;
	tm = gmtime(&now);

	if (now < ONE_DAY_SECOND)
		snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour,
			 tm->tm_min, tm->tm_sec);
	else if (now < ONE_WEEK_SECOND)
		snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday,
			 tm->tm_hour, tm->tm_min);
	else
		snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7,
			 tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour);
	return buf;
}
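
/*
 * Sample renderings of the three formats above:
 *   "04:23:01" - uptime under one day
 *   "2d07h45m" - under one week
 *   "01w3d12h" - one week or more
 */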

static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint16_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	connect_time = (time_t)atomic_load_explicit(&client->connect_time,
						    memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(
		&client->last_write_time, memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	vty_out(vty, "Type        Add         Update      Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4        %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6        %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4   %-12d%-12d%-12d\n",
		client->redist_v4_add_cnt, 0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6   %-12d%-12d%-12d\n",
		client->redist_v6_add_cnt, 0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected   %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer    %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n",
		client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n",
		client->macipdel_cnt);

	vty_out(vty, "\n");
}

static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;

	connect_time = (time_t)atomic_load_explicit(&client->connect_time,
						    memory_order_relaxed);
	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(
		&client->last_write_time, memory_order_relaxed);

	vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
		zebra_route_string(client->proto),
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
		client->v4_route_add_cnt + client->v4_route_upd8_cnt,
		client->v4_route_del_cnt,
		client->v6_route_add_cnt + client->v6_route_upd8_cnt,
		client->v6_route_del_cnt);
}

struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
{
	struct listnode *node, *nnode;
	struct zserv *client;

	for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
		if (client->proto == proto && client->instance == instance)
			return client;
	}

	return NULL;
}

/* This command is for debugging purposes. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_detail(vty, client);

	return CMD_SUCCESS;
}

/* This command is for debugging purposes. */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	vty_out(vty,
		"Name    Connect Time    Last Read  Last Write  IPv4 Routes     IPv6 Routes   \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}

#if defined(HANDLE_ZAPI_FUZZING)
void zserv_read_file(char *input)
{
	int fd;
	struct zserv *client;
	struct thread t;

	zserv_client_create(-1);

	/*
	 * zserv_client_create() does not return the new client; it appends
	 * it to zebrad.client_list, so retrieve it from the list tail before
	 * using it below.
	 */
	client = listgetdata(listtail(zebrad.client_list));

	frr_pthread_stop(client->pthread, NULL);
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	t.arg = client;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	t.u.fd = fd;

	zserv_read(&t);

	close(fd);
}
#endif

void zserv_init(void)
{
	/* Client list init. */
	zebrad.client_list = list_new();
	zebrad.client_list->del = (void (*)(void *))zserv_client_free;

	/* Misc init. */
	zebrad.sock = -1;

	install_element(ENABLE_NODE, &show_zebra_client_cmd);
	install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
}