1 | /* | |
2 | * Zebra API server. | |
3 | * Portions: | |
4 | * Copyright (C) 1997-1999 Kunihiro Ishiguro | |
5 | * Copyright (C) 2015-2018 Cumulus Networks, Inc. | |
6 | * et al. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the Free | |
10 | * Software Foundation; either version 2 of the License, or (at your option) | |
11 | * any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
16 | * more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License along | |
19 | * with this program; see the file COPYING; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
21 | */ | |
22 | ||
23 | #include <zebra.h> | |
24 | ||
25 | /* clang-format off */ | |
26 | #include <errno.h> /* for errno */ | |
27 | #include <netinet/in.h> /* for sockaddr_in */ | |
28 | #include <stdint.h> /* for uint8_t */ | |
29 | #include <stdio.h> /* for snprintf */ | |
30 | #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */ | |
31 | #include <sys/stat.h> /* for umask, mode_t */ | |
32 | #include <sys/un.h> /* for sockaddr_un */ | |
33 | #include <time.h> /* for NULL, tm, gmtime, time_t */ | |
34 | #include <unistd.h> /* for close, unlink, ssize_t */ | |
35 | ||
36 | #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */ | |
37 | #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */ | |
38 | #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */ | |
39 | #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */ | |
40 | #include "lib/libfrr.h" /* for frr_zclient_addr */ | |
41 | #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */ | |
42 | #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */ | |
43 | #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */ | |
44 | #include "lib/network.h" /* for set_nonblocking */ | |
45 | #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */ | |
46 | #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */ | |
47 | #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */ | |
48 | #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */ | |
49 | #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */ | |
50 | #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */ | |
51 | #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */ | |
52 | #include "lib/vty.h" /* for vty_out, vty (ptr only) */ | |
53 | #include "lib/zassert.h" /* for assert */ | |
54 | #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */ | |
55 | #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */ | |
56 | #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */ | |
57 | #include "lib/lib_errors.h" /* for generic ferr ids */ | |
58 | ||
59 | #include "zebra/debug.h" /* for various debugging macros */ | |
60 | #include "zebra/rib.h" /* for rib_score_proto */ | |
61 | #include "zebra/zapi_msg.h" /* for zserv_handle_commands */ | |
62 | #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */ | |
63 | #include "zebra/zserv.h" /* for zserv */ | |
64 | #include "zebra/zebra_router.h" | |
65 | #include "zebra/zebra_errors.h" /* for error messages */ | |
66 | /* clang-format on */ | |
67 | ||
/* privileges */
extern struct zebra_privs_t zserv_privs;

/*
 * The listener socket for clients connecting to us.
 *
 * Initialized to -1 ("no listener") rather than the implicit 0: descriptor
 * 0 is a valid file descriptor, so shutdown paths that close() the listener
 * must be able to tell "never opened" apart from a real socket.
 * zserv_start() assigns the real fd on success.
 */
static int zsock = -1;

/*
 * Client thread events.
 *
 * These are used almost exclusively by client threads to drive their own event
 * loops. The only exception is in zserv_client_create(), which pushes an
 * initial ZSERV_CLIENT_READ event to start the API handler loop.
 */
enum zserv_client_event {
	/* Schedule a socket read */
	ZSERV_CLIENT_READ,
	/* Schedule a buffer write */
	ZSERV_CLIENT_WRITE,
};

/*
 * Main thread events.
 *
 * These are used by client threads to notify the main thread about various
 * events and to make processing requests.
 */
enum zserv_event {
	/* Schedule listen job on Zebra API socket */
	ZSERV_ACCEPT,
	/* The calling client has packets on its input buffer */
	ZSERV_PROCESS_MESSAGES,
	/* The calling client wishes to be killed */
	ZSERV_HANDLE_CLIENT_FAIL,
};

/*
 * Zebra server event driver for all client threads.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on the pthread running the
 * provided client.
 *
 * client
 *    the client in question, and thread target
 *
 * event
 *    the event to notify them about
 */
static void zserv_client_event(struct zserv *client,
			       enum zserv_client_event event);

/*
 * Zebra server event driver for the main thread.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on Zebra's main pthread.
 *
 * client
 *    the client in question
 *
 * event
 *    the event to notify the main thread about
 */
static void zserv_event(struct zserv *client, enum zserv_event event);
136 | ||
137 | ||
138 | /* Client thread lifecycle -------------------------------------------------- */ | |
139 | ||
140 | /* | |
141 | * Log zapi message to zlog. | |
142 | * | |
143 | * errmsg (optional) | |
144 | * Debugging message | |
145 | * | |
146 | * msg | |
147 | * The message | |
148 | * | |
149 | * hdr (optional) | |
150 | * The message header | |
151 | */ | |
152 | void zserv_log_message(const char *errmsg, struct stream *msg, | |
153 | struct zmsghdr *hdr) | |
154 | { | |
155 | zlog_debug("Rx'd ZAPI message"); | |
156 | if (errmsg) | |
157 | zlog_debug("%s", errmsg); | |
158 | if (hdr) { | |
159 | zlog_debug(" Length: %d", hdr->length); | |
160 | zlog_debug("Command: %s", zserv_command_string(hdr->command)); | |
161 | zlog_debug(" VRF: %u", hdr->vrf_id); | |
162 | } | |
163 | zlog_hexdump(msg->data, STREAM_READABLE(msg)); | |
164 | } | |
165 | ||
166 | /* | |
167 | * Gracefully shut down a client connection. | |
168 | * | |
169 | * Cancel any pending tasks for the client's thread. Then schedule a task on | |
170 | * the main thread to shut down the calling thread. | |
171 | * | |
172 | * It is not safe to close the client socket in this function. The socket is | |
173 | * owned by the main thread. | |
174 | * | |
175 | * Must be called from the client pthread, never the main thread. | |
176 | */ | |
177 | static void zserv_client_fail(struct zserv *client) | |
178 | { | |
179 | flog_warn(EC_ZEBRA_CLIENT_IO_ERROR, | |
180 | "Client '%s' encountered an error and is shutting down.", | |
181 | zebra_route_string(client->proto)); | |
182 | ||
183 | atomic_store_explicit(&client->pthread->running, false, | |
184 | memory_order_relaxed); | |
185 | ||
186 | THREAD_OFF(client->t_read); | |
187 | THREAD_OFF(client->t_write); | |
188 | zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL); | |
189 | } | |
190 | ||
191 | /* | |
192 | * Write all pending messages to client socket. | |
193 | * | |
194 | * This function first attempts to flush any buffered data. If unsuccessful, | |
195 | * the function reschedules itself and returns. If successful, it pops all | |
196 | * available messages from the output queue and continues to write data | |
197 | * directly to the socket until the socket would block. If the socket never | |
198 | * blocks and all data is written, the function returns without rescheduling | |
199 | * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is | |
200 | * buffered and the function reschedules itself. | |
201 | * | |
202 | * The utility of the buffer is that it allows us to vastly reduce lock | |
203 | * contention by allowing us to pop *all* messages off the output queue at once | |
204 | * instead of locking and unlocking each time we want to pop a single message | |
205 | * off the queue. The same thing could arguably be accomplished faster by | |
206 | * allowing the main thread to write directly into the buffer instead of | |
207 | * enqueuing packets onto an intermediary queue, but the intermediary queue | |
208 | * allows us to expose information about input and output queues to the user in | |
209 | * terms of number of packets rather than size of data. | |
210 | */ | |
211 | static int zserv_write(struct thread *thread) | |
212 | { | |
213 | struct zserv *client = THREAD_ARG(thread); | |
214 | struct stream *msg; | |
215 | uint32_t wcmd = 0; | |
216 | struct stream_fifo *cache; | |
217 | ||
218 | /* If we have any data pending, try to flush it first */ | |
219 | switch (buffer_flush_all(client->wb, client->sock)) { | |
220 | case BUFFER_ERROR: | |
221 | goto zwrite_fail; | |
222 | case BUFFER_PENDING: | |
223 | atomic_store_explicit(&client->last_write_time, | |
224 | (uint32_t)monotime(NULL), | |
225 | memory_order_relaxed); | |
226 | zserv_client_event(client, ZSERV_CLIENT_WRITE); | |
227 | return 0; | |
228 | case BUFFER_EMPTY: | |
229 | break; | |
230 | } | |
231 | ||
232 | cache = stream_fifo_new(); | |
233 | ||
234 | frr_with_mutex(&client->obuf_mtx) { | |
235 | while (stream_fifo_head(client->obuf_fifo)) | |
236 | stream_fifo_push(cache, | |
237 | stream_fifo_pop(client->obuf_fifo)); | |
238 | } | |
239 | ||
240 | if (cache->tail) { | |
241 | msg = cache->tail; | |
242 | stream_set_getp(msg, 0); | |
243 | wcmd = stream_getw_from(msg, ZAPI_HEADER_CMD_LOCATION); | |
244 | } | |
245 | ||
246 | while (stream_fifo_head(cache)) { | |
247 | msg = stream_fifo_pop(cache); | |
248 | buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg)); | |
249 | stream_free(msg); | |
250 | } | |
251 | ||
252 | stream_fifo_free(cache); | |
253 | ||
254 | /* If we have any data pending, try to flush it first */ | |
255 | switch (buffer_flush_all(client->wb, client->sock)) { | |
256 | case BUFFER_ERROR: | |
257 | goto zwrite_fail; | |
258 | case BUFFER_PENDING: | |
259 | atomic_store_explicit(&client->last_write_time, | |
260 | (uint32_t)monotime(NULL), | |
261 | memory_order_relaxed); | |
262 | zserv_client_event(client, ZSERV_CLIENT_WRITE); | |
263 | return 0; | |
264 | case BUFFER_EMPTY: | |
265 | break; | |
266 | } | |
267 | ||
268 | atomic_store_explicit(&client->last_write_cmd, wcmd, | |
269 | memory_order_relaxed); | |
270 | ||
271 | atomic_store_explicit(&client->last_write_time, | |
272 | (uint32_t)monotime(NULL), memory_order_relaxed); | |
273 | ||
274 | return 0; | |
275 | ||
276 | zwrite_fail: | |
277 | flog_warn(EC_ZEBRA_CLIENT_WRITE_FAILED, | |
278 | "%s: could not write to %s [fd = %d], closing.", __func__, | |
279 | zebra_route_string(client->proto), client->sock); | |
280 | zserv_client_fail(client); | |
281 | return 0; | |
282 | } | |
283 | ||
/*
 * Read and process data from a client socket.
 *
 * The responsibilities here are to read raw data from the client socket,
 * validate the header, encapsulate it into a single stream object, push it
 * onto the input queue and then notify the main thread that there is new data
 * available.
 *
 * This function first looks for any data in the client structure's working
 * input buffer. If data is present, it is assumed that reading stopped in a
 * previous invocation of this task and needs to be resumed to finish a message.
 * Otherwise, the socket data stream is assumed to be at the beginning of a new
 * ZAPI message (specifically at the header). The header is read and validated.
 * If the header passed validation then the length field found in the header is
 * used to compute the total length of the message. That much data is read (but
 * not inspected), appended to the header, placed into a stream and pushed onto
 * the client's input queue. A task is then scheduled on the main thread to
 * process the client's input queue. Finally, if all of this was successful,
 * this task reschedules itself.
 *
 * Any failure in any of these actions is handled by terminating the client.
 */
static int zserv_read(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	int sock;
	size_t already;
	/* Messages read in this invocation are staged here, then published
	 * to the shared input fifo under the mutex in one batch. */
	struct stream_fifo *cache;
	/* Per-invocation read budget, snapshotted from the router config */
	uint32_t p2p_orig;

	uint32_t p2p;
	struct zmsghdr hdr;

	p2p_orig = atomic_load_explicit(&zrouter.packets_to_process,
					memory_order_relaxed);
	cache = stream_fifo_new();
	p2p = p2p_orig;
	sock = THREAD_FD(thread);

	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];

		/* Bytes already accumulated for the current message by a
		 * previous, partial invocation of this task */
		already = stream_get_endp(client->ibuf_work);

		/* Read length and command (if we don't have it already). */
		if (already < ZEBRA_HEADER_SIZE) {
			nb = stream_read_try(client->ibuf_work, sock,
					     ZEBRA_HEADER_SIZE - already);
			/* 0 = EOF, -1 = hard error; both terminate client */
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug("connection closed socket [%d]",
						   sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
				/* Try again later. */
				break;
			}
			already = ZEBRA_HEADER_SIZE;
		}

		/* Reset to read from the beginning of the incoming packet. */
		stream_set_getp(client->ibuf_work, 0);

		/* Fetch header values */
		hdrvalid = zapi_parse_header(client->ibuf_work, &hdr);

		if (!hdrvalid) {
			snprintf(errmsg, sizeof(errmsg),
				 "%s: Message has corrupt header", __func__);
			zserv_log_message(errmsg, client->ibuf_work, NULL);
			goto zread_fail;
		}

		/* Validate header */
		if (hdr.marker != ZEBRA_HEADER_MARKER
		    || hdr.version != ZSERV_VERSION) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d",
				__func__, sock, hdr.marker, hdr.version);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		/* hdr.length includes the header itself, so it can never be
		 * smaller than ZEBRA_HEADER_SIZE */
		if (hdr.length < ZEBRA_HEADER_SIZE) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u is less than header size %d",
				__func__, sock, hdr.length, ZEBRA_HEADER_SIZE);
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
		if (hdr.length > STREAM_SIZE(client->ibuf_work)) {
			snprintf(
				errmsg, sizeof(errmsg),
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}

		/* Read rest of data. */
		if (already < hdr.length) {
			nb = stream_read_try(client->ibuf_work, sock,
					     hdr.length - already);
			if ((nb == 0 || nb == -1)) {
				if (IS_ZEBRA_DEBUG_EVENT)
					zlog_debug(
						"connection closed [%d] when reading zebra data",
						sock);
				goto zread_fail;
			}
			if (nb != (ssize_t)(hdr.length - already)) {
				/* Try again later. */
				break;
			}
		}

		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_PACKET)
			zlog_debug("zebra message[%s:%u:%u] comes from socket [%d]",
				   zserv_command_string(hdr.command),
				   hdr.vrf_id, hdr.length,
				   sock);

		/* Copy the complete message out of the working buffer so the
		 * buffer can be reused for the next message */
		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

		stream_fifo_push(cache, msg);
		stream_reset(client->ibuf_work);
		p2p--;
	}

	/* p2p < p2p_orig implies at least one complete message was read, so
	 * hdr holds the last successfully parsed header here */
	if (p2p < p2p_orig) {
		/* update session statistics */
		atomic_store_explicit(&client->last_read_time, monotime(NULL),
				      memory_order_relaxed);
		atomic_store_explicit(&client->last_read_cmd, hdr.command,
				      memory_order_relaxed);

		/* publish read packets on client's input queue */
		frr_with_mutex(&client->ibuf_mtx) {
			while (cache->head)
				stream_fifo_push(client->ibuf_fifo,
						 stream_fifo_pop(cache));
		}

		/* Schedule job to process those packets */
		zserv_event(client, ZSERV_PROCESS_MESSAGES);

	}

	if (IS_ZEBRA_DEBUG_PACKET)
		zlog_debug("Read %d packets from client: %s", p2p_orig - p2p,
			   zebra_route_string(client->proto));

	/* Reschedule ourselves */
	zserv_client_event(client, ZSERV_CLIENT_READ);

	stream_fifo_free(cache);

	return 0;

zread_fail:
	stream_fifo_free(cache);
	zserv_client_fail(client);
	return -1;
}
455 | ||
456 | static void zserv_client_event(struct zserv *client, | |
457 | enum zserv_client_event event) | |
458 | { | |
459 | switch (event) { | |
460 | case ZSERV_CLIENT_READ: | |
461 | thread_add_read(client->pthread->master, zserv_read, client, | |
462 | client->sock, &client->t_read); | |
463 | break; | |
464 | case ZSERV_CLIENT_WRITE: | |
465 | thread_add_write(client->pthread->master, zserv_write, client, | |
466 | client->sock, &client->t_write); | |
467 | break; | |
468 | } | |
469 | } | |
470 | ||
471 | /* Main thread lifecycle ---------------------------------------------------- */ | |
472 | ||
473 | /* | |
474 | * Read and process messages from a client. | |
475 | * | |
476 | * This task runs on the main pthread. It is scheduled by client pthreads when | |
477 | * they have new messages available on their input queues. The client is passed | |
478 | * as the task argument. | |
479 | * | |
480 | * Each message is popped off the client's input queue and the action associated | |
481 | * with the message is executed. This proceeds until there are no more messages, | |
482 | * an error occurs, or the processing limit is reached. | |
483 | * | |
484 | * The client's I/O thread can push at most zrouter.packets_to_process messages | |
485 | * onto the input buffer before notifying us there are packets to read. As long | |
486 | * as we always process zrouter.packets_to_process messages here, then we can | |
487 | * rely on the read thread to handle queuing this task enough times to process | |
488 | * everything on the input queue. | |
489 | */ | |
490 | static int zserv_process_messages(struct thread *thread) | |
491 | { | |
492 | struct zserv *client = THREAD_ARG(thread); | |
493 | struct stream *msg; | |
494 | struct stream_fifo *cache = stream_fifo_new(); | |
495 | uint32_t p2p = zrouter.packets_to_process; | |
496 | bool need_resched = false; | |
497 | ||
498 | frr_with_mutex(&client->ibuf_mtx) { | |
499 | uint32_t i; | |
500 | for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo); | |
501 | ++i) { | |
502 | msg = stream_fifo_pop(client->ibuf_fifo); | |
503 | stream_fifo_push(cache, msg); | |
504 | } | |
505 | ||
506 | msg = NULL; | |
507 | ||
508 | /* Need to reschedule processing work if there are still | |
509 | * packets in the fifo. | |
510 | */ | |
511 | if (stream_fifo_head(client->ibuf_fifo)) | |
512 | need_resched = true; | |
513 | } | |
514 | ||
515 | while (stream_fifo_head(cache)) { | |
516 | msg = stream_fifo_pop(cache); | |
517 | zserv_handle_commands(client, msg); | |
518 | stream_free(msg); | |
519 | } | |
520 | ||
521 | stream_fifo_free(cache); | |
522 | ||
523 | /* Reschedule ourselves if necessary */ | |
524 | if (need_resched) | |
525 | zserv_event(client, ZSERV_PROCESS_MESSAGES); | |
526 | ||
527 | return 0; | |
528 | } | |
529 | ||
530 | int zserv_send_message(struct zserv *client, struct stream *msg) | |
531 | { | |
532 | frr_with_mutex(&client->obuf_mtx) { | |
533 | stream_fifo_push(client->obuf_fifo, msg); | |
534 | } | |
535 | ||
536 | zserv_client_event(client, ZSERV_CLIENT_WRITE); | |
537 | ||
538 | return 0; | |
539 | } | |
540 | ||
541 | ||
/* Hooks for client connect / disconnect.
 * DEFINE_KOOH is the reverse-order hook variant (per lib/hook.h);
 * presumably so close handlers run in the opposite order of the
 * connect handlers — confirm against lib/hook.h. */
DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
545 | ||
546 | /* | |
547 | * Deinitialize zebra client. | |
548 | * | |
549 | * - Deregister and deinitialize related internal resources | |
550 | * - Gracefully close socket | |
551 | * - Free associated resources | |
552 | * - Free client structure | |
553 | * | |
554 | * This does *not* take any action on the struct thread * fields. These are | |
555 | * managed by the owning pthread and any tasks associated with them must have | |
556 | * been stopped prior to invoking this function. | |
557 | */ | |
558 | static void zserv_client_free(struct zserv *client) | |
559 | { | |
560 | if (client == NULL) | |
561 | return; | |
562 | ||
563 | hook_call(zserv_client_close, client); | |
564 | ||
565 | /* Close file descriptor. */ | |
566 | if (client->sock) { | |
567 | unsigned long nroutes; | |
568 | ||
569 | close(client->sock); | |
570 | ||
571 | if (!client->gr_instance_count) { | |
572 | nroutes = rib_score_proto(client->proto, | |
573 | client->instance); | |
574 | zlog_notice( | |
575 | "client %d disconnected %lu %s routes removed from the rib", | |
576 | client->sock, nroutes, | |
577 | zebra_route_string(client->proto)); | |
578 | } | |
579 | client->sock = -1; | |
580 | } | |
581 | ||
582 | /* Free stream buffers. */ | |
583 | if (client->ibuf_work) | |
584 | stream_free(client->ibuf_work); | |
585 | if (client->obuf_work) | |
586 | stream_free(client->obuf_work); | |
587 | if (client->ibuf_fifo) | |
588 | stream_fifo_free(client->ibuf_fifo); | |
589 | if (client->obuf_fifo) | |
590 | stream_fifo_free(client->obuf_fifo); | |
591 | if (client->wb) | |
592 | buffer_free(client->wb); | |
593 | ||
594 | /* Free buffer mutexes */ | |
595 | pthread_mutex_destroy(&client->obuf_mtx); | |
596 | pthread_mutex_destroy(&client->ibuf_mtx); | |
597 | ||
598 | /* Free bitmaps. */ | |
599 | for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) { | |
600 | for (int i = 0; i < ZEBRA_ROUTE_MAX; i++) { | |
601 | vrf_bitmap_free(client->redist[afi][i]); | |
602 | redist_del_all_instances(&client->mi_redist[afi][i]); | |
603 | } | |
604 | ||
605 | vrf_bitmap_free(client->redist_default[afi]); | |
606 | } | |
607 | vrf_bitmap_free(client->ridinfo); | |
608 | ||
609 | /* | |
610 | * If any instance are graceful restart enabled, | |
611 | * client is not deleted | |
612 | */ | |
613 | if (!client->gr_instance_count) { | |
614 | if (IS_ZEBRA_DEBUG_EVENT) | |
615 | zlog_debug("%s: Deleting client %s", __func__, | |
616 | zebra_route_string(client->proto)); | |
617 | XFREE(MTYPE_TMP, client); | |
618 | } else { | |
619 | /* Handle cases where client has GR instance. */ | |
620 | if (IS_ZEBRA_DEBUG_EVENT) | |
621 | zlog_debug("%s: client %s restart enabled", __func__, | |
622 | zebra_route_string(client->proto)); | |
623 | if (zebra_gr_client_disconnect(client) < 0) | |
624 | zlog_err( | |
625 | "%s: GR enabled but could not handle disconnect event", | |
626 | __func__); | |
627 | } | |
628 | } | |
629 | ||
/*
 * Fully tear down a client on the main thread: stop and join its pthread,
 * cancel every task still scheduled for it, unlink it from the global client
 * list and free its resources via zserv_client_free().
 *
 * The ordering below matters: the client pthread must be stopped before its
 * tasks are cancelled and before the pthread object is destroyed.
 */
void zserv_close_client(struct zserv *client)
{
	/* synchronously stop and join pthread */
	frr_pthread_stop(client->pthread, NULL);

	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("Closing client '%s'",
			   zebra_route_string(client->proto));

	/* Cancel any main-thread tasks that still reference this client */
	thread_cancel_event(zrouter.master, client);
	THREAD_OFF(client->t_cleanup);
	THREAD_OFF(client->t_process);

	/* destroy pthread */
	frr_pthread_destroy(client->pthread);
	client->pthread = NULL;

	/* remove from client list */
	listnode_delete(zrouter.client_list, client);

	/* delete client */
	zserv_client_free(client);
}
653 | ||
/*
 * Main-thread task scheduled by a ZAPI client pthread that wants to stop
 * itself. By the time this runs the client connection should already be
 * closed and its pthread will most likely have exited, but its resources
 * still need to be cleaned up.
 */
static int zserv_handle_client_fail(struct thread *t)
{
	zserv_close_client(THREAD_ARG(t));

	return 0;
}
667 | ||
668 | /* | |
669 | * Create a new client. | |
670 | * | |
671 | * This is called when a new connection is accept()'d on the ZAPI socket. It | |
672 | * initializes new client structure, notifies any subscribers of the connection | |
673 | * event and spawns the client's thread. | |
674 | * | |
675 | * sock | |
676 | * client's socket file descriptor | |
677 | */ | |
678 | static struct zserv *zserv_client_create(int sock) | |
679 | { | |
680 | struct zserv *client; | |
681 | size_t stream_size = | |
682 | MAX(ZEBRA_MAX_PACKET_SIZ, sizeof(struct zapi_route)); | |
683 | int i; | |
684 | afi_t afi; | |
685 | ||
686 | client = XCALLOC(MTYPE_TMP, sizeof(struct zserv)); | |
687 | ||
688 | /* Make client input/output buffer. */ | |
689 | client->sock = sock; | |
690 | client->ibuf_fifo = stream_fifo_new(); | |
691 | client->obuf_fifo = stream_fifo_new(); | |
692 | client->ibuf_work = stream_new(stream_size); | |
693 | client->obuf_work = stream_new(stream_size); | |
694 | pthread_mutex_init(&client->ibuf_mtx, NULL); | |
695 | pthread_mutex_init(&client->obuf_mtx, NULL); | |
696 | client->wb = buffer_new(0); | |
697 | TAILQ_INIT(&(client->gr_info_queue)); | |
698 | ||
699 | atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL), | |
700 | memory_order_relaxed); | |
701 | ||
702 | /* Initialize flags */ | |
703 | for (afi = AFI_IP; afi < AFI_MAX; afi++) { | |
704 | for (i = 0; i < ZEBRA_ROUTE_MAX; i++) | |
705 | client->redist[afi][i] = vrf_bitmap_init(); | |
706 | client->redist_default[afi] = vrf_bitmap_init(); | |
707 | } | |
708 | client->ridinfo = vrf_bitmap_init(); | |
709 | ||
710 | /* Add this client to linked list. */ | |
711 | listnode_add(zrouter.client_list, client); | |
712 | ||
713 | struct frr_pthread_attr zclient_pthr_attrs = { | |
714 | .start = frr_pthread_attr_default.start, | |
715 | .stop = frr_pthread_attr_default.stop | |
716 | }; | |
717 | client->pthread = | |
718 | frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread", | |
719 | "zebra_apic"); | |
720 | ||
721 | /* start read loop */ | |
722 | zserv_client_event(client, ZSERV_CLIENT_READ); | |
723 | ||
724 | /* call callbacks */ | |
725 | hook_call(zserv_client_connect, client); | |
726 | ||
727 | /* start pthread */ | |
728 | frr_pthread_run(client->pthread, NULL); | |
729 | ||
730 | return client; | |
731 | } | |
732 | ||
733 | /* | |
734 | * Accept socket connection. | |
735 | */ | |
736 | static int zserv_accept(struct thread *thread) | |
737 | { | |
738 | int accept_sock; | |
739 | int client_sock; | |
740 | struct sockaddr_in client; | |
741 | socklen_t len; | |
742 | ||
743 | accept_sock = THREAD_FD(thread); | |
744 | ||
745 | /* Reregister myself. */ | |
746 | zserv_event(NULL, ZSERV_ACCEPT); | |
747 | ||
748 | len = sizeof(struct sockaddr_in); | |
749 | client_sock = accept(accept_sock, (struct sockaddr *)&client, &len); | |
750 | ||
751 | if (client_sock < 0) { | |
752 | flog_err_sys(EC_LIB_SOCKET, "Can't accept zebra socket: %s", | |
753 | safe_strerror(errno)); | |
754 | return -1; | |
755 | } | |
756 | ||
757 | /* Make client socket non-blocking. */ | |
758 | set_nonblocking(client_sock); | |
759 | ||
760 | /* Create new zebra client. */ | |
761 | zserv_client_create(client_sock); | |
762 | ||
763 | return 0; | |
764 | } | |
765 | ||
766 | void zserv_close(void) | |
767 | { | |
768 | /* | |
769 | * On shutdown, let's close the socket down | |
770 | * so that long running processes of killing the | |
771 | * routing table doesn't leave us in a bad | |
772 | * state where a client tries to reconnect | |
773 | */ | |
774 | close(zsock); | |
775 | zsock = -1; | |
776 | } | |
777 | ||
778 | void zserv_start(char *path) | |
779 | { | |
780 | int ret; | |
781 | mode_t old_mask; | |
782 | struct sockaddr_storage sa; | |
783 | socklen_t sa_len; | |
784 | ||
785 | if (!frr_zclient_addr(&sa, &sa_len, path)) | |
786 | /* should be caught in zebra main() */ | |
787 | return; | |
788 | ||
789 | /* Set umask */ | |
790 | old_mask = umask(0077); | |
791 | ||
792 | /* Make UNIX domain socket. */ | |
793 | zsock = socket(sa.ss_family, SOCK_STREAM, 0); | |
794 | if (zsock < 0) { | |
795 | flog_err_sys(EC_LIB_SOCKET, "Can't create zserv socket: %s", | |
796 | safe_strerror(errno)); | |
797 | return; | |
798 | } | |
799 | ||
800 | if (sa.ss_family != AF_UNIX) { | |
801 | sockopt_reuseaddr(zsock); | |
802 | sockopt_reuseport(zsock); | |
803 | } else { | |
804 | struct sockaddr_un *suna = (struct sockaddr_un *)&sa; | |
805 | if (suna->sun_path[0]) | |
806 | unlink(suna->sun_path); | |
807 | } | |
808 | ||
809 | setsockopt_so_recvbuf(zsock, 1048576); | |
810 | setsockopt_so_sendbuf(zsock, 1048576); | |
811 | ||
812 | frr_with_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) { | |
813 | ret = bind(zsock, (struct sockaddr *)&sa, sa_len); | |
814 | } | |
815 | if (ret < 0) { | |
816 | flog_err_sys(EC_LIB_SOCKET, "Can't bind zserv socket on %s: %s", | |
817 | path, safe_strerror(errno)); | |
818 | close(zsock); | |
819 | zsock = -1; | |
820 | return; | |
821 | } | |
822 | ||
823 | ret = listen(zsock, 5); | |
824 | if (ret < 0) { | |
825 | flog_err_sys(EC_LIB_SOCKET, | |
826 | "Can't listen to zserv socket %s: %s", path, | |
827 | safe_strerror(errno)); | |
828 | close(zsock); | |
829 | zsock = -1; | |
830 | return; | |
831 | } | |
832 | ||
833 | umask(old_mask); | |
834 | ||
835 | zserv_event(NULL, ZSERV_ACCEPT); | |
836 | } | |
837 | ||
838 | void zserv_event(struct zserv *client, enum zserv_event event) | |
839 | { | |
840 | switch (event) { | |
841 | case ZSERV_ACCEPT: | |
842 | thread_add_read(zrouter.master, zserv_accept, NULL, zsock, | |
843 | NULL); | |
844 | break; | |
845 | case ZSERV_PROCESS_MESSAGES: | |
846 | thread_add_event(zrouter.master, zserv_process_messages, client, | |
847 | 0, &client->t_process); | |
848 | break; | |
849 | case ZSERV_HANDLE_CLIENT_FAIL: | |
850 | thread_add_event(zrouter.master, zserv_handle_client_fail, | |
851 | client, 0, &client->t_cleanup); | |
852 | } | |
853 | } | |
854 | ||
855 | ||
856 | /* General purpose ---------------------------------------------------------- */ | |
857 | ||
858 | #define ZEBRA_TIME_BUF 32 | |
859 | static char *zserv_time_buf(time_t *time1, char *buf, int buflen) | |
860 | { | |
861 | struct tm *tm; | |
862 | time_t now; | |
863 | ||
864 | assert(buf != NULL); | |
865 | assert(buflen >= ZEBRA_TIME_BUF); | |
866 | assert(time1 != NULL); | |
867 | ||
868 | if (!*time1) { | |
869 | snprintf(buf, buflen, "never "); | |
870 | return (buf); | |
871 | } | |
872 | ||
873 | now = monotime(NULL); | |
874 | now -= *time1; | |
875 | tm = gmtime(&now); | |
876 | ||
877 | if (now < ONE_DAY_SECOND) | |
878 | snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min, | |
879 | tm->tm_sec); | |
880 | else if (now < ONE_WEEK_SECOND) | |
881 | snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour, | |
882 | tm->tm_min); | |
883 | else | |
884 | snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7, | |
885 | tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour); | |
886 | return buf; | |
887 | } | |
888 | ||
/*
 * Display detailed information about a single zserv client on a vty
 * session: connection times, last message activity, per-type operation
 * counters, notification counters, and graceful-restart capabilities.
 */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint32_t last_read_cmd, last_write_cmd;
	struct client_gr_info *info = NULL;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);

	/* Timestamps are updated from another thread; load them atomically
	 * (relaxed is sufficient — these are display-only snapshots). */
	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/* Per-type operation counters; a literal 0 is printed where the
	 * "Update" column has no corresponding counter. */
	vty_out(vty, "Type        Add         Update      Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4        %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6        %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4   %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6   %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected   %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer    %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "NHT v4      %-12d%-12d%-12d\n",
		client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt);
	vty_out(vty, "NHT v6      %-12d%-12d%-12d\n",
		client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
	vty_out(vty, "VxLAN SG    %-12d%-12d%-12d\n", client->vxlan_sg_add_cnt,
		0, client->vxlan_sg_del_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

	/* Graceful-restart capability per VRF, if the client announced any. */
	TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) {
		vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id));
		vty_out(vty, "Capabilities : ");
		switch (info->capabilities) {
		case ZEBRA_CLIENT_GR_CAPABILITIES:
			vty_out(vty, "Graceful Restart\n");
			break;
		case ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE:
		case ZEBRA_CLIENT_ROUTE_UPDATE_PENDING:
		case ZEBRA_CLIENT_GR_DISABLE:
		case ZEBRA_CLIENT_RIB_STALE_TIME:
			vty_out(vty, "None\n");
			break;
		}
	}

#if defined DEV_BUILD
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
}
998 | ||
/*
 * Display graceful-restart "stale client" information for one client:
 * per-VRF GR capabilities, time since the stale client's restart, the
 * stale-path removal timer, and the current AFI/prefix being processed.
 */
static void zebra_show_stale_client_detail(struct vty *vty,
					   struct zserv *client)
{
	char buf[PREFIX2STR_BUFFER];
	struct tm *tm;
	struct timeval tv;
	time_t uptime;
	struct client_gr_info *info = NULL;
	struct zserv *s = NULL;

	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);

	TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) {
		vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id));
		vty_out(vty, "Capabilities : ");
		switch (info->capabilities) {
		case ZEBRA_CLIENT_GR_CAPABILITIES:
			vty_out(vty, "Graceful Restart\n");
			break;
		case ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE:
		case ZEBRA_CLIENT_ROUTE_UPDATE_PENDING:
		case ZEBRA_CLIENT_GR_DISABLE:
		case ZEBRA_CLIENT_RIB_STALE_TIME:
			vty_out(vty, "None\n");
			break;
		}

		if (ZEBRA_CLIENT_GR_ENABLED(info->capabilities)) {
			if (info->stale_client_ptr) {
				/* The stale instance of this client kept
				 * around across a restart. */
				s = (struct zserv *)(info->stale_client_ptr);
				uptime = monotime(&tv);
				uptime -= s->restart_time;
				tm = gmtime(&uptime);
				vty_out(vty, "Last restart time : ");
				/* Same duration formatting tiers as
				 * zserv_time_buf(): day / week cutoffs. */
				if (uptime < ONE_DAY_SECOND)
					vty_out(vty, "%02d:%02d:%02d",
						tm->tm_hour, tm->tm_min,
						tm->tm_sec);
				else if (uptime < ONE_WEEK_SECOND)
					vty_out(vty, "%dd%02dh%02dm",
						tm->tm_yday, tm->tm_hour,
						tm->tm_min);
				else
					vty_out(vty, "%02dw%dd%02dh",
						tm->tm_yday / 7,
						tm->tm_yday - ((tm->tm_yday / 7)
								* 7),
						tm->tm_hour);
				vty_out(vty, " ago\n");

				vty_out(vty, "Stalepath removal time: %d sec\n",
					info->stale_removal_time);
				if (info->t_stale_removal) {
					vty_out(vty,
						"Stale delete timer: %ld sec\n",
						thread_timer_remain_second(
							info->t_stale_removal));
				}
			}
			vty_out(vty, "Current AFI : %d\n", info->current_afi);
			if (info->current_prefix) {
				prefix2str(info->current_prefix, buf,
					   sizeof(buf));
				vty_out(vty, "Current prefix : %s\n", buf);
			}
		}
	}
	vty_out(vty, "\n");
	return;
}
1071 | ||
1072 | static void zebra_show_client_brief(struct vty *vty, struct zserv *client) | |
1073 | { | |
1074 | char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF]; | |
1075 | char wbuf[ZEBRA_TIME_BUF]; | |
1076 | time_t connect_time, last_read_time, last_write_time; | |
1077 | ||
1078 | connect_time = (time_t)atomic_load_explicit(&client->connect_time, | |
1079 | memory_order_relaxed); | |
1080 | last_read_time = (time_t)atomic_load_explicit(&client->last_read_time, | |
1081 | memory_order_relaxed); | |
1082 | last_write_time = (time_t)atomic_load_explicit(&client->last_write_time, | |
1083 | memory_order_relaxed); | |
1084 | ||
1085 | vty_out(vty, "%-10s%12s %12s%12s%8d/%-8d%8d/%-8d\n", | |
1086 | zebra_route_string(client->proto), | |
1087 | zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF), | |
1088 | zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF), | |
1089 | zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF), | |
1090 | client->v4_route_add_cnt + client->v4_route_upd8_cnt, | |
1091 | client->v4_route_del_cnt, | |
1092 | client->v6_route_add_cnt + client->v6_route_upd8_cnt, | |
1093 | client->v6_route_del_cnt); | |
1094 | } | |
1095 | ||
1096 | struct zserv *zserv_find_client(uint8_t proto, unsigned short instance) | |
1097 | { | |
1098 | struct listnode *node, *nnode; | |
1099 | struct zserv *client; | |
1100 | ||
1101 | for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { | |
1102 | if (client->proto == proto && client->instance == instance) | |
1103 | return client; | |
1104 | } | |
1105 | ||
1106 | return NULL; | |
1107 | } | |
1108 | ||
/* Debugging command: dump detailed + stale-client info for every
 * connected zserv client. */
DEFUN (show_zebra_client,
       show_zebra_client_cmd,
       "show zebra client",
       SHOW_STR
       ZEBRA_STR
       "Client information\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Walk every connected client; no mutation, so RO iteration. */
	for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) {
		zebra_show_client_detail(vty, client);
		vty_out(vty, "Stale Client Information\n");
		vty_out(vty, "------------------------\n");
		zebra_show_stale_client_detail(vty, client);
	}

	return CMD_SUCCESS;
}
1129 | ||
/* Debugging command: one summary line per connected zserv client. */
DEFUN (show_zebra_client_summary,
       show_zebra_client_summary_cmd,
       "show zebra client summary",
       SHOW_STR
       ZEBRA_STR
       "Client information brief\n"
       "Brief Summary\n")
{
	struct listnode *node;
	struct zserv *client;

	/* Table header, then one row per client. */
	vty_out(vty,
		"Name      Connect Time    Last Read  Last Write  IPv4 Routes       IPv6 Routes    \n");
	vty_out(vty,
		"--------------------------------------------------------------------------------\n");

	for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client))
		zebra_show_client_brief(vty, client);

	vty_out(vty, "Routes column shows (added+updated)/deleted\n");
	return CMD_SUCCESS;
}
1153 | ||
#if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing helper: create a zserv client whose input stream is read from
 * the given file instead of an accepted socket.
 */
void zserv_read_file(char *input)
{
	int fd;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	/* Previously the result was passed to zserv_client_create()
	 * unchecked; a failed open handed it a fd of -1. */
	if (fd < 0) {
		flog_err_sys(EC_LIB_SYSTEM_CALL, "Cannot open %s: %s", input,
			     safe_strerror(errno));
		return;
	}

	zserv_client_create(fd);
}
#endif
1164 | ||
1165 | void zserv_init(void) | |
1166 | { | |
1167 | /* Client list init. */ | |
1168 | zrouter.client_list = list_new(); | |
1169 | zrouter.stale_client_list = list_new(); | |
1170 | ||
1171 | /* Misc init. */ | |
1172 | zsock = -1; | |
1173 | ||
1174 | install_element(ENABLE_NODE, &show_zebra_client_cmd); | |
1175 | install_element(ENABLE_NODE, &show_zebra_client_summary_cmd); | |
1176 | } |