]>
Commit | Line | Data |
---|---|---|
bf094f69 QY |
1 | /* |
2 | * Zebra API server. | |
3 | * Portions: | |
4 | * Copyright (C) 1997-1999 Kunihiro Ishiguro | |
5 | * Copyright (C) 2015-2018 Cumulus Networks, Inc. | |
6 | * et al. | |
718e3744 | 7 | * |
bf094f69 QY |
8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | |
10 | * Software Foundation; either version 2 of the License, or (at your option) | |
11 | * any later version. | |
718e3744 | 12 | * |
bf094f69 QY |
13 | * This program is distributed in the hope that it will be useful, but WITHOUT |
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
16 | * more details. | |
718e3744 | 17 | * |
896014f4 DL |
18 | * You should have received a copy of the GNU General Public License along |
19 | * with this program; see the file COPYING; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
718e3744 | 21 | */ |
22 | ||
23 | #include <zebra.h> | |
d8647095 QY |
24 | |
25 | /* clang-format off */ | |
bf094f69 QY |
26 | #include <errno.h> /* for errno */ |
27 | #include <netinet/in.h> /* for sockaddr_in */ | |
28 | #include <stdint.h> /* for uint8_t */ | |
29 | #include <stdio.h> /* for snprintf */ | |
30 | #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */ | |
31 | #include <sys/stat.h> /* for umask, mode_t */ | |
32 | #include <sys/un.h> /* for sockaddr_un */ | |
33 | #include <time.h> /* for NULL, tm, gmtime, time_t */ | |
34 | #include <unistd.h> /* for close, unlink, ssize_t */ | |
35 | ||
36 | #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */ | |
37 | #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */ | |
453844ab | 38 | #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */ |
bf094f69 QY |
39 | #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */ |
40 | #include "lib/libfrr.h" /* for frr_zclient_addr */ | |
41 | #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */ | |
42 | #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */ | |
43 | #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */ | |
44 | #include "lib/network.h" /* for set_nonblocking */ | |
45 | #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */ | |
46 | #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */ | |
47 | #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */ | |
48 | #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */ | |
49 | #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */ | |
50 | #include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */ | |
51 | #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */ | |
52 | #include "lib/vty.h" /* for vty_out, vty (ptr only) */ | |
53 | #include "lib/zassert.h" /* for assert */ | |
54 | #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */ | |
52f6868d | 55 | #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */ |
ae6670d0 | 56 | #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */ |
174482ef | 57 | #include "lib/lib_errors.h" /* for generic ferr ids */ |
bf094f69 QY |
58 | |
59 | #include "zebra/debug.h" /* for various debugging macros */ | |
bf094f69 | 60 | #include "zebra/rib.h" /* for rib_score_proto */ |
bf094f69 | 61 | #include "zebra/zapi_msg.h" /* for zserv_handle_commands */ |
bf094f69 | 62 | #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */ |
d8647095 | 63 | #include "zebra/zserv.h" /* for zserv */ |
364fed6b | 64 | #include "zebra/zebra_errors.h" /* for error messages */ |
d8647095 | 65 | /* clang-format on */ |
6b0655a2 | 66 | |
1002497a | 67 | /* privileges */ |
edd7c245 | 68 | extern struct zebra_privs_t zserv_privs; |
453844ab | 69 | |
/*
 * Client thread events.
 *
 * These are used almost exclusively by client threads to drive their own event
 * loops. The only exception is in zserv_client_create(), which pushes an
 * initial ZSERV_CLIENT_READ event to start the API handler loop.
 */
enum zserv_client_event {
	/* Schedule a socket read */
	ZSERV_CLIENT_READ,
	/* Schedule a buffer write */
	ZSERV_CLIENT_WRITE,
};

/*
 * Main thread events.
 *
 * These are used by client threads to notify the main thread about various
 * events and to make processing requests.
 */
enum zserv_event {
	/* Schedule listen job on Zebra API socket */
	ZSERV_ACCEPT,
	/* The calling client has packets on its input buffer */
	ZSERV_PROCESS_MESSAGES,
	/* The calling client wishes to be killed */
	ZSERV_HANDLE_CLIENT_FAIL,
};

/*
 * Zebra server event driver for all client threads.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on the pthread running the
 * provided client.
 *
 * client
 *    the client in question, and thread target
 *
 * event
 *    the event to notify them about
 */
static void zserv_client_event(struct zserv *client,
			       enum zserv_client_event event);

/*
 * Zebra server event driver for the main thread.
 *
 * This is essentially a wrapper around thread_add_event() that centralizes
 * those scheduling calls into one place.
 *
 * All calls to this function schedule an event on Zebra's main pthread.
 *
 * client
 *    the client in question
 *
 * event
 *    the event to notify the main thread about
 */
static void zserv_event(struct zserv *client, enum zserv_event event);


/* Client thread lifecycle -------------------------------------------------- */
e16abbb3 | 132 | |
e16abbb3 | 133 | |
f2efe6a3 | 134 | /* Client thread lifecycle -------------------------------------------------- */ |
e16abbb3 | 135 | |
9bcbcae2 | 136 | /* |
1002497a QY |
137 | * Log zapi message to zlog. |
138 | * | |
139 | * errmsg (optional) | |
140 | * Debugging message | |
9bcbcae2 | 141 | * |
1002497a QY |
142 | * msg |
143 | * The message | |
144 | * | |
145 | * hdr (optional) | |
146 | * The message header | |
9bcbcae2 | 147 | */ |
1002497a QY |
148 | static void zserv_log_message(const char *errmsg, struct stream *msg, |
149 | struct zmsghdr *hdr) | |
150 | { | |
151 | zlog_debug("Rx'd ZAPI message"); | |
152 | if (errmsg) | |
153 | zlog_debug("%s", errmsg); | |
154 | if (hdr) { | |
155 | zlog_debug(" Length: %d", hdr->length); | |
156 | zlog_debug("Command: %s", zserv_command_string(hdr->command)); | |
157 | zlog_debug(" VRF: %u", hdr->vrf_id); | |
158 | } | |
159 | zlog_hexdump(msg->data, STREAM_READABLE(msg)); | |
9bcbcae2 QY |
160 | } |
161 | ||
f2efe6a3 QY |
162 | /* |
163 | * Gracefully shut down a client connection. | |
164 | * | |
f3e33b69 QY |
165 | * Cancel any pending tasks for the client's thread. Then schedule a task on |
166 | * the main thread to shut down the calling thread. | |
f2efe6a3 | 167 | * |
c0226378 QY |
168 | * It is not safe to close the client socket in this function. The socket is |
169 | * owned by the main thread. | |
170 | * | |
f2efe6a3 QY |
171 | * Must be called from the client pthread, never the main thread. |
172 | */ | |
f3e33b69 | 173 | static void zserv_client_fail(struct zserv *client) |
f2efe6a3 | 174 | { |
e914ccbe | 175 | flog_warn(EC_ZEBRA_CLIENT_IO_ERROR, |
9df414fe | 176 | "Client '%s' encountered an error and is shutting down.", |
f3e33b69 QY |
177 | zebra_route_string(client->proto)); |
178 | ||
c2ca5ee6 | 179 | atomic_store_explicit(&client->pthread->running, false, |
f3e33b69 | 180 | memory_order_relaxed); |
c0226378 | 181 | |
f2efe6a3 QY |
182 | THREAD_OFF(client->t_read); |
183 | THREAD_OFF(client->t_write); | |
f3e33b69 | 184 | zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL); |
f2efe6a3 QY |
185 | } |
186 | ||
1002497a | 187 | /* |
370d8dad QY |
188 | * Write all pending messages to client socket. |
189 | * | |
29bed51b QY |
190 | * This function first attempts to flush any buffered data. If unsuccessful, |
191 | * the function reschedules itself and returns. If successful, it pops all | |
192 | * available messages from the output queue and continues to write data | |
193 | * directly to the socket until the socket would block. If the socket never | |
194 | * blocks and all data is written, the function returns without rescheduling | |
195 | * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is | |
196 | * buffered and the function reschedules itself. | |
370d8dad | 197 | * |
29bed51b QY |
198 | * The utility of the buffer is that it allows us to vastly reduce lock |
199 | * contention by allowing us to pop *all* messages off the output queue at once | |
200 | * instead of locking and unlocking each time we want to pop a single message | |
201 | * off the queue. The same thing could arguably be accomplished faster by | |
202 | * allowing the main thread to write directly into the buffer instead of | |
203 | * enqueuing packets onto an intermediary queue, but the intermediary queue | |
204 | * allows us to expose information about input and output queues to the user in | |
205 | * terms of number of packets rather than size of data. | |
1002497a QY |
206 | */ |
207 | static int zserv_write(struct thread *thread) | |
d62a17ae | 208 | { |
1002497a QY |
209 | struct zserv *client = THREAD_ARG(thread); |
210 | struct stream *msg; | |
ce4f1050 | 211 | uint32_t wcmd = 0; |
29bed51b QY |
212 | struct stream_fifo *cache; |
213 | ||
214 | /* If we have any data pending, try to flush it first */ | |
ccd51bd2 | 215 | switch (buffer_flush_all(client->wb, client->sock)) { |
29bed51b QY |
216 | case BUFFER_ERROR: |
217 | goto zwrite_fail; | |
218 | case BUFFER_PENDING: | |
ccd51bd2 QY |
219 | atomic_store_explicit(&client->last_write_time, |
220 | (uint32_t)monotime(NULL), | |
221 | memory_order_relaxed); | |
29bed51b QY |
222 | zserv_client_event(client, ZSERV_CLIENT_WRITE); |
223 | return 0; | |
224 | case BUFFER_EMPTY: | |
225 | break; | |
226 | } | |
227 | ||
228 | cache = stream_fifo_new(); | |
89f4e507 | 229 | |
329e35da QY |
230 | pthread_mutex_lock(&client->obuf_mtx); |
231 | { | |
c2ca5ee6 | 232 | while (stream_fifo_head(client->obuf_fifo)) |
370d8dad QY |
233 | stream_fifo_push(cache, |
234 | stream_fifo_pop(client->obuf_fifo)); | |
329e35da QY |
235 | } |
236 | pthread_mutex_unlock(&client->obuf_mtx); | |
237 | ||
ccd51bd2 QY |
238 | if (cache->tail) { |
239 | msg = cache->tail; | |
370d8dad | 240 | stream_set_getp(msg, 0); |
370d8dad | 241 | wcmd = stream_getw_from(msg, 6); |
ccd51bd2 | 242 | } |
822167e7 | 243 | |
ccd51bd2 QY |
244 | while (stream_fifo_head(cache)) { |
245 | msg = stream_fifo_pop(cache); | |
246 | buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg)); | |
370d8dad QY |
247 | stream_free(msg); |
248 | } | |
1002497a | 249 | |
822167e7 | 250 | stream_fifo_free(cache); |
1002497a | 251 | |
ccd51bd2 QY |
252 | /* If we have any data pending, try to flush it first */ |
253 | switch (buffer_flush_all(client->wb, client->sock)) { | |
254 | case BUFFER_ERROR: | |
255 | goto zwrite_fail; | |
256 | case BUFFER_PENDING: | |
257 | atomic_store_explicit(&client->last_write_time, | |
258 | (uint32_t)monotime(NULL), | |
259 | memory_order_relaxed); | |
260 | zserv_client_event(client, ZSERV_CLIENT_WRITE); | |
261 | return 0; | |
ccd51bd2 QY |
262 | case BUFFER_EMPTY: |
263 | break; | |
264 | } | |
265 | ||
370d8dad QY |
266 | atomic_store_explicit(&client->last_write_cmd, wcmd, |
267 | memory_order_relaxed); | |
1002497a | 268 | |
52f6868d | 269 | atomic_store_explicit(&client->last_write_time, |
370d8dad | 270 | (uint32_t)monotime(NULL), memory_order_relaxed); |
52f6868d | 271 | |
1002497a | 272 | return 0; |
29bed51b QY |
273 | |
274 | zwrite_fail: | |
e914ccbe | 275 | flog_warn(EC_ZEBRA_CLIENT_WRITE_FAILED, |
9df414fe | 276 | "%s: could not write to %s [fd = %d], closing.", __func__, |
29bed51b | 277 | zebra_route_string(client->proto), client->sock); |
f3e33b69 | 278 | zserv_client_fail(client); |
29bed51b | 279 | return 0; |
0c5e7be5 DS |
280 | } |
281 | ||
329e35da QY |
282 | /* |
283 | * Read and process data from a client socket. | |
284 | * | |
285 | * The responsibilities here are to read raw data from the client socket, | |
286 | * validate the header, encapsulate it into a single stream object, push it | |
287 | * onto the input queue and then notify the main thread that there is new data | |
288 | * available. | |
289 | * | |
290 | * This function first looks for any data in the client structure's working | |
291 | * input buffer. If data is present, it is assumed that reading stopped in a | |
292 | * previous invocation of this task and needs to be resumed to finish a message. | |
293 | * Otherwise, the socket data stream is assumed to be at the beginning of a new | |
294 | * ZAPI message (specifically at the header). The header is read and validated. | |
295 | * If the header passed validation then the length field found in the header is | |
296 | * used to compute the total length of the message. That much data is read (but | |
297 | * not inspected), appended to the header, placed into a stream and pushed onto | |
298 | * the client's input queue. A task is then scheduled on the main thread to | |
299 | * process the client's input queue. Finally, if all of this was successful, | |
300 | * this task reschedules itself. | |
301 | * | |
302 | * Any failure in any of these actions is handled by terminating the client. | |
303 | */ | |
1002497a | 304 | static int zserv_read(struct thread *thread) |
0c5e7be5 | 305 | { |
ae6670d0 | 306 | struct zserv *client = THREAD_ARG(thread); |
0c5e7be5 | 307 | int sock; |
0c5e7be5 | 308 | size_t already; |
ae6670d0 QY |
309 | struct stream_fifo *cache; |
310 | uint32_t p2p_orig; | |
311 | ||
1572d9af QY |
312 | uint32_t p2p; |
313 | struct zmsghdr hdr; | |
314 | ||
ae6670d0 QY |
315 | p2p_orig = atomic_load_explicit(&zebrad.packets_to_process, |
316 | memory_order_relaxed); | |
317 | cache = stream_fifo_new(); | |
370d8dad | 318 | p2p = p2p_orig; |
0c5e7be5 | 319 | sock = THREAD_FD(thread); |
0c5e7be5 | 320 | |
43ea2c76 | 321 | while (p2p) { |
107afcd1 QY |
322 | ssize_t nb; |
323 | bool hdrvalid; | |
324 | char errmsg[256]; | |
325 | ||
1002497a QY |
326 | already = stream_get_endp(client->ibuf_work); |
327 | ||
5a762c8a | 328 | /* Read length and command (if we don't have it already). */ |
1002497a QY |
329 | if (already < ZEBRA_HEADER_SIZE) { |
330 | nb = stream_read_try(client->ibuf_work, sock, | |
331 | ZEBRA_HEADER_SIZE - already); | |
03f29018 DS |
332 | if ((nb == 0 || nb == -1)) { |
333 | if (IS_ZEBRA_DEBUG_EVENT) | |
334 | zlog_debug("connection closed socket [%d]", | |
335 | sock); | |
1002497a | 336 | goto zread_fail; |
03f29018 | 337 | } |
1002497a | 338 | if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) { |
5a762c8a | 339 | /* Try again later. */ |
1002497a | 340 | break; |
5a762c8a DS |
341 | } |
342 | already = ZEBRA_HEADER_SIZE; | |
0c5e7be5 | 343 | } |
0c5e7be5 | 344 | |
5a762c8a | 345 | /* Reset to read from the beginning of the incoming packet. */ |
1002497a | 346 | stream_set_getp(client->ibuf_work, 0); |
0c5e7be5 | 347 | |
5a762c8a | 348 | /* Fetch header values */ |
1002497a | 349 | hdrvalid = zapi_parse_header(client->ibuf_work, &hdr); |
0c5e7be5 | 350 | |
1002497a QY |
351 | if (!hdrvalid) { |
352 | snprintf(errmsg, sizeof(errmsg), | |
353 | "%s: Message has corrupt header", __func__); | |
354 | zserv_log_message(errmsg, client->ibuf_work, NULL); | |
355 | goto zread_fail; | |
0c5e7be5 | 356 | } |
1002497a QY |
357 | |
358 | /* Validate header */ | |
359 | if (hdr.marker != ZEBRA_HEADER_MARKER | |
360 | || hdr.version != ZSERV_VERSION) { | |
361 | snprintf( | |
362 | errmsg, sizeof(errmsg), | |
363 | "Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d", | |
364 | __func__, sock, hdr.marker, hdr.version); | |
365 | zserv_log_message(errmsg, client->ibuf_work, &hdr); | |
366 | goto zread_fail; | |
5a762c8a | 367 | } |
1002497a QY |
368 | if (hdr.length < ZEBRA_HEADER_SIZE) { |
369 | snprintf( | |
370 | errmsg, sizeof(errmsg), | |
371 | "Message has corrupt header\n%s: socket %d message length %u is less than header size %d", | |
372 | __func__, sock, hdr.length, ZEBRA_HEADER_SIZE); | |
373 | zserv_log_message(errmsg, client->ibuf_work, &hdr); | |
374 | goto zread_fail; | |
375 | } | |
376 | if (hdr.length > STREAM_SIZE(client->ibuf_work)) { | |
377 | snprintf( | |
378 | errmsg, sizeof(errmsg), | |
379 | "Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu", | |
380 | __func__, sock, hdr.length, | |
381 | (unsigned long)STREAM_SIZE(client->ibuf_work)); | |
1572d9af | 382 | zserv_log_message(errmsg, client->ibuf_work, &hdr); |
1002497a | 383 | goto zread_fail; |
0c5e7be5 | 384 | } |
0c5e7be5 | 385 | |
5a762c8a | 386 | /* Read rest of data. */ |
1002497a QY |
387 | if (already < hdr.length) { |
388 | nb = stream_read_try(client->ibuf_work, sock, | |
389 | hdr.length - already); | |
03f29018 DS |
390 | if ((nb == 0 || nb == -1)) { |
391 | if (IS_ZEBRA_DEBUG_EVENT) | |
392 | zlog_debug( | |
393 | "connection closed [%d] when reading zebra data", | |
394 | sock); | |
1002497a | 395 | goto zread_fail; |
03f29018 | 396 | } |
1002497a | 397 | if (nb != (ssize_t)(hdr.length - already)) { |
5a762c8a | 398 | /* Try again later. */ |
1002497a | 399 | break; |
5a762c8a DS |
400 | } |
401 | } | |
0c5e7be5 | 402 | |
5a762c8a DS |
403 | /* Debug packet information. */ |
404 | if (IS_ZEBRA_DEBUG_EVENT) | |
996c9314 LB |
405 | zlog_debug("zebra message comes from socket [%d]", |
406 | sock); | |
0c5e7be5 | 407 | |
0c5e7be5 | 408 | if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV) |
1002497a | 409 | zserv_log_message(NULL, client->ibuf_work, &hdr); |
0c5e7be5 | 410 | |
1572d9af QY |
411 | stream_set_getp(client->ibuf_work, 0); |
412 | struct stream *msg = stream_dup(client->ibuf_work); | |
413 | ||
414 | stream_fifo_push(cache, msg); | |
415 | stream_reset(client->ibuf_work); | |
43ea2c76 | 416 | p2p--; |
1572d9af QY |
417 | } |
418 | ||
419 | if (p2p < p2p_orig) { | |
420 | /* update session statistics */ | |
52f6868d QY |
421 | atomic_store_explicit(&client->last_read_time, monotime(NULL), |
422 | memory_order_relaxed); | |
423 | atomic_store_explicit(&client->last_read_cmd, hdr.command, | |
424 | memory_order_relaxed); | |
5a762c8a | 425 | |
1572d9af | 426 | /* publish read packets on client's input queue */ |
329e35da QY |
427 | pthread_mutex_lock(&client->ibuf_mtx); |
428 | { | |
1572d9af QY |
429 | while (cache->head) |
430 | stream_fifo_push(client->ibuf_fifo, | |
431 | stream_fifo_pop(cache)); | |
329e35da QY |
432 | } |
433 | pthread_mutex_unlock(&client->ibuf_mtx); | |
822167e7 QY |
434 | |
435 | /* Schedule job to process those packets */ | |
436 | zserv_event(client, ZSERV_PROCESS_MESSAGES); | |
437 | ||
d62a17ae | 438 | } |
439 | ||
1002497a | 440 | if (IS_ZEBRA_DEBUG_PACKET) |
1572d9af | 441 | zlog_debug("Read %d packets", p2p_orig - p2p); |
1002497a | 442 | |
1002497a | 443 | /* Reschedule ourselves */ |
21ccc0cf | 444 | zserv_client_event(client, ZSERV_CLIENT_READ); |
1002497a | 445 | |
1572d9af QY |
446 | stream_fifo_free(cache); |
447 | ||
d62a17ae | 448 | return 0; |
1002497a QY |
449 | |
450 | zread_fail: | |
1572d9af | 451 | stream_fifo_free(cache); |
f3e33b69 | 452 | zserv_client_fail(client); |
1002497a | 453 | return -1; |
718e3744 | 454 | } |
455 | ||
21ccc0cf QY |
456 | static void zserv_client_event(struct zserv *client, |
457 | enum zserv_client_event event) | |
1002497a QY |
458 | { |
459 | switch (event) { | |
21ccc0cf | 460 | case ZSERV_CLIENT_READ: |
329e35da QY |
461 | thread_add_read(client->pthread->master, zserv_read, client, |
462 | client->sock, &client->t_read); | |
1002497a | 463 | break; |
21ccc0cf | 464 | case ZSERV_CLIENT_WRITE: |
329e35da | 465 | thread_add_write(client->pthread->master, zserv_write, client, |
1002497a QY |
466 | client->sock, &client->t_write); |
467 | break; | |
468 | } | |
469 | } | |
718e3744 | 470 | |
f2efe6a3 QY |
471 | /* Main thread lifecycle ---------------------------------------------------- */ |
472 | ||
f2efe6a3 QY |
473 | /* |
474 | * Read and process messages from a client. | |
475 | * | |
476 | * This task runs on the main pthread. It is scheduled by client pthreads when | |
477 | * they have new messages available on their input queues. The client is passed | |
478 | * as the task argument. | |
479 | * | |
480 | * Each message is popped off the client's input queue and the action associated | |
481 | * with the message is executed. This proceeds until there are no more messages, | |
904e0d88 QY |
482 | * an error occurs, or the processing limit is reached. |
483 | * | |
822167e7 QY |
484 | * The client's I/O thread can push at most zebrad.packets_to_process messages |
485 | * onto the input buffer before notifying us there are packets to read. As long | |
486 | * as we always process zebrad.packets_to_process messages here, then we can | |
487 | * rely on the read thread to handle queuing this task enough times to process | |
488 | * everything on the input queue. | |
f2efe6a3 QY |
489 | */ |
490 | static int zserv_process_messages(struct thread *thread) | |
491 | { | |
492 | struct zserv *client = THREAD_ARG(thread); | |
f2efe6a3 | 493 | struct stream *msg; |
904e0d88 | 494 | struct stream_fifo *cache = stream_fifo_new(); |
f2efe6a3 | 495 | |
904e0d88 | 496 | uint32_t p2p = zebrad.packets_to_process; |
f2efe6a3 | 497 | |
f2efe6a3 QY |
498 | pthread_mutex_lock(&client->ibuf_mtx); |
499 | { | |
822167e7 QY |
500 | uint32_t i; |
501 | for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo); | |
502 | ++i) { | |
503 | msg = stream_fifo_pop(client->ibuf_fifo); | |
504 | stream_fifo_push(cache, msg); | |
505 | } | |
904e0d88 | 506 | |
822167e7 | 507 | msg = NULL; |
f2efe6a3 QY |
508 | } |
509 | pthread_mutex_unlock(&client->ibuf_mtx); | |
510 | ||
822167e7 | 511 | while (stream_fifo_head(cache)) { |
904e0d88 | 512 | msg = stream_fifo_pop(cache); |
904e0d88 QY |
513 | zserv_handle_commands(client, msg); |
514 | stream_free(msg); | |
515 | } | |
516 | ||
517 | stream_fifo_free(cache); | |
518 | ||
f2efe6a3 QY |
519 | return 0; |
520 | } | |
521 | ||
21ccc0cf | 522 | int zserv_send_message(struct zserv *client, struct stream *msg) |
f2efe6a3 | 523 | { |
727c9b99 QY |
524 | /* |
525 | * This is a somewhat poorly named variable added with Zebra's portion | |
526 | * of the label manager. That component does not use the regular | |
527 | * zserv/zapi_msg interface for handling its messages, as the client | |
528 | * itself runs in-process. Instead it uses synchronous writes on the | |
529 | * zserv client's socket directly in the zread* handlers for its | |
530 | * message types. Furthermore, it cannot handle the usual messages | |
531 | * Zebra sends (such as those for interface changes) and so has added | |
532 | * this flag and check here as a hack to suppress all messages that it | |
533 | * does not explicitly know about. | |
534 | * | |
535 | * In any case this needs to be cleaned up at some point. | |
536 | * | |
537 | * See also: | |
538 | * zread_label_manager_request | |
539 | * zsend_label_manager_connect_response | |
540 | * zsend_assign_label_chunk_response | |
541 | * ... | |
542 | */ | |
543 | if (client->is_synchronous) | |
544 | return 0; | |
545 | ||
f2efe6a3 QY |
546 | pthread_mutex_lock(&client->obuf_mtx); |
547 | { | |
548 | stream_fifo_push(client->obuf_fifo, msg); | |
f2efe6a3 QY |
549 | } |
550 | pthread_mutex_unlock(&client->obuf_mtx); | |
ccd51bd2 QY |
551 | |
552 | zserv_client_event(client, ZSERV_CLIENT_WRITE); | |
553 | ||
f2efe6a3 QY |
554 | return 0; |
555 | } | |
556 | ||
557 | ||
558 | /* Hooks for client connect / disconnect */ | |
21ccc0cf QY |
559 | DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client)); |
560 | DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client)); | |
f2efe6a3 QY |
561 | |
562 | /* | |
563 | * Deinitialize zebra client. | |
564 | * | |
565 | * - Deregister and deinitialize related internal resources | |
566 | * - Gracefully close socket | |
567 | * - Free associated resources | |
568 | * - Free client structure | |
569 | * | |
570 | * This does *not* take any action on the struct thread * fields. These are | |
571 | * managed by the owning pthread and any tasks associated with them must have | |
572 | * been stopped prior to invoking this function. | |
573 | */ | |
21ccc0cf | 574 | static void zserv_client_free(struct zserv *client) |
f2efe6a3 | 575 | { |
21ccc0cf | 576 | hook_call(zserv_client_close, client); |
f2efe6a3 QY |
577 | |
578 | /* Close file descriptor. */ | |
579 | if (client->sock) { | |
580 | unsigned long nroutes; | |
581 | ||
582 | close(client->sock); | |
a580357a | 583 | |
f2efe6a3 QY |
584 | nroutes = rib_score_proto(client->proto, client->instance); |
585 | zlog_notice( | |
586 | "client %d disconnected. %lu %s routes removed from the rib", | |
587 | client->sock, nroutes, | |
588 | zebra_route_string(client->proto)); | |
589 | client->sock = -1; | |
590 | } | |
591 | ||
592 | /* Free stream buffers. */ | |
593 | if (client->ibuf_work) | |
594 | stream_free(client->ibuf_work); | |
595 | if (client->obuf_work) | |
596 | stream_free(client->obuf_work); | |
597 | if (client->ibuf_fifo) | |
598 | stream_fifo_free(client->ibuf_fifo); | |
599 | if (client->obuf_fifo) | |
600 | stream_fifo_free(client->obuf_fifo); | |
601 | if (client->wb) | |
602 | buffer_free(client->wb); | |
603 | ||
604 | /* Free buffer mutexes */ | |
605 | pthread_mutex_destroy(&client->obuf_mtx); | |
606 | pthread_mutex_destroy(&client->ibuf_mtx); | |
607 | ||
608 | /* Free bitmaps. */ | |
609 | for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) | |
610 | for (int i = 0; i < ZEBRA_ROUTE_MAX; i++) | |
611 | vrf_bitmap_free(client->redist[afi][i]); | |
612 | ||
613 | vrf_bitmap_free(client->redist_default); | |
614 | vrf_bitmap_free(client->ifinfo); | |
615 | vrf_bitmap_free(client->ridinfo); | |
616 | ||
617 | XFREE(MTYPE_TMP, client); | |
618 | } | |
619 | ||
f3e33b69 | 620 | void zserv_close_client(struct zserv *client) |
f2efe6a3 | 621 | { |
f3e33b69 | 622 | /* synchronously stop and join pthread */ |
f2efe6a3 QY |
623 | frr_pthread_stop(client->pthread, NULL); |
624 | ||
f3e33b69 QY |
625 | if (IS_ZEBRA_DEBUG_EVENT) |
626 | zlog_debug("Closing client '%s'", | |
627 | zebra_route_string(client->proto)); | |
628 | ||
f3e33b69 QY |
629 | thread_cancel_event(zebrad.master, client); |
630 | THREAD_OFF(client->t_cleanup); | |
631 | ||
632 | /* destroy pthread */ | |
f2efe6a3 QY |
633 | frr_pthread_destroy(client->pthread); |
634 | client->pthread = NULL; | |
635 | ||
f3e33b69 | 636 | /* remove from client list */ |
f2efe6a3 | 637 | listnode_delete(zebrad.client_list, client); |
f3e33b69 QY |
638 | |
639 | /* delete client */ | |
21ccc0cf | 640 | zserv_client_free(client); |
f3e33b69 QY |
641 | } |
642 | ||
643 | /* | |
644 | * This task is scheduled by a ZAPI client pthread on the main pthread when it | |
645 | * wants to stop itself. When this executes, the client connection should | |
646 | * already have been closed and the thread will most likely have died, but its | |
647 | * resources still need to be cleaned up. | |
648 | */ | |
/*
 * Main-thread task scheduled by a failing client pthread (see
 * zserv_client_fail()); performs the actual resource cleanup.
 */
static int zserv_handle_client_fail(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);

	zserv_close_client(client);
	return 0;
}
656 | ||
657 | /* | |
658 | * Create a new client. | |
659 | * | |
660 | * This is called when a new connection is accept()'d on the ZAPI socket. It | |
661 | * initializes new client structure, notifies any subscribers of the connection | |
662 | * event and spawns the client's thread. | |
663 | * | |
664 | * sock | |
665 | * client's socket file descriptor | |
666 | */ | |
2875801f | 667 | static struct zserv *zserv_client_create(int sock) |
f2efe6a3 QY |
668 | { |
669 | struct zserv *client; | |
670 | int i; | |
671 | afi_t afi; | |
672 | ||
673 | client = XCALLOC(MTYPE_TMP, sizeof(struct zserv)); | |
674 | ||
675 | /* Make client input/output buffer. */ | |
676 | client->sock = sock; | |
677 | client->ibuf_fifo = stream_fifo_new(); | |
678 | client->obuf_fifo = stream_fifo_new(); | |
679 | client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ); | |
680 | client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ); | |
681 | pthread_mutex_init(&client->ibuf_mtx, NULL); | |
682 | pthread_mutex_init(&client->obuf_mtx, NULL); | |
683 | client->wb = buffer_new(0); | |
684 | ||
685 | /* Set table number. */ | |
686 | client->rtm_table = zebrad.rtm_table_default; | |
687 | ||
688 | atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL), | |
689 | memory_order_relaxed); | |
690 | ||
691 | /* Initialize flags */ | |
692 | for (afi = AFI_IP; afi < AFI_MAX; afi++) | |
693 | for (i = 0; i < ZEBRA_ROUTE_MAX; i++) | |
694 | client->redist[afi][i] = vrf_bitmap_init(); | |
695 | client->redist_default = vrf_bitmap_init(); | |
696 | client->ifinfo = vrf_bitmap_init(); | |
697 | client->ridinfo = vrf_bitmap_init(); | |
698 | ||
699 | /* by default, it's not a synchronous client */ | |
700 | client->is_synchronous = 0; | |
701 | ||
702 | /* Add this client to linked list. */ | |
703 | listnode_add(zebrad.client_list, client); | |
704 | ||
705 | struct frr_pthread_attr zclient_pthr_attrs = { | |
f2efe6a3 QY |
706 | .start = frr_pthread_attr_default.start, |
707 | .stop = frr_pthread_attr_default.stop | |
708 | }; | |
709 | client->pthread = | |
57019528 CS |
710 | frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread", |
711 | "zebra_apic"); | |
f2efe6a3 QY |
712 | |
713 | zebra_vrf_update_all(client); | |
714 | ||
715 | /* start read loop */ | |
21ccc0cf | 716 | zserv_client_event(client, ZSERV_CLIENT_READ); |
f2efe6a3 QY |
717 | |
718 | /* call callbacks */ | |
21ccc0cf | 719 | hook_call(zserv_client_connect, client); |
f2efe6a3 QY |
720 | |
721 | /* start pthread */ | |
722 | frr_pthread_run(client->pthread, NULL); | |
2875801f QY |
723 | |
724 | return client; | |
f2efe6a3 | 725 | } |
329e35da | 726 | |
21ccc0cf QY |
727 | /* |
728 | * Accept socket connection. | |
729 | */ | |
730 | static int zserv_accept(struct thread *thread) | |
718e3744 | 731 | { |
d62a17ae | 732 | int accept_sock; |
733 | int client_sock; | |
734 | struct sockaddr_in client; | |
735 | socklen_t len; | |
736 | ||
737 | accept_sock = THREAD_FD(thread); | |
718e3744 | 738 | |
d62a17ae | 739 | /* Reregister myself. */ |
21ccc0cf | 740 | zserv_event(NULL, ZSERV_ACCEPT); |
718e3744 | 741 | |
d62a17ae | 742 | len = sizeof(struct sockaddr_in); |
743 | client_sock = accept(accept_sock, (struct sockaddr *)&client, &len); | |
719e9741 | 744 | |
d62a17ae | 745 | if (client_sock < 0) { |
450971aa | 746 | flog_err_sys(EC_LIB_SOCKET, "Can't accept zebra socket: %s", |
9df414fe | 747 | safe_strerror(errno)); |
d62a17ae | 748 | return -1; |
749 | } | |
718e3744 | 750 | |
d62a17ae | 751 | /* Make client socket non-blocking. */ |
752 | set_nonblocking(client_sock); | |
718e3744 | 753 | |
d62a17ae | 754 | /* Create new zebra client. */ |
21ccc0cf | 755 | zserv_client_create(client_sock); |
718e3744 | 756 | |
d62a17ae | 757 | return 0; |
718e3744 | 758 | } |
759 | ||
21ccc0cf | 760 | void zserv_start(char *path) |
d62a17ae | 761 | { |
762 | int ret; | |
d62a17ae | 763 | mode_t old_mask; |
689f5a8c DL |
764 | struct sockaddr_storage sa; |
765 | socklen_t sa_len; | |
d62a17ae | 766 | |
689f5a8c DL |
767 | if (!frr_zclient_addr(&sa, &sa_len, path)) |
768 | /* should be caught in zebra main() */ | |
769 | return; | |
d62a17ae | 770 | |
771 | /* Set umask */ | |
772 | old_mask = umask(0077); | |
773 | ||
774 | /* Make UNIX domain socket. */ | |
21ccc0cf QY |
775 | zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0); |
776 | if (zebrad.sock < 0) { | |
450971aa | 777 | flog_err_sys(EC_LIB_SOCKET, "Can't create zserv socket: %s", |
9df414fe | 778 | safe_strerror(errno)); |
d62a17ae | 779 | return; |
780 | } | |
781 | ||
689f5a8c | 782 | if (sa.ss_family != AF_UNIX) { |
21ccc0cf QY |
783 | sockopt_reuseaddr(zebrad.sock); |
784 | sockopt_reuseport(zebrad.sock); | |
689f5a8c DL |
785 | } else { |
786 | struct sockaddr_un *suna = (struct sockaddr_un *)&sa; | |
787 | if (suna->sun_path[0]) | |
788 | unlink(suna->sun_path); | |
789 | } | |
790 | ||
6bb30c2c DL |
791 | frr_elevate_privs(&zserv_privs) { |
792 | setsockopt_so_recvbuf(zebrad.sock, 1048576); | |
793 | setsockopt_so_sendbuf(zebrad.sock, 1048576); | |
794 | } | |
689f5a8c | 795 | |
6bb30c2c DL |
796 | frr_elevate_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) { |
797 | ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len); | |
798 | } | |
d62a17ae | 799 | if (ret < 0) { |
1c50c1c0 QY |
800 | flog_err_sys(EC_LIB_SOCKET, "Can't bind zserv socket on %s: %s", |
801 | path, safe_strerror(errno)); | |
21ccc0cf QY |
802 | close(zebrad.sock); |
803 | zebrad.sock = -1; | |
d62a17ae | 804 | return; |
805 | } | |
806 | ||
21ccc0cf | 807 | ret = listen(zebrad.sock, 5); |
d62a17ae | 808 | if (ret < 0) { |
450971aa | 809 | flog_err_sys(EC_LIB_SOCKET, |
9df414fe QY |
810 | "Can't listen to zserv socket %s: %s", path, |
811 | safe_strerror(errno)); | |
21ccc0cf QY |
812 | close(zebrad.sock); |
813 | zebrad.sock = -1; | |
d62a17ae | 814 | return; |
815 | } | |
816 | ||
817 | umask(old_mask); | |
818 | ||
21ccc0cf | 819 | zserv_event(NULL, ZSERV_ACCEPT); |
718e3744 | 820 | } |
6b0655a2 | 821 | |
21ccc0cf QY |
822 | void zserv_event(struct zserv *client, enum zserv_event event) |
823 | { | |
824 | switch (event) { | |
825 | case ZSERV_ACCEPT: | |
826 | thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock, | |
827 | NULL); | |
828 | break; | |
829 | case ZSERV_PROCESS_MESSAGES: | |
830 | thread_add_event(zebrad.master, zserv_process_messages, client, | |
831 | 0, NULL); | |
832 | break; | |
f3e33b69 QY |
833 | case ZSERV_HANDLE_CLIENT_FAIL: |
834 | thread_add_event(zebrad.master, zserv_handle_client_fail, | |
835 | client, 0, &client->t_cleanup); | |
21ccc0cf QY |
836 | } |
837 | } | |
838 | ||
839 | ||
f2efe6a3 QY |
840 | /* General purpose ---------------------------------------------------------- */ |
841 | ||
04b02fda | 842 | #define ZEBRA_TIME_BUF 32 |
d62a17ae | 843 | static char *zserv_time_buf(time_t *time1, char *buf, int buflen) |
04b02fda | 844 | { |
d62a17ae | 845 | struct tm *tm; |
846 | time_t now; | |
04b02fda | 847 | |
d62a17ae | 848 | assert(buf != NULL); |
849 | assert(buflen >= ZEBRA_TIME_BUF); | |
850 | assert(time1 != NULL); | |
04b02fda | 851 | |
d62a17ae | 852 | if (!*time1) { |
853 | snprintf(buf, buflen, "never "); | |
854 | return (buf); | |
855 | } | |
04b02fda | 856 | |
d62a17ae | 857 | now = monotime(NULL); |
858 | now -= *time1; | |
859 | tm = gmtime(&now); | |
04b02fda | 860 | |
d62a17ae | 861 | if (now < ONE_DAY_SECOND) |
862 | snprintf(buf, buflen, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min, | |
863 | tm->tm_sec); | |
864 | else if (now < ONE_WEEK_SECOND) | |
865 | snprintf(buf, buflen, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour, | |
866 | tm->tm_min); | |
96ade3ed | 867 | else |
d62a17ae | 868 | snprintf(buf, buflen, "%02dw%dd%02dh", tm->tm_yday / 7, |
869 | tm->tm_yday - ((tm->tm_yday / 7) * 7), tm->tm_hour); | |
870 | return buf; | |
871 | } | |
872 | ||
/*
 * Print a detailed per-client report ("show zebra client") to the vty:
 * identity, connection/activity timestamps, last commands exchanged,
 * and per-message-type counters.
 */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint32_t last_read_cmd, last_write_cmd;

	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %d", client->instance);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);
	vty_out(vty, "Route Table ID: %d \n", client->rtm_table);

	/* Timestamps and command counters are written from the client's
	 * own I/O pthread; load them atomically before formatting. */
	connect_time = (time_t) atomic_load_explicit(&client->connect_time,
						     memory_order_relaxed);

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
						      memory_order_relaxed);
	last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
						       memory_order_relaxed);

	last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
					     memory_order_relaxed);
	last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
					      memory_order_relaxed);

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/* Per-message-type counter table; columns with no meaningful
	 * "update" counter print a literal 0. */
	vty_out(vty, "Type        Add        Update     Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4        %-12d%-12d%-12d\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6        %-12d%-12d%-12d\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4   %-12d%-12d%-12d\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6   %-12d%-12d%-12d\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "Connected   %-12d%-12d%-12d\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "BFD peer    %-12d%-12d%-12d\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "NHT v4      %-12d%-12d%-12d\n",
		client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt);
	vty_out(vty, "NHT v6      %-12d%-12d%-12d\n",
		client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
	vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
	vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
	vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
	vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt);
	vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt);
	vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt);
	vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt);

#if defined DEV_BUILD
	/* Development builds additionally expose fifo depth/high-water. */
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
	return;
}
964 | ||
965 | static void zebra_show_client_brief(struct vty *vty, struct zserv *client) | |
966 | { | |
967 | char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF]; | |
968 | char wbuf[ZEBRA_TIME_BUF]; | |
52f6868d QY |
969 | time_t connect_time, last_read_time, last_write_time; |
970 | ||
e1de21d7 QY |
971 | connect_time = (time_t)atomic_load_explicit(&client->connect_time, |
972 | memory_order_relaxed); | |
973 | last_read_time = (time_t)atomic_load_explicit(&client->last_read_time, | |
974 | memory_order_relaxed); | |
975 | last_write_time = (time_t)atomic_load_explicit(&client->last_write_time, | |
52f6868d | 976 | memory_order_relaxed); |
d62a17ae | 977 | |
978 | vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n", | |
979 | zebra_route_string(client->proto), | |
52f6868d QY |
980 | zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF), |
981 | zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF), | |
982 | zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF), | |
d62a17ae | 983 | client->v4_route_add_cnt + client->v4_route_upd8_cnt, |
984 | client->v4_route_del_cnt, | |
985 | client->v6_route_add_cnt + client->v6_route_upd8_cnt, | |
986 | client->v6_route_del_cnt); | |
987 | } | |
988 | ||
21ccc0cf | 989 | struct zserv *zserv_find_client(uint8_t proto, unsigned short instance) |
d62a17ae | 990 | { |
991 | struct listnode *node, *nnode; | |
992 | struct zserv *client; | |
993 | ||
994 | for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) { | |
996c9314 | 995 | if (client->proto == proto && client->instance == instance) |
d62a17ae | 996 | return client; |
997 | } | |
998 | ||
999 | return NULL; | |
8ed6821e | 1000 | } |
1001 | ||
718e3744 | 1002 | /* This command is for debugging purpose. */ |
1003 | DEFUN (show_zebra_client, | |
1004 | show_zebra_client_cmd, | |
1005 | "show zebra client", | |
1006 | SHOW_STR | |
41e7fb80 | 1007 | ZEBRA_STR |
b9ee4999 | 1008 | "Client information\n") |
718e3744 | 1009 | { |
d62a17ae | 1010 | struct listnode *node; |
1011 | struct zserv *client; | |
718e3744 | 1012 | |
d62a17ae | 1013 | for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) |
1014 | zebra_show_client_detail(vty, client); | |
04b02fda | 1015 | |
d62a17ae | 1016 | return CMD_SUCCESS; |
04b02fda DS |
1017 | } |
1018 | ||
1019 | /* This command is for debugging purpose. */ | |
1020 | DEFUN (show_zebra_client_summary, | |
1021 | show_zebra_client_summary_cmd, | |
1022 | "show zebra client summary", | |
1023 | SHOW_STR | |
41e7fb80 | 1024 | ZEBRA_STR |
b9ee4999 DS |
1025 | "Client information brief\n" |
1026 | "Brief Summary\n") | |
04b02fda | 1027 | { |
d62a17ae | 1028 | struct listnode *node; |
1029 | struct zserv *client; | |
04b02fda | 1030 | |
d62a17ae | 1031 | vty_out(vty, |
1032 | "Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes \n"); | |
1033 | vty_out(vty, | |
1034 | "--------------------------------------------------------------------------------\n"); | |
04b02fda | 1035 | |
d62a17ae | 1036 | for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) |
1037 | zebra_show_client_brief(vty, client); | |
fb018d25 | 1038 | |
d62a17ae | 1039 | vty_out(vty, "Routes column shows (added+updated)/deleted\n"); |
1040 | return CMD_SUCCESS; | |
718e3744 | 1041 | } |
1042 | ||
#if defined(HANDLE_ZAPI_FUZZING)
/*
 * Fuzzing entry point: open a file of raw ZAPI messages and hand its
 * descriptor to the server as if it were a client socket.
 *
 * Fixes over the previous version: the open(2) result is now checked
 * (a failed open used to pass fd -1 straight to zserv_client_create),
 * and the dead 'struct thread t' local is gone.
 */
void zserv_read_file(char *input)
{
	int fd;

	fd = open(input, O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		flog_err_sys(EC_LIB_SOCKET,
			     "Can't open fuzz input file %s: %s", input,
			     safe_strerror(errno));
		return;
	}

	zserv_client_create(fd);
}
#endif
1055 | ||
5f145fb8 | 1056 | void zserv_init(void) |
718e3744 | 1057 | { |
d62a17ae | 1058 | /* Client list init. */ |
1059 | zebrad.client_list = list_new(); | |
21ccc0cf QY |
1060 | |
1061 | /* Misc init. */ | |
1062 | zebrad.sock = -1; | |
718e3744 | 1063 | |
d62a17ae | 1064 | install_element(ENABLE_NODE, &show_zebra_client_cmd); |
1065 | install_element(ENABLE_NODE, &show_zebra_client_summary_cmd); | |
718e3744 | 1066 | } |