/* Thread management routine
 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* #define DEBUG */

#include <zebra.h>
#include <sys/resource.h>

#include "thread.h"
#include "memory.h"
#include "frrcu.h"
#include "log.h"
#include "hash.h"
#include "command.h"
#include "sigevent.h"
#include "network.h"
#include "jhash.h"
#include "frratomic.h"
#include "frr_pthread.h"
#include "lib_errors.h"
#include "libfrr_trace.h"
#include "libfrr.h"

DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master");
DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info");
DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats");

DECLARE_LIST(thread_list, struct thread, threaditem);

struct cancel_req {
	int flags;
	struct thread *thread;
	void *eventobj;
	struct thread **threadref;
};

/* Flags for task cancellation */
#define THREAD_CANCEL_FLAG_READY 0x01

static int thread_timer_cmp(const struct thread *a, const struct thread *b)
{
	if (a->u.sands.tv_sec < b->u.sands.tv_sec)
		return -1;
	if (a->u.sands.tv_sec > b->u.sands.tv_sec)
		return 1;
	if (a->u.sands.tv_usec < b->u.sands.tv_usec)
		return -1;
	if (a->u.sands.tv_usec > b->u.sands.tv_usec)
		return 1;
	return 0;
}

DECLARE_HEAP(thread_timer_list, struct thread, timeritem, thread_timer_cmp);

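/*
 * Note: the heap declared above keeps the timer with the earliest
 * deadline at its head, so thread_timer_list_first() always returns the
 * next timer to expire; thread_timer_wait() and thread_process_timers()
 * below depend on that ordering.
 */
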
#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif

/* note: no trailing semicolon inside the macro, so "AWAKEN(m);" at a
 * call site expands to exactly one statement */
#define AWAKEN(m)                                                              \
	do {                                                                   \
		const unsigned char wakebyte = 0x01;                           \
		write(m->io_pipe[1], &wakebyte, 1);                            \
	} while (0)

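/*
 * AWAKEN() is the "pipe poker": writing one byte to io_pipe[1] makes the
 * io_pipe[0] end readable, waking a pthread blocked in poll()/ppoll()
 * inside fd_poll() so it can pick up newly scheduled or cancelled tasks.
 * fd_poll() drains the pipe after waking.
 */
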
/* control variable for initializer */
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
pthread_key_t thread_current;

static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *masters;

static void thread_free(struct thread_master *master, struct thread *thread);

/* CLI start ---------------------------------------------------------------- */
static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
	int size = sizeof(a->func);

	return jhash(&a->func, size, 0);
}

static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
				const struct cpu_thread_history *b)
{
	return a->func == b->func;
}

static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
{
	struct cpu_thread_history *new;
	new = XCALLOC(MTYPE_THREAD_STATS, sizeof(struct cpu_thread_history));
	new->func = a->func;
	new->funcname = a->funcname;
	return new;
}

static void cpu_record_hash_free(void *a)
{
	struct cpu_thread_history *hist = a;

	XFREE(MTYPE_THREAD_STATS, hist);
}

#ifndef EXCLUDE_CPU_TIME
static void vty_out_cpu_thread_history(struct vty *vty,
				       struct cpu_thread_history *a)
{
	vty_out(vty, "%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu",
		a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
		a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
		(a->real.total / a->total_calls), a->real.max);
	vty_out(vty, " %c%c%c%c%c %s\n",
		a->types & (1 << THREAD_READ) ? 'R' : ' ',
		a->types & (1 << THREAD_WRITE) ? 'W' : ' ',
		a->types & (1 << THREAD_TIMER) ? 'T' : ' ',
		a->types & (1 << THREAD_EVENT) ? 'E' : ' ',
		a->types & (1 << THREAD_EXECUTE) ? 'X' : ' ', a->funcname);
}

static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
{
	struct cpu_thread_history *totals = args[0];
	struct cpu_thread_history copy;
	struct vty *vty = args[1];
	uint8_t *filter = args[2];

	struct cpu_thread_history *a = bucket->data;

	copy.total_active =
		atomic_load_explicit(&a->total_active, memory_order_seq_cst);
	copy.total_calls =
		atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
	copy.cpu.total =
		atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
	copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
	copy.real.total =
		atomic_load_explicit(&a->real.total, memory_order_seq_cst);
	copy.real.max =
		atomic_load_explicit(&a->real.max, memory_order_seq_cst);
	copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
	copy.funcname = a->funcname;

	if (!(copy.types & *filter))
		return;

	vty_out_cpu_thread_history(vty, &copy);
	totals->total_active += copy.total_active;
	totals->total_calls += copy.total_calls;
	totals->real.total += copy.real.total;
	if (totals->real.max < copy.real.max)
		totals->real.max = copy.real.max;
	totals->cpu.total += copy.cpu.total;
	if (totals->cpu.max < copy.cpu.max)
		totals->cpu.max = copy.cpu.max;
}

static void cpu_record_print(struct vty *vty, uint8_t filter)
{
	struct cpu_thread_history tmp;
	void *args[3] = {&tmp, vty, &filter};
	struct thread_master *m;
	struct listnode *ln;

	memset(&tmp, 0, sizeof(tmp));
	tmp.funcname = "TOTAL";
	tmp.types = filter;

	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			const char *name = m->name ? m->name : "main";

			char underline[strlen(name) + 1];
			memset(underline, '-', sizeof(underline));
			underline[sizeof(underline) - 1] = '\0';

			vty_out(vty, "\n");
			vty_out(vty, "Showing statistics for pthread %s\n",
				name);
			vty_out(vty, "-------------------------------%s\n",
				underline);
			vty_out(vty, "%30s %18s %18s\n", "",
				"CPU (user+system):", "Real (wall-clock):");
			vty_out(vty,
				"Active Runtime(ms) Invoked Avg uSec Max uSecs");
			vty_out(vty, " Avg uSec Max uSecs");
			vty_out(vty, " Type Thread\n");

			if (m->cpu_record->count)
				hash_iterate(
					m->cpu_record,
					(void (*)(struct hash_bucket *,
						  void *))cpu_record_hash_print,
					args);
			else
				vty_out(vty, "No data to display yet.\n");

			vty_out(vty, "\n");
		}
	}

	vty_out(vty, "\n");
	vty_out(vty, "Total thread statistics\n");
	vty_out(vty, "-------------------------\n");
	vty_out(vty, "%30s %18s %18s\n", "",
		"CPU (user+system):", "Real (wall-clock):");
	vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
	vty_out(vty, " Avg uSec Max uSecs");
	vty_out(vty, " Type Thread\n");

	if (tmp.total_calls > 0)
		vty_out_cpu_thread_history(vty, &tmp);
}
#endif

static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
{
	uint8_t *filter = args[0];
	struct hash *cpu_record = args[1];

	struct cpu_thread_history *a = bucket->data;

	if (!(a->types & *filter))
		return;

	hash_release(cpu_record, bucket->data);
}

static void cpu_record_clear(uint8_t filter)
{
	uint8_t *tmp = &filter;
	struct thread_master *m;
	struct listnode *ln;

	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			frr_with_mutex(&m->mtx) {
				void *args[2] = {tmp, m->cpu_record};
				hash_iterate(
					m->cpu_record,
					(void (*)(struct hash_bucket *,
						  void *))cpu_record_hash_clear,
					args);
			}
		}
	}
}

static uint8_t parse_filter(const char *filterstr)
{
	int i = 0;
	int filter = 0;

	while (filterstr[i] != '\0') {
		switch (filterstr[i]) {
		case 'r':
		case 'R':
			filter |= (1 << THREAD_READ);
			break;
		case 'w':
		case 'W':
			filter |= (1 << THREAD_WRITE);
			break;
		case 't':
		case 'T':
			filter |= (1 << THREAD_TIMER);
			break;
		case 'e':
		case 'E':
			filter |= (1 << THREAD_EVENT);
			break;
		case 'x':
		case 'X':
			filter |= (1 << THREAD_EXECUTE);
			break;
		default:
			break;
		}
		++i;
	}
	return filter;
}

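/*
 * A minimal usage sketch (hypothetical input): parse_filter("rw")
 * returns a bitmask with the THREAD_READ and THREAD_WRITE bits set, so
 * "show thread cpu rw" displays only read and write tasks.  An input
 * containing none of r/w/t/e/x yields 0, which the CLI handlers below
 * treat as an invalid filter.
 */
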
#ifndef EXCLUDE_CPU_TIME
DEFUN (show_thread_cpu,
       show_thread_cpu_cmd,
       "show thread cpu [FILTER]",
       SHOW_STR
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtex)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_print(vty, filter);
	return CMD_SUCCESS;
}
#endif

static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
{
	const char *name = m->name ? m->name : "main";
	char underline[strlen(name) + 1];
	struct thread *thread;
	uint32_t i;

	memset(underline, '-', sizeof(underline));
	underline[sizeof(underline) - 1] = '\0';

	vty_out(vty, "\nShowing poll FD's for %s\n", name);
	vty_out(vty, "----------------------%s\n", underline);
	vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
		m->fd_limit);
	for (i = 0; i < m->handler.pfdcount; i++) {
		vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
			m->handler.pfds[i].fd, m->handler.pfds[i].events,
			m->handler.pfds[i].revents);

		if (m->handler.pfds[i].events & POLLIN) {
			thread = m->read[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR ");
			else
				vty_out(vty, "%s ", thread->xref->funcname);
		} else
			vty_out(vty, " ");

		if (m->handler.pfds[i].events & POLLOUT) {
			thread = m->write[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR\n");
			else
				vty_out(vty, "%s\n", thread->xref->funcname);
		} else
			vty_out(vty, "\n");
	}
}

DEFUN (show_thread_poll,
       show_thread_poll_cmd,
       "show thread poll",
       SHOW_STR
       "Thread information\n"
       "Show poll FD's and information\n")
{
	struct listnode *node;
	struct thread_master *m;

	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
			show_thread_poll_helper(vty, m);
		}
	}

	return CMD_SUCCESS;
}


DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data in all pthreads\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtex)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_clear(filter);
	return CMD_SUCCESS;
}

void thread_cmd_init(void)
{
#ifndef EXCLUDE_CPU_TIME
	install_element(VIEW_NODE, &show_thread_cpu_cmd);
#endif
	install_element(VIEW_NODE, &show_thread_poll_cmd);
	install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
}
/* CLI end ------------------------------------------------------------------ */


static void cancelreq_del(void *cr)
{
	XFREE(MTYPE_TMP, cr);
}

/* initializer, only ever called once */
static void initializer(void)
{
	pthread_key_create(&thread_current, NULL);
}

struct thread_master *thread_master_create(const char *name)
{
	struct thread_master *rv;
	struct rlimit limit;

	pthread_once(&init_once, &initializer);

	rv = XCALLOC(MTYPE_THREAD_MASTER, sizeof(struct thread_master));

	/* Initialize master mutex */
	pthread_mutex_init(&rv->mtx, NULL);
	pthread_cond_init(&rv->cancel_cond, NULL);

	/* Set name */
	name = name ? name : "default";
	rv->name = XSTRDUP(MTYPE_THREAD_MASTER, name);

	/* Initialize I/O task data structures */

	/* Use configured limit if present, ulimit otherwise. */
	rv->fd_limit = frr_get_fd_limit();
	if (rv->fd_limit == 0) {
		getrlimit(RLIMIT_NOFILE, &limit);
		rv->fd_limit = (int)limit.rlim_cur;
	}

	rv->read = XCALLOC(MTYPE_THREAD_POLL,
			   sizeof(struct thread *) * rv->fd_limit);

	rv->write = XCALLOC(MTYPE_THREAD_POLL,
			    sizeof(struct thread *) * rv->fd_limit);

	char tmhashname[strlen(name) + 32];
	snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
		 name);
	rv->cpu_record = hash_create_size(
		8, (unsigned int (*)(const void *))cpu_record_hash_key,
		(bool (*)(const void *, const void *))cpu_record_hash_cmp,
		tmhashname);

	thread_list_init(&rv->event);
	thread_list_init(&rv->ready);
	thread_list_init(&rv->unuse);
	thread_timer_list_init(&rv->timer);

	/* Initialize thread_fetch() settings */
	rv->spin = true;
	rv->handle_signals = true;

	/* Set pthread owner, should be updated by actual owner */
	rv->owner = pthread_self();
	rv->cancel_req = list_new();
	rv->cancel_req->del = cancelreq_del;
	rv->canceled = true;

	/* Initialize pipe poker */
	pipe(rv->io_pipe);
	set_nonblocking(rv->io_pipe[0]);
	set_nonblocking(rv->io_pipe[1]);

	/* Initialize data structures for poll() */
	rv->handler.pfdsize = rv->fd_limit;
	rv->handler.pfdcount = 0;
	rv->handler.pfds = XCALLOC(MTYPE_THREAD_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);
	rv->handler.copy = XCALLOC(MTYPE_THREAD_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);

	/* add to list of threadmasters */
	frr_with_mutex(&masters_mtx) {
		if (!masters)
			masters = list_new();

		listnode_add(masters, rv);
	}

	return rv;
}

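/*
 * A minimal usage sketch, not a verbatim daemon: FRR daemons typically
 * create one master per pthread, schedule tasks against it, and drive
 * it with the fetch/call loop implemented later in this file:
 *
 *   struct thread_master *master = thread_master_create("example");
 *   struct thread thread;
 *
 *   while (thread_fetch(master, &thread))
 *       thread_call(&thread);
 */
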
void thread_master_set_name(struct thread_master *master, const char *name)
{
	frr_with_mutex(&master->mtx) {
		XFREE(MTYPE_THREAD_MASTER, master->name);
		master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
	}
}

#define THREAD_UNUSED_DEPTH 10

/* Move thread to unuse list. */
static void thread_add_unuse(struct thread_master *m, struct thread *thread)
{
	pthread_mutex_t mtxc = thread->mtx;

	assert(m != NULL && thread != NULL);

	thread->hist->total_active--;
	memset(thread, 0, sizeof(struct thread));
	thread->type = THREAD_UNUSED;

	/* Restore the thread mutex context. */
	thread->mtx = mtxc;

	if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
		thread_list_add_tail(&m->unuse, thread);
		return;
	}

	thread_free(m, thread);
}

/* Free all unused threads. */
static void thread_list_free(struct thread_master *m,
			     struct thread_list_head *list)
{
	struct thread *t;

	while ((t = thread_list_pop(list)))
		thread_free(m, t);
}

static void thread_array_free(struct thread_master *m,
			      struct thread **thread_array)
{
	struct thread *t;
	int index;

	for (index = 0; index < m->fd_limit; ++index) {
		t = thread_array[index];
		if (t) {
			thread_array[index] = NULL;
			thread_free(m, t);
		}
	}
	XFREE(MTYPE_THREAD_POLL, thread_array);
}

/*
 * thread_master_free_unused
 *
 * As threads are finished with, they are put on the
 * unuse list for later reuse.
 * If we are shutting down, free up the unused threads
 * so we can see if we forgot to shut anything off.
 */
void thread_master_free_unused(struct thread_master *m)
{
	frr_with_mutex(&m->mtx) {
		struct thread *t;
		while ((t = thread_list_pop(&m->unuse)))
			thread_free(m, t);
	}
}

/* Stop thread scheduler. */
void thread_master_free(struct thread_master *m)
{
	struct thread *t;

	frr_with_mutex(&masters_mtx) {
		listnode_delete(masters, m);
		if (masters->count == 0) {
			list_delete(&masters);
		}
	}

	thread_array_free(m, m->read);
	thread_array_free(m, m->write);
	while ((t = thread_timer_list_pop(&m->timer)))
		thread_free(m, t);
	thread_list_free(m, &m->event);
	thread_list_free(m, &m->ready);
	thread_list_free(m, &m->unuse);
	pthread_mutex_destroy(&m->mtx);
	pthread_cond_destroy(&m->cancel_cond);
	close(m->io_pipe[0]);
	close(m->io_pipe[1]);
	list_delete(&m->cancel_req);
	m->cancel_req = NULL;

	hash_clean(m->cpu_record, cpu_record_hash_free);
	hash_free(m->cpu_record);
	m->cpu_record = NULL;

	XFREE(MTYPE_THREAD_MASTER, m->name);
	XFREE(MTYPE_THREAD_MASTER, m->handler.pfds);
	XFREE(MTYPE_THREAD_MASTER, m->handler.copy);
	XFREE(MTYPE_THREAD_MASTER, m);
}

/* Return remaining time in milliseconds. */
unsigned long thread_timer_remain_msec(struct thread *thread)
{
	int64_t remain;

	frr_with_mutex(&thread->mtx) {
		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
	}

	return remain < 0 ? 0 : remain;
}

/* Return remaining time in seconds. */
unsigned long thread_timer_remain_second(struct thread *thread)
{
	return thread_timer_remain_msec(thread) / 1000LL;
}

struct timeval thread_timer_remain(struct thread *thread)
{
	struct timeval remain;
	frr_with_mutex(&thread->mtx) {
		monotime_until(&thread->u.sands, &remain);
	}
	return remain;
}

static int time_hhmmss(char *buf, int buf_size, long sec)
{
	long hh;
	long mm;
	int wr;

	zassert(buf_size >= 8);

	hh = sec / 3600;
	sec %= 3600;
	mm = sec / 60;
	sec %= 60;

	wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);

	return wr != 8;
}

char *thread_timer_to_hhmmss(char *buf, int buf_size,
			     struct thread *t_timer)
{
	if (t_timer) {
		time_hhmmss(buf, buf_size,
			    thread_timer_remain_second(t_timer));
	} else {
		snprintf(buf, buf_size, "--:--:--");
	}
	return buf;
}

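/*
 * Usage sketch (hypothetical caller): the buffer must hold the eight
 * formatted characters plus a terminating NUL, e.g.
 *
 *   char buf[9];
 *   vty_out(vty, "expires in %s\n",
 *           thread_timer_to_hhmmss(buf, sizeof(buf), t_timer));
 */
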
/* Get new thread. */
static struct thread *thread_get(struct thread_master *m, uint8_t type,
				 int (*func)(struct thread *), void *arg,
				 const struct xref_threadsched *xref)
{
	struct thread *thread = thread_list_pop(&m->unuse);
	struct cpu_thread_history tmp;

	if (!thread) {
		thread = XCALLOC(MTYPE_THREAD, sizeof(struct thread));
		/* mutex only needs to be initialized at struct creation. */
		pthread_mutex_init(&thread->mtx, NULL);
		m->alloc++;
	}

	thread->type = type;
	thread->add_type = type;
	thread->master = m;
	thread->arg = arg;
	thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
	thread->ref = NULL;

	/*
	 * So if the passed in funcname is not what we have
	 * stored that means the thread->hist needs to be
	 * updated.  We keep the last one around in unused
	 * under the assumption that we are probably
	 * going to immediately allocate the same
	 * type of thread.
	 * This hopefully saves us some serious
	 * hash_get lookups.
	 */
	if ((thread->xref && thread->xref->funcname != xref->funcname)
	    || thread->func != func) {
		tmp.func = func;
		tmp.funcname = xref->funcname;
		thread->hist =
			hash_get(m->cpu_record, &tmp,
				 (void *(*)(void *))cpu_record_hash_alloc);
	}
	thread->hist->total_active++;
	thread->func = func;
	thread->xref = xref;

	return thread;
}

static void thread_free(struct thread_master *master, struct thread *thread)
{
	/* Update statistics. */
	assert(master->alloc > 0);
	master->alloc--;

	/* Free allocated resources. */
	pthread_mutex_destroy(&thread->mtx);
	XFREE(MTYPE_THREAD, thread);
}

static int fd_poll(struct thread_master *m, const struct timeval *timer_wait,
		   bool *eintr_p)
{
	sigset_t origsigs;
	unsigned char trash[64];
	nfds_t count = m->handler.copycount;

	/*
	 * If timer_wait is null here, that means poll() should block
	 * indefinitely, unless the thread_master has overridden it by setting
	 * ->selectpoll_timeout.
	 *
	 * If the value is positive, it specifies the maximum number of
	 * milliseconds to wait. If the timeout is -1, it specifies that
	 * we should never wait and always return immediately even if no
	 * event is detected. If the value is zero, the behavior is default.
	 */
	int timeout = -1;

	/* number of file descriptors with events */
	int num;

	if (timer_wait != NULL
	    && m->selectpoll_timeout == 0) // use the default value
		timeout = (timer_wait->tv_sec * 1000)
			  + (timer_wait->tv_usec / 1000);
	else if (m->selectpoll_timeout > 0) // use the user's timeout
		timeout = m->selectpoll_timeout;
	else if (m->selectpoll_timeout
		 < 0) // effect a poll (return immediately)
		timeout = 0;

	zlog_tls_buffer_flush();
	rcu_read_unlock();
	rcu_assert_read_unlocked();

	/* add poll pipe poker */
	assert(count + 1 < m->handler.pfdsize);
	m->handler.copy[count].fd = m->io_pipe[0];
	m->handler.copy[count].events = POLLIN;
	m->handler.copy[count].revents = 0x00;

	/* We need to deal with a signal-handling race here: we
	 * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
	 * that may arrive just before we enter poll(). We will block the
	 * key signals, then check whether any have arrived - if so, we return
	 * before calling poll(). If not, we'll re-enable the signals
	 * in the ppoll() call.
	 */

	sigemptyset(&origsigs);
	if (m->handle_signals) {
		/* Main pthread that handles the app signals */
		if (frr_sigevent_check(&origsigs)) {
			/* Signal to process - restore signal mask and return */
			pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
			num = -1;
			*eintr_p = true;
			goto done;
		}
	} else {
		/* Don't make any changes for the non-main pthreads */
		pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
	}

#if defined(HAVE_PPOLL)
	struct timespec ts, *tsp;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
#else
	/* Not ideal - there is a race after we restore the signal mask */
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
	num = poll(m->handler.copy, count + 1, timeout);
#endif

done:

	if (num < 0 && errno == EINTR)
		*eintr_p = true;

	if (num > 0 && m->handler.copy[count].revents != 0 && num--)
		while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
			;

	rcu_read_lock();

	return num;
}

/* Add new read thread. */
struct thread *_thread_add_read_write(const struct xref_threadsched *xref,
				      struct thread_master *m,
				      int (*func)(struct thread *),
				      void *arg, int fd, struct thread **t_ptr)
{
	int dir = xref->thread_type;
	struct thread *thread = NULL;
	struct thread **thread_array;

	if (dir == THREAD_READ)
		frrtrace(9, frr_libfrr, schedule_read, m,
			 xref->funcname, xref->xref.file, xref->xref.line,
			 t_ptr, fd, 0, arg, 0);
	else
		frrtrace(9, frr_libfrr, schedule_write, m,
			 xref->funcname, xref->xref.file, xref->xref.line,
			 t_ptr, fd, 0, arg, 0);

	assert(fd >= 0 && fd < m->fd_limit);
	frr_with_mutex(&m->mtx) {
		if (t_ptr && *t_ptr)
			// thread is already scheduled; don't reschedule
			break;

		/* default to a new pollfd */
		nfds_t queuepos = m->handler.pfdcount;

		if (dir == THREAD_READ)
			thread_array = m->read;
		else
			thread_array = m->write;

		/* if we already have a pollfd for our file descriptor, find and
		 * use it */
		for (nfds_t i = 0; i < m->handler.pfdcount; i++)
			if (m->handler.pfds[i].fd == fd) {
				queuepos = i;

#ifdef DEV_BUILD
				/*
				 * What happens if we have a thread already
				 * created for this event?
				 */
				if (thread_array[fd])
					assert(!"Thread already scheduled for file descriptor");
#endif
				break;
			}

		/* make sure we have room for this fd + pipe poker fd */
		assert(queuepos + 1 < m->handler.pfdsize);

		thread = thread_get(m, dir, func, arg, xref);

		m->handler.pfds[queuepos].fd = fd;
		m->handler.pfds[queuepos].events |=
			(dir == THREAD_READ ? POLLIN : POLLOUT);

		if (queuepos == m->handler.pfdcount)
			m->handler.pfdcount++;

		if (thread) {
			frr_with_mutex(&thread->mtx) {
				thread->u.fd = fd;
				thread_array[thread->u.fd] = thread;
			}

			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		AWAKEN(m);
	}

	return thread;
}

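/*
 * Callers do not normally invoke _thread_add_read_write() directly;
 * they go through the thread_add_read()/thread_add_write() wrapper
 * macros in thread.h, which supply the xref_threadsched recording the
 * scheduling location. A minimal sketch (sock and handle_readable are
 * hypothetical):
 *
 *   static struct thread *t_read;
 *   thread_add_read(master, handle_readable, ctx, sock, &t_read);
 */
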
static struct thread *
_thread_add_timer_timeval(const struct xref_threadsched *xref,
			  struct thread_master *m, int (*func)(struct thread *),
			  void *arg, struct timeval *time_relative,
			  struct thread **t_ptr)
{
	struct thread *thread;
	struct timeval t;

	assert(m != NULL);

	assert(time_relative);

	frrtrace(9, frr_libfrr, schedule_timer, m,
		 xref->funcname, xref->xref.file, xref->xref.line,
		 t_ptr, 0, 0, arg, (long)time_relative->tv_sec);

	/* Compute expiration/deadline time. */
	monotime(&t);
	timeradd(&t, time_relative, &t);

	frr_with_mutex(&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			return NULL;

		thread = thread_get(m, THREAD_TIMER, func, arg, xref);

		frr_with_mutex(&thread->mtx) {
			thread->u.sands = t;
			thread_timer_list_add(&m->timer, thread);
			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		/* The timer list is sorted - if this new timer
		 * might change the time we'll wait for, give the pthread
		 * a chance to re-compute.
		 */
		if (thread_timer_list_first(&m->timer) == thread)
			AWAKEN(m);
	}

	return thread;
}


/* Add timer event thread. */
struct thread *_thread_add_timer(const struct xref_threadsched *xref,
				 struct thread_master *m,
				 int (*func)(struct thread *),
				 void *arg, long timer, struct thread **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer;
	trel.tv_usec = 0;

	return _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "millisecond" resolution */
struct thread *_thread_add_timer_msec(const struct xref_threadsched *xref,
				      struct thread_master *m,
				      int (*func)(struct thread *),
				      void *arg, long timer,
				      struct thread **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer / 1000;
	trel.tv_usec = 1000 * (timer % 1000);

	return _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "timeval" resolution */
struct thread *_thread_add_timer_tv(const struct xref_threadsched *xref,
				    struct thread_master *m,
				    int (*func)(struct thread *),
				    void *arg, struct timeval *tv,
				    struct thread **t_ptr)
{
	return _thread_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}

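/*
 * A minimal sketch of scheduling a timer through the thread_add_timer()
 * wrapper macro from thread.h (interval and callback are hypothetical):
 *
 *   static struct thread *t_hello;
 *   thread_add_timer(master, send_hello, ctx, 5, &t_hello);
 *
 * Passing &t_hello lets the scheduler NULL out the reference when the
 * timer fires or is cancelled, and the t_ptr check above prevents
 * double-scheduling while the timer is still pending.
 */
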
/* Add simple event thread. */
struct thread *_thread_add_event(const struct xref_threadsched *xref,
				 struct thread_master *m,
				 int (*func)(struct thread *),
				 void *arg, int val, struct thread **t_ptr)
{
	struct thread *thread = NULL;

	frrtrace(9, frr_libfrr, schedule_event, m,
		 xref->funcname, xref->xref.file, xref->xref.line,
		 t_ptr, 0, val, arg, 0);

	assert(m != NULL);

	frr_with_mutex(&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			break;

		thread = thread_get(m, THREAD_EVENT, func, arg, xref);
		frr_with_mutex(&thread->mtx) {
			thread->u.val = val;
			thread_list_add_tail(&m->event, thread);
		}

		if (t_ptr) {
			*t_ptr = thread;
			thread->ref = t_ptr;
		}

		AWAKEN(m);
	}

	return thread;
}

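/*
 * Sketch of the event flavor (callback and value are hypothetical):
 *
 *   thread_add_event(master, process_queue, ctx, 0, &t_event);
 *
 * Events carry an integer 'val' instead of an fd or deadline; they are
 * placed on the event list and moved to the ready list en masse by
 * thread_process() during the next scheduling pass.
 */
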
/* Thread cancellation ------------------------------------------------------ */

/**
 * NOT's out the .events field of pollfd corresponding to the given file
 * descriptor. The event to be NOT'd is passed in the 'state' parameter.
 *
 * This needs to happen for both copies of pollfd's. See 'thread_fetch'
 * implementation for details.
 *
 * @param master
 * @param fd
 * @param state the event to cancel. One or more (OR'd together) of the
 * following:
 *   - POLLIN
 *   - POLLOUT
 */
static void thread_cancel_rw(struct thread_master *master, int fd, short state,
			     int idx_hint)
{
	bool found = false;

	/* find the index of corresponding pollfd */
	nfds_t i;

	/* Cancel POLLHUP too just in case some bozo set it */
	state |= POLLHUP;

	/* Some callers know the index of the pfd already */
	if (idx_hint >= 0) {
		i = idx_hint;
		found = true;
	} else {
		/* Have to look for the fd in the pfd array */
		for (i = 0; i < master->handler.pfdcount; i++)
			if (master->handler.pfds[i].fd == fd) {
				found = true;
				break;
			}
	}

	if (!found) {
		zlog_debug(
			"[!] Received cancellation request for nonexistent rw job");
		zlog_debug("[!] threadmaster: %s | fd: %d",
			   master->name ? master->name : "", fd);
		return;
	}

	/* NOT out event. */
	master->handler.pfds[i].events &= ~(state);

	/* If all events are canceled, delete / resize the pollfd array. */
	if (master->handler.pfds[i].events == 0) {
		memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
			(master->handler.pfdcount - i - 1)
				* sizeof(struct pollfd));
		master->handler.pfdcount--;
		master->handler.pfds[master->handler.pfdcount].fd = 0;
		master->handler.pfds[master->handler.pfdcount].events = 0;
	}

	/* If we have the same pollfd in the copy, perform the same operations,
	 * otherwise return. */
	if (i >= master->handler.copycount)
		return;

	master->handler.copy[i].events &= ~(state);

	if (master->handler.copy[i].events == 0) {
		memmove(master->handler.copy + i, master->handler.copy + i + 1,
			(master->handler.copycount - i - 1)
				* sizeof(struct pollfd));
		master->handler.copycount--;
		master->handler.copy[master->handler.copycount].fd = 0;
		master->handler.copy[master->handler.copycount].events = 0;
	}
}

/*
 * Process task cancellation given a task argument: iterate through the
 * various lists of tasks, looking for any that match the argument.
 */
static void cancel_arg_helper(struct thread_master *master,
			      const struct cancel_req *cr)
{
	struct thread *t;
	nfds_t i;
	int fd;
	struct pollfd *pfd;

	/* We're only processing arg-based cancellations here. */
	if (cr->eventobj == NULL)
		return;

	/* First process the ready lists. */
	frr_each_safe(thread_list, &master->event, t) {
		if (t->arg != cr->eventobj)
			continue;
		thread_list_del(&master->event, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	frr_each_safe(thread_list, &master->ready, t) {
		if (t->arg != cr->eventobj)
			continue;
		thread_list_del(&master->ready, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	/* If requested, stop here and ignore io and timers */
	if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
		return;

	/* Check the io tasks */
	for (i = 0; i < master->handler.pfdcount;) {
		pfd = master->handler.pfds + i;

		if (pfd->events & POLLIN)
			t = master->read[pfd->fd];
		else
			t = master->write[pfd->fd];

		if (t && t->arg == cr->eventobj) {
			fd = pfd->fd;

			/* Found a match to cancel: clean up fd arrays */
			thread_cancel_rw(master, pfd->fd, pfd->events, i);

			/* Clean up thread arrays */
			master->read[fd] = NULL;
			master->write[fd] = NULL;

			/* Clear caller's ref */
			if (t->ref)
				*t->ref = NULL;

			thread_add_unuse(master, t);

			/* Don't increment 'i' since the cancellation will have
			 * removed the entry from the pfd array
			 */
		} else
			i++;
	}

	/* Check the timer tasks */
	t = thread_timer_list_first(&master->timer);
	while (t) {
		struct thread *t_next;

		t_next = thread_timer_list_next(&master->timer, t);

		if (t->arg == cr->eventobj) {
			thread_timer_list_del(&master->timer, t);
			if (t->ref)
				*t->ref = NULL;
			thread_add_unuse(master, t);
		}

		t = t_next;
	}
}

/**
 * Process cancellation requests.
 *
 * This may only be run from the pthread which owns the thread_master.
 *
 * @param master the thread master to process
 * @REQUIRE master->mtx
 */
static void do_thread_cancel(struct thread_master *master)
{
	struct thread_list_head *list = NULL;
	struct thread **thread_array = NULL;
	struct thread *thread;

	struct cancel_req *cr;
	struct listnode *ln;
	for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
		/*
		 * If this is an event object cancellation, search
		 * through task lists deleting any tasks which have the
		 * specified argument - use this handy helper function.
		 */
		if (cr->eventobj) {
			cancel_arg_helper(master, cr);
			continue;
		}

		/*
		 * The pointer varies depending on whether the cancellation
		 * request was made asynchronously or not. If it was, we
		 * need to check whether the thread even exists anymore
		 * before cancelling it.
		 */
		thread = (cr->thread) ? cr->thread : *cr->threadref;

		if (!thread)
			continue;

		/* Determine the appropriate queue to cancel the thread from */
		switch (thread->type) {
		case THREAD_READ:
			thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
			thread_array = master->read;
			break;
		case THREAD_WRITE:
			thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
			thread_array = master->write;
			break;
		case THREAD_TIMER:
			thread_timer_list_del(&master->timer, thread);
			break;
		case THREAD_EVENT:
			list = &master->event;
			break;
		case THREAD_READY:
			list = &master->ready;
			break;
		default:
			continue;
			break;
		}

		if (list) {
			thread_list_del(list, thread);
		} else if (thread_array) {
			thread_array[thread->u.fd] = NULL;
		}

		if (thread->ref)
			*thread->ref = NULL;

		thread_add_unuse(thread->master, thread);
	}

	/* Delete and free all cancellation requests */
	if (master->cancel_req)
		list_delete_all_node(master->cancel_req);

	/* Wake up any threads which may be blocked in thread_cancel_async() */
	master->canceled = true;
	pthread_cond_broadcast(&master->cancel_cond);
}

/*
 * Helper function used for multiple flavors of arg-based cancellation.
 */
static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
{
	struct cancel_req *cr;

	assert(m->owner == pthread_self());

	/* Only worth anything if caller supplies an arg. */
	if (arg == NULL)
		return;

	cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));

	cr->flags = flags;

	frr_with_mutex(&m->mtx) {
		cr->eventobj = arg;
		listnode_add(m->cancel_req, cr);
		do_thread_cancel(m);
	}
}

/**
 * Cancel any events which have the specified argument.
 *
 * MT-Unsafe
 *
 * @param m the thread_master to cancel from
 * @param arg the argument passed when creating the event
 */
void thread_cancel_event(struct thread_master *master, void *arg)
{
	cancel_event_helper(master, arg, 0);
}

/*
 * Cancel ready tasks with an arg matching 'arg'
 *
 * MT-Unsafe
 *
 * @param m the thread_master to cancel from
 * @param arg the argument passed when creating the event
 */
void thread_cancel_event_ready(struct thread_master *m, void *arg)
{

	/* Only cancel ready/event tasks */
	cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
}

/**
 * Cancel a specific task.
 *
 * MT-Unsafe
 *
 * @param thread task to cancel
 */
void thread_cancel(struct thread **thread)
{
	struct thread_master *master;

	if (thread == NULL || *thread == NULL)
		return;

	master = (*thread)->master;

	frrtrace(9, frr_libfrr, thread_cancel, master,
		 (*thread)->xref->funcname, (*thread)->xref->xref.file,
		 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
		 (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);

	assert(master->owner == pthread_self());

	frr_with_mutex(&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->thread = *thread;
		listnode_add(master->cancel_req, cr);
		do_thread_cancel(master);
	}

	*thread = NULL;
}

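/*
 * Usage sketch: because thread_cancel() takes a struct thread ** and
 * NULLs it out, callers keep the back-reference they passed when
 * scheduling, e.g.
 *
 *   thread_cancel(&t_hello);  // safe even if t_hello is already NULL
 */
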
/**
 * Asynchronous cancellation.
 *
 * Called with either a struct thread ** or void * to an event argument,
 * this function posts the correct cancellation request and blocks until it is
 * serviced.
 *
 * If the thread is currently running, execution blocks until it completes.
 *
 * The last two parameters are mutually exclusive, i.e. if you pass one the
 * other must be NULL.
 *
 * When the cancellation procedure executes on the target thread_master, the
 * thread * provided is checked for nullity. If it is null, the thread is
 * assumed to no longer exist and the cancellation request is a no-op. Thus
 * users of this API must pass a back-reference when scheduling the original
 * task.
 *
 * MT-Safe
 *
 * @param master the thread master with the relevant event / task
 * @param thread pointer to thread to cancel
 * @param eventobj the event
 */
void thread_cancel_async(struct thread_master *master, struct thread **thread,
			 void *eventobj)
{
	assert(!(thread && eventobj) && (thread || eventobj));

	if (thread && *thread)
		frrtrace(9, frr_libfrr, thread_cancel_async, master,
			 (*thread)->xref->funcname, (*thread)->xref->xref.file,
			 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
			 (*thread)->u.val, (*thread)->arg,
			 (*thread)->u.sands.tv_sec);
	else
		frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
			 0, NULL, 0, 0, eventobj, 0);

	assert(master->owner != pthread_self());

	frr_with_mutex(&master->mtx) {
		master->canceled = false;

		if (thread) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->threadref = thread;
			listnode_add(master->cancel_req, cr);
		} else if (eventobj) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->eventobj = eventobj;
			listnode_add(master->cancel_req, cr);
		}
		AWAKEN(master);

		while (!master->canceled)
			pthread_cond_wait(&master->cancel_cond, &master->mtx);
	}

	if (thread)
		*thread = NULL;
}
/* ------------------------------------------------------------------------- */

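/*
 * Sketch of cross-pthread cancellation (t_bg is a hypothetical task
 * scheduled on another pthread's master): unlike thread_cancel(), this
 * must be called from a pthread that does NOT own 'master', and it
 * blocks until the owner services the request:
 *
 *   thread_cancel_async(other_master, &t_bg, NULL);
 */
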
static struct timeval *thread_timer_wait(struct thread_timer_list_head *timers,
					 struct timeval *timer_val)
{
	if (!thread_timer_list_count(timers))
		return NULL;

	struct thread *next_timer = thread_timer_list_first(timers);
	monotime_until(&next_timer->u.sands, timer_val);
	return timer_val;
}

static struct thread *thread_run(struct thread_master *m, struct thread *thread,
				 struct thread *fetch)
{
	*fetch = *thread;
	thread_add_unuse(m, thread);
	return fetch;
}

static int thread_process_io_helper(struct thread_master *m,
				    struct thread *thread, short state,
				    short actual_state, int pos)
{
	struct thread **thread_array;

	/*
	 * poll() clears the .events field, but the pollfd array we
	 * pass to poll() is a copy of the one used to schedule threads.
	 * We need to synchronize state between the two here by applying
	 * the same changes poll() made on the copy of the "real" pollfd
	 * array.
	 *
	 * This cleans up a possible infinite loop where we refuse
	 * to respond to a poll event but poll is insistent that
	 * we should.
	 */
	m->handler.pfds[pos].events &= ~(state);

	if (!thread) {
		if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
			flog_err(EC_LIB_NO_THREAD,
				 "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
				 m->handler.pfds[pos].fd, actual_state);
		return 0;
	}

	if (thread->type == THREAD_READ)
		thread_array = m->read;
	else
		thread_array = m->write;

	thread_array[thread->u.fd] = NULL;
	thread_list_add_tail(&m->ready, thread);
	thread->type = THREAD_READY;

	return 1;
}

8797240e QY |
1507 | /** |
1508 | * Process I/O events. | |
1509 | * | |
1510 | * Walks through file descriptor array looking for those pollfds whose .revents | |
1511 | * field has something interesting. Deletes any invalid file descriptors. | |
1512 | * | |
1513 | * @param m the thread master | |
1514 | * @param num the number of active file descriptors (return value of poll()) | |
1515 | */ | |
d62a17ae | 1516 | static void thread_process_io(struct thread_master *m, unsigned int num) |
0a95a0d0 | 1517 | { |
d62a17ae | 1518 | unsigned int ready = 0; |
1519 | struct pollfd *pfds = m->handler.copy; | |
1520 | ||
1521 | for (nfds_t i = 0; i < m->handler.copycount && ready < num; ++i) { | |
1522 | /* no event for current fd? immediately continue */ | |
1523 | if (pfds[i].revents == 0) | |
1524 | continue; | |
1525 | ||
1526 | ready++; | |
1527 | ||
d279ef57 DS |
1528 | /* |
1529 | * Unless someone has called thread_cancel from another | |
1530 | * pthread, the only thing that could have changed in | |
1531 | * m->handler.pfds while we were asleep is the .events | |
1532 | * field in a given pollfd. Barring thread_cancel() that | |
1533 | * value should be a superset of the values we have in our | |
1534 | * copy, so there's no need to update it. Similarly, | |
1535 | * barring deletion, the fd should still be a valid index | |
1536 | * into the master's pfds. | |
d142453d DS |
1537 | * |
1538 | * We are including POLLERR here to do a READ event | |
1539 | * this is because the read should fail and the | |
1540 | * read function should handle it appropriately | |
d279ef57 | 1541 | */ |
d142453d | 1542 | if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) { |
d62a17ae | 1543 | thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN, |
45f3d590 DS |
1544 | pfds[i].revents, i); |
1545 | } | |
d62a17ae | 1546 | if (pfds[i].revents & POLLOUT) |
1547 | thread_process_io_helper(m, m->write[pfds[i].fd], | |
45f3d590 | 1548 | POLLOUT, pfds[i].revents, i); |
d62a17ae | 1549 | |
1550 | /* If one of our file descriptors is garbage, remove it | |
1551 | * from both pollfd arrays and update the sizes and the | |
1552 | * index. */ | |
1553 | if (pfds[i].revents & POLLNVAL) { | |
1554 | memmove(m->handler.pfds + i, m->handler.pfds + i + 1, | |
1555 | (m->handler.pfdcount - i - 1) | |
1556 | * sizeof(struct pollfd)); | |
1557 | m->handler.pfdcount--; | |
e985cda0 S |
1558 | m->handler.pfds[m->handler.pfdcount].fd = 0; |
1559 | m->handler.pfds[m->handler.pfdcount].events = 0; | |
d62a17ae | 1560 | |
1561 | memmove(pfds + i, pfds + i + 1, | |
1562 | (m->handler.copycount - i - 1) | |
1563 | * sizeof(struct pollfd)); | |
1564 | m->handler.copycount--; | |
e985cda0 S |
1565 | m->handler.copy[m->handler.copycount].fd = 0; |
1566 | m->handler.copy[m->handler.copycount].events = 0; | |
d62a17ae | 1567 | |
1568 | i--; | |
1569 | } | |
1570 | } | |
718e3744 | 1571 | } |
1572 | ||
8b70d0b0 | 1573 | /* Add all timers that have popped to the ready list. */ |
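/* m->timer is a heap ordered by deadline (see thread_timer_cmp), so we
 * can stop at the first entry that has not yet expired. */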
e7d9e44b | 1574 | static unsigned int thread_process_timers(struct thread_master *m, |
d62a17ae | 1575 | struct timeval *timenow) |
a48b4e6d | 1576 | { |
d62a17ae | 1577 | struct thread *thread; |
1578 | unsigned int ready = 0; | |
1579 | ||
e7d9e44b | 1580 | while ((thread = thread_timer_list_first(&m->timer))) { |
d62a17ae | 1581 | if (timercmp(timenow, &thread->u.sands, <)) |
e7d9e44b MS |
1582 | break; |
1583 | thread_timer_list_pop(&m->timer); | |
d62a17ae | 1584 | thread->type = THREAD_READY; |
e7d9e44b | 1585 | thread_list_add_tail(&m->ready, thread); |
d62a17ae | 1586 | ready++; |
1587 | } | |
e7d9e44b | 1588 | |
d62a17ae | 1589 | return ready; |
a48b4e6d | 1590 | } |
1591 | ||
2613abe6 | 1592 | /* process a list en masse, e.g. for event thread lists */ |
c284542b | 1593 | static unsigned int thread_process(struct thread_list_head *list) |
2613abe6 | 1594 | { |
d62a17ae | 1595 | struct thread *thread; |
d62a17ae | 1596 | unsigned int ready = 0; |
1597 | ||
c284542b | 1598 | while ((thread = thread_list_pop(list))) { |
d62a17ae | 1599 | thread->type = THREAD_READY; |
c284542b | 1600 | thread_list_add_tail(&thread->master->ready, thread); |
d62a17ae | 1601 | ready++; |
1602 | } | |
1603 | return ready; | |
2613abe6 PJ |
1604 | } |
1605 | ||
1606 | ||
718e3744 | 1607 | /* Fetch next ready thread. */ |
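/*
 * The canonical caller loop looks roughly like this (a sketch; FRR
 * daemons normally reach it through frr_run() rather than writing it
 * out by hand):
 *
 *     struct thread task;
 *
 *     while (thread_fetch(master, &task))
 *             thread_call(&task);
 *
 * A NULL return means nothing is pending and the application may exit.
 */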
d62a17ae | 1608 | struct thread *thread_fetch(struct thread_master *m, struct thread *fetch) |
718e3744 | 1609 | { |
d62a17ae | 1610 | struct thread *thread = NULL; |
1611 | struct timeval now; | |
1612 | struct timeval zerotime = {0, 0}; | |
1613 | struct timeval tv; | |
1614 | struct timeval *tw = NULL; | |
d81ca9a3 | 1615 | bool eintr_p = false; |
d62a17ae | 1616 | int num = 0; |
1617 | ||
1618 | do { | |
1619 | /* Handle signals if any */ | |
1620 | if (m->handle_signals) | |
1621 | quagga_sigevent_process(); | |
1622 | ||
1623 | pthread_mutex_lock(&m->mtx); | |
1624 | ||
1625 | /* Process any pending cancellation requests */ | |
1626 | do_thread_cancel(m); | |
1627 | ||
e3c9529e QY |
1628 | /* |
1629 | * Attempt to flush ready queue before going into poll(). | |
1630 | * This is performance-critical. Think twice before modifying. | |
1631 | */ | |
c284542b | 1632 | if ((thread = thread_list_pop(&m->ready))) { |
e3c9529e QY |
1633 | fetch = thread_run(m, thread, fetch); |
1634 | if (fetch->ref) | |
1635 | *fetch->ref = NULL; | |
1636 | pthread_mutex_unlock(&m->mtx); | |
5e822957 DS |
1637 | if (!m->ready_run_loop) |
1638 | GETRUSAGE(&m->last_getrusage); | |
1639 | m->ready_run_loop = true; | |
e3c9529e QY |
1640 | break; |
1641 | } | |
1642 | ||
5e822957 | 1643 | m->ready_run_loop = false; |
e3c9529e QY |
1644 | /* otherwise, tick through scheduling sequence */ |
1645 | ||
bca37d17 QY |
1646 | /* |
1647 | * Post events to ready queue. This must come before the | |
1648 | * following block since events should occur immediately | |
1649 | */ | |
d62a17ae | 1650 | thread_process(&m->event); |
1651 | ||
bca37d17 QY |
1652 | /* |
1653 | * If there are no tasks on the ready queue, we will poll() | |
1654 | * until a timer expires or we receive I/O, whichever comes | |
1655 | * first. The strategy for doing this is: | |
d62a17ae | 1656 | * |
1657 | * - If there are events pending, set the poll() timeout to zero | |
1658 | * - If there are no events pending, but there are timers | |
d279ef57 DS |
1659 | * pending, set the timeout to the smallest remaining time on |
1660 | * any timer. | |
d62a17ae | 1661 | * - If there are neither timers nor events pending, but there |
d279ef57 | 1662 | * are file descriptors pending, block indefinitely in poll() |
d62a17ae | 1663 | * - If nothing is pending, it's time for the application to die |
1664 | * | |
1665 | * In every case except the last, we need to hit poll() at least | |
bca37d17 QY |
1666 | * once per loop to avoid starvation by events |
1667 | */ | |
c284542b | 1668 | if (!thread_list_count(&m->ready)) |
27d29ced | 1669 | tw = thread_timer_wait(&m->timer, &tv); |
d62a17ae | 1670 | |
c284542b DL |
1671 | if (thread_list_count(&m->ready) || |
1672 | (tw && !timercmp(tw, &zerotime, >))) | |
d62a17ae | 1673 | tw = &zerotime; |
1674 | ||
1675 | if (!tw && m->handler.pfdcount == 0) { /* die */ | |
1676 | pthread_mutex_unlock(&m->mtx); | |
1677 | fetch = NULL; | |
1678 | break; | |
1679 | } | |
1680 | ||
bca37d17 QY |
1681 | /* |
1682 | * Copy pollfd array + # active pollfds in it. Not necessary to | |
1683 | * copy the array size as this is fixed. | |
1684 | */ | |
d62a17ae | 1685 | m->handler.copycount = m->handler.pfdcount; |
1686 | memcpy(m->handler.copy, m->handler.pfds, | |
1687 | m->handler.copycount * sizeof(struct pollfd)); | |
1688 | ||
e3c9529e QY |
1689 | pthread_mutex_unlock(&m->mtx); |
1690 | { | |
d81ca9a3 MS |
1691 | eintr_p = false; |
1692 | num = fd_poll(m, tw, &eintr_p); | |
e3c9529e QY |
1693 | } |
1694 | pthread_mutex_lock(&m->mtx); | |
d764d2cc | 1695 | |
e3c9529e QY |
1696 | /* Handle any errors received in poll() */ |
1697 | if (num < 0) { | |
d81ca9a3 | 1698 | if (eintr_p) { |
d62a17ae | 1699 | pthread_mutex_unlock(&m->mtx); |
e3c9529e QY |
1700 | /* loop around to signal handler */ |
1701 | continue; | |
d62a17ae | 1702 | } |
1703 | ||
e3c9529e | 1704 | /* else die */ |
450971aa | 1705 | flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s", |
9ef9495e | 1706 | safe_strerror(errno)); |
e3c9529e QY |
1707 | pthread_mutex_unlock(&m->mtx); |
1708 | fetch = NULL; | |
1709 | break; | |
bca37d17 | 1710 | } |
d62a17ae | 1711 | |
1712 | /* Post timers to ready queue. */ | |
1713 | monotime(&now); | |
e7d9e44b | 1714 | thread_process_timers(m, &now); |
d62a17ae | 1715 | |
1716 | /* Post I/O to ready queue. */ | |
1717 | if (num > 0) | |
1718 | thread_process_io(m, num); | |
1719 | ||
d62a17ae | 1720 | pthread_mutex_unlock(&m->mtx); |
1721 | ||
1722 | } while (!thread && m->spin); | |
1723 | ||
1724 | return fetch; | |
718e3744 | 1725 | } |
1726 | ||
d62a17ae | 1727 | static unsigned long timeval_elapsed(struct timeval a, struct timeval b) |
62f44022 | 1728 | { |
d62a17ae | 1729 | return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO) |
1730 | + (a.tv_usec - b.tv_usec)); | |
62f44022 QY |
1731 | } |
1732 | ||
d62a17ae | 1733 | unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start, |
1734 | unsigned long *cputime) | |
718e3744 | 1735 | { |
d62a17ae | 1736 | /* This is 'user + sys' time. */ |
1737 | *cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime) | |
1738 | + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime); | |
1739 | return timeval_elapsed(now->real, start->real); | |
8b70d0b0 | 1740 | } |
1741 | ||
50596be0 DS |
1742 | /* We should aim to yield after the task's yield interval, which | |
1743 | defaults to THREAD_YIELD_TIME_SLOT. | |
8b70d0b0 | 1744 | Note: we are using real (wall clock) time for this calculation. |
1745 | It could be argued that CPU time may make more sense in certain | |
1746 | contexts. The things to consider are whether the thread may have | |
1747 | blocked (in which case wall time increases, but CPU time does not), | |
1748 | or whether the system is heavily loaded with other processes competing | |
d62a17ae | 1749 | for CPU time. On balance, wall clock time seems to make sense. |
8b70d0b0 | 1750 | Plus it has the added benefit that gettimeofday should be faster |
1751 | than calling getrusage. */ | |
d62a17ae | 1752 | int thread_should_yield(struct thread *thread) |
718e3744 | 1753 | { |
d62a17ae | 1754 | int result; |
00dffa8c | 1755 | frr_with_mutex(&thread->mtx) { |
d62a17ae | 1756 | result = monotime_since(&thread->real, NULL) |
1757 | > (int64_t)thread->yield; | |
1758 | } | |
d62a17ae | 1759 | return result; |
50596be0 DS |
1760 | } |
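
/*
 * Typical use in a long-running job (a sketch with hypothetical helpers
 * work_left()/do_unit()/job_continue()): do a bounded amount of work,
 * then reschedule yourself so other tasks get CPU time.
 *
 *     while (work_left(ctx)) {
 *             do_unit(ctx);
 *             if (thread_should_yield(thread)) {
 *                     thread_add_event(master, job_continue, ctx, 0,
 *                                      NULL);
 *                     return 0;
 *             }
 *     }
 */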
1761 | ||
d62a17ae | 1762 | void thread_set_yield_time(struct thread *thread, unsigned long yield_time) |
50596be0 | 1763 | { |
00dffa8c | 1764 | frr_with_mutex(&thread->mtx) { |
d62a17ae | 1765 | thread->yield = yield_time; |
1766 | } | |
718e3744 | 1767 | } |
1768 | ||
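/*
 * Snapshot wall-clock and CPU time.  Where RUSAGE_THREAD exists (e.g.
 * Linux), CPU usage is accounted per pthread; the RUSAGE_SELF fallback
 * charges the whole process, which can overstate a single thread's
 * share in a multithreaded daemon.
 */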
d62a17ae | 1769 | void thread_getrusage(RUSAGE_T *r) |
db9c0df9 | 1770 | { |
231db9a6 DS |
1771 | #if defined RUSAGE_THREAD |
1772 | #define FRR_RUSAGE RUSAGE_THREAD | |
1773 | #else | |
1774 | #define FRR_RUSAGE RUSAGE_SELF | |
1775 | #endif | |
d62a17ae | 1776 | monotime(&r->real); |
f75e802d | 1777 | #ifndef EXCLUDE_CPU_TIME |
231db9a6 | 1778 | getrusage(FRR_RUSAGE, &(r->cpu)); |
f75e802d | 1779 | #endif |
db9c0df9 PJ |
1780 | } |
1781 | ||
fbcac826 QY |
1782 | /* |
1783 | * Call a thread. | |
1784 | * | |
1785 | * This function will atomically update the thread's usage history. At present | |
1786 | * this is the only spot where usage history is written. Nevertheless the code | |
1787 | * has been written such that the introduction of writers in the future should | |
1788 | * not need to update it provided the writers atomically perform only the | |
1789 | * operations done here, i.e. updating the total and maximum times. In | |
1790 | * particular, the maximum real and cpu times must be monotonically increasing | |
1791 | * or this code is not correct. | |
1792 | */ | |
d62a17ae | 1793 | void thread_call(struct thread *thread) |
718e3744 | 1794 | { |
f75e802d | 1795 | #ifndef EXCLUDE_CPU_TIME |
fbcac826 QY |
1796 | _Atomic unsigned long realtime, cputime; |
1797 | unsigned long exp; | |
1798 | unsigned long helper; | |
f75e802d | 1799 | #endif |
d62a17ae | 1800 | RUSAGE_T before, after; |
cc8b13a0 | 1801 | |
5e822957 DS |
1802 | if (thread->master->ready_run_loop) |
1803 | before = thread->master->last_getrusage; | |
1804 | else | |
1805 | GETRUSAGE(&before); | |
1806 | ||
d62a17ae | 1807 | thread->real = before.real; |
718e3744 | 1808 | |
6c3aa850 DL |
1809 | frrtrace(9, frr_libfrr, thread_call, thread->master, |
1810 | thread->xref->funcname, thread->xref->xref.file, | |
1811 | thread->xref->xref.line, NULL, thread->u.fd, | |
c7bb4f00 | 1812 | thread->u.val, thread->arg, thread->u.sands.tv_sec); |
abf96a87 | 1813 | |
d62a17ae | 1814 | pthread_setspecific(thread_current, thread); |
1815 | (*thread->func)(thread); | |
1816 | pthread_setspecific(thread_current, NULL); | |
718e3744 | 1817 | |
d62a17ae | 1818 | GETRUSAGE(&after); |
5e822957 | 1819 | thread->master->last_getrusage = after; |
718e3744 | 1820 | |
f75e802d | 1821 | #ifndef EXCLUDE_CPU_TIME |
fbcac826 QY |
1822 | realtime = thread_consumed_time(&after, &before, &helper); |
1823 | cputime = helper; | |
1824 | ||
1825 | /* update realtime */ | |
1826 | atomic_fetch_add_explicit(&thread->hist->real.total, realtime, | |
1827 | memory_order_seq_cst); | |
1828 | exp = atomic_load_explicit(&thread->hist->real.max, | |
1829 | memory_order_seq_cst); | |
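/* Lock-free maximum update: a failed compare-exchange refreshes 'exp'
 * with the currently stored value, so the loop exits as soon as either
 * our sample is published or a concurrent writer has already stored a
 * larger maximum.  The same idiom is used below for cputime. */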
1830 | while (exp < realtime | |
1831 | && !atomic_compare_exchange_weak_explicit( | |
1832 | &thread->hist->real.max, &exp, realtime, | |
1833 | memory_order_seq_cst, memory_order_seq_cst)) | |
1834 | ; | |
1835 | ||
1836 | /* update cputime */ | |
1837 | atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime, | |
1838 | memory_order_seq_cst); | |
1839 | exp = atomic_load_explicit(&thread->hist->cpu.max, | |
1840 | memory_order_seq_cst); | |
1841 | while (exp < cputime | |
1842 | && !atomic_compare_exchange_weak_explicit( | |
1843 | &thread->hist->cpu.max, &exp, cputime, | |
1844 | memory_order_seq_cst, memory_order_seq_cst)) | |
1845 | ; | |
1846 | ||
1847 | atomic_fetch_add_explicit(&thread->hist->total_calls, 1, | |
1848 | memory_order_seq_cst); | |
1849 | atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type, | |
1850 | memory_order_seq_cst); | |
718e3744 | 1851 | |
924b9229 | 1852 | #ifdef CONSUMED_TIME_CHECK |
039d547f | 1853 | if (cputime > CONSUMED_TIME_CHECK) { |
d62a17ae | 1854 | /* |
039d547f DS |
1855 | * We have a CPU Hog on our hands. The time FRR |
1856 | * has spent doing actual work (not sleeping) | |
1857 | * is greater than 5 seconds. | |
d62a17ae | 1858 | * Whinge about it now, so we're aware this is yet another task |
1859 | * to fix. | |
1860 | */ | |
9ef9495e | 1861 | flog_warn( |
039d547f DS |
1862 | EC_LIB_SLOW_THREAD_CPU, |
1863 | "CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)", | |
1864 | thread->xref->funcname, (unsigned long)thread->func, | |
1865 | realtime / 1000, cputime / 1000); | |
1866 | } else if (realtime > CONSUMED_TIME_CHECK) { | |
1867 | /* | |
1868 | * The runtime for a task is greater than 5 seconds, but | |
1869 | * the cpu time is under 5 seconds. Let's whine | |
1870 | * about this because this could imply some sort of | |
1871 | * scheduling issue. | |
1872 | */ | |
1873 | flog_warn( | |
1874 | EC_LIB_SLOW_THREAD_WALL, | |
1875 | "STARVATION: task %s (%lx) ran for %lums (cpu time %lums)", | |
60a3efec | 1876 | thread->xref->funcname, (unsigned long)thread->func, |
d62a17ae | 1877 | realtime / 1000, cputime / 1000); |
1878 | } | |
924b9229 | 1879 | #endif /* CONSUMED_TIME_CHECK */ |
f75e802d | 1880 | #endif /* Exclude CPU Time */ |
718e3744 | 1881 | } |
1882 | ||
1883 | /* Execute thread */ | |
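/* Reached through the thread_execute() wrapper macro, which supplies
 * the xref.  Unlike the thread_add_* schedulers, the callback runs
 * synchronously here, with the usual accounting from thread_call(). */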
60a3efec DL |
1884 | void _thread_execute(const struct xref_threadsched *xref, |
1885 | struct thread_master *m, int (*func)(struct thread *), | |
1886 | void *arg, int val) | |
718e3744 | 1887 | { |
c4345fbf | 1888 | struct thread *thread; |
718e3744 | 1889 | |
c4345fbf | 1890 | /* Get or allocate new thread to execute. */ |
00dffa8c | 1891 | frr_with_mutex(&m->mtx) { |
60a3efec | 1892 | thread = thread_get(m, THREAD_EVENT, func, arg, xref); |
9c7753e4 | 1893 | |
c4345fbf | 1894 | /* Set its event value. */ |
00dffa8c | 1895 | frr_with_mutex(&thread->mtx) { |
c4345fbf RZ |
1896 | thread->add_type = THREAD_EXECUTE; |
1897 | thread->u.val = val; | |
1898 | thread->ref = &thread; | |
1899 | } | |
c4345fbf | 1900 | } |
f7c62e11 | 1901 | |
c4345fbf RZ |
1902 | /* Execute thread doing all accounting. */ |
1903 | thread_call(thread); | |
9c7753e4 | 1904 | |
c4345fbf RZ |
1905 | /* Give back or free thread. */ |
1906 | thread_add_unuse(m, thread); | |
718e3744 | 1907 | } |
1543c387 MS |
1908 | |
1909 | /* Debug signal mask - if 'sigs' is NULL, use current effective mask. */ | |
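/* Example output (illustrative; signal numbers are platform-specific):
 * "debug_signals: 10,12" when SIGUSR1 and SIGUSR2 are set on Linux, or
 * "debug_signals: <none>" for an empty mask. */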
1910 | void debug_signals(const sigset_t *sigs) | |
1911 | { | |
1912 | int i, found; | |
1913 | sigset_t tmpsigs; | |
1914 | char buf[300]; | |
1915 | ||
1916 | /* | |
1917 | * We're only looking at the non-realtime signals here, so we need | |
1918 | * some limit value. Platform differences mean at some point we just | |
1919 | * need to pick a reasonable value. | |
1920 | */ | |
1921 | #if defined SIGRTMIN | |
1922 | # define LAST_SIGNAL SIGRTMIN | |
1923 | #else | |
1924 | # define LAST_SIGNAL 32 | |
1925 | #endif | |
1926 | ||
1927 | ||
1928 | if (sigs == NULL) { | |
1929 | sigemptyset(&tmpsigs); | |
1930 | pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs); | |
1931 | sigs = &tmpsigs; | |
1932 | } | |
1933 | ||
1934 | found = 0; | |
1935 | buf[0] = '\0'; | |
1936 | ||
1937 | for (i = 0; i < LAST_SIGNAL; i++) { | |
1938 | char tmp[20]; | |
1939 | ||
1940 | if (sigismember(sigs, i) > 0) { | |
1941 | if (found > 0) | |
1942 | strlcat(buf, ",", sizeof(buf)); | |
1943 | snprintf(tmp, sizeof(tmp), "%d", i); | |
1944 | strlcat(buf, tmp, sizeof(buf)); | |
1945 | found++; | |
1946 | } | |
1947 | } | |
1948 | ||
1949 | if (found == 0) | |
1950 | snprintf(buf, sizeof(buf), "<none>"); | |
1951 | ||
1952 | zlog_debug("%s: %s", __func__, buf); | |
1953 | } |