718e3744 1/* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
896014f4
DL
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
718e3744 19 */
20
21/* #define DEBUG */
22
23#include <zebra.h>
308d14ae 24#include <sys/resource.h>
718e3744 25
26#include "thread.h"
27#include "memory.h"
3e41733f 28#include "frrcu.h"
718e3744 29#include "log.h"
e04ab74d 30#include "hash.h"
31#include "command.h"
05c447dd 32#include "sigevent.h"
3bf2673b 33#include "network.h"
bd74dc61 34#include "jhash.h"
fbcac826 35#include "frratomic.h"
00dffa8c 36#include "frr_pthread.h"
9ef9495e 37#include "lib_errors.h"
912d45a1 38#include "libfrr_trace.h"
1a9f340b 39#include "libfrr.h"
d6be5fb9 40
bf8d3d6a
DL
41DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
42DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master");
43DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info");
44DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats");
4a1ab8e4 45
960b9a53 46DECLARE_LIST(thread_list, struct thread, threaditem);
c284542b 47
aea25d1e
MS
48struct cancel_req {
49 int flags;
50 struct thread *thread;
51 void *eventobj;
52 struct thread **threadref;
53};
54
55/* Flags for task cancellation */
56#define THREAD_CANCEL_FLAG_READY 0x01
57
27d29ced
DL
58static int thread_timer_cmp(const struct thread *a, const struct thread *b)
59{
60 if (a->u.sands.tv_sec < b->u.sands.tv_sec)
61 return -1;
62 if (a->u.sands.tv_sec > b->u.sands.tv_sec)
63 return 1;
64 if (a->u.sands.tv_usec < b->u.sands.tv_usec)
65 return -1;
66 if (a->u.sands.tv_usec > b->u.sands.tv_usec)
67 return 1;
68 return 0;
69}
70
960b9a53 71DECLARE_HEAP(thread_timer_list, struct thread, timeritem, thread_timer_cmp);
27d29ced 72
3b96b781
HT
73#if defined(__APPLE__)
74#include <mach/mach.h>
75#include <mach/mach_time.h>
76#endif
77
d62a17ae 78#define AWAKEN(m) \
79 do { \
2b64873d 80 const unsigned char wakebyte = 0x01; \
d62a17ae 81 write(m->io_pipe[1], &wakebyte, 1); \
82 } while (0);
3bf2673b 83
62f44022 84/* control variable for initializer */
c17faa4b 85static pthread_once_t init_once = PTHREAD_ONCE_INIT;
e0bebc7c 86pthread_key_t thread_current;
6b0655a2 87
c17faa4b 88static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
62f44022
QY
89static struct list *masters;
90
6655966d 91static void thread_free(struct thread_master *master, struct thread *thread);
6b0655a2 92
45f01188
DL
93#ifndef EXCLUDE_CPU_TIME
94#define EXCLUDE_CPU_TIME 0
95#endif
96#ifndef CONSUMED_TIME_CHECK
97#define CONSUMED_TIME_CHECK 0
98#endif
99
100bool cputime_enabled = !EXCLUDE_CPU_TIME;
101unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
102unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
103
62f44022 104/* CLI start ---------------------------------------------------------------- */
45f01188
DL
105#ifndef VTYSH_EXTRACT_PL
106#include "lib/thread_clippy.c"
107#endif
108
d8b87afe 109static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
e04ab74d 110{
883cc51d 111 int size = sizeof(a->func);
bd74dc61
DS
112
113 return jhash(&a->func, size, 0);
e04ab74d 114}
115
74df8d6d 116static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
d62a17ae 117 const struct cpu_thread_history *b)
e04ab74d 118{
d62a17ae 119 return a->func == b->func;
e04ab74d 120}
121
d62a17ae 122static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
e04ab74d 123{
d62a17ae 124 struct cpu_thread_history *new;
125 new = XCALLOC(MTYPE_THREAD_STATS, sizeof(struct cpu_thread_history));
126 new->func = a->func;
127 new->funcname = a->funcname;
128 return new;
e04ab74d 129}
130
d62a17ae 131static void cpu_record_hash_free(void *a)
228da428 132{
d62a17ae 133 struct cpu_thread_history *hist = a;
134
135 XFREE(MTYPE_THREAD_STATS, hist);
228da428
CC
136}
137
d62a17ae 138static void vty_out_cpu_thread_history(struct vty *vty,
139 struct cpu_thread_history *a)
e04ab74d 140{
9b8e01ca 141 vty_out(vty, "%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu",
72327cf3 142 a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
9b8e01ca
DS
143 a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
144 (a->real.total / a->total_calls), a->real.max,
145 a->total_cpu_warn, a->total_wall_warn);
146 vty_out(vty, " %c%c%c%c%c %s\n",
d62a17ae 147 a->types & (1 << THREAD_READ) ? 'R' : ' ',
148 a->types & (1 << THREAD_WRITE) ? 'W' : ' ',
149 a->types & (1 << THREAD_TIMER) ? 'T' : ' ',
150 a->types & (1 << THREAD_EVENT) ? 'E' : ' ',
151 a->types & (1 << THREAD_EXECUTE) ? 'X' : ' ', a->funcname);
e04ab74d 152}
153
e3b78da8 154static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
e04ab74d 155{
d62a17ae 156 struct cpu_thread_history *totals = args[0];
fbcac826 157 struct cpu_thread_history copy;
d62a17ae 158 struct vty *vty = args[1];
fbcac826 159 uint8_t *filter = args[2];
d62a17ae 160
161 struct cpu_thread_history *a = bucket->data;
162
fbcac826
QY
163 copy.total_active =
164 atomic_load_explicit(&a->total_active, memory_order_seq_cst);
165 copy.total_calls =
166 atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
9b8e01ca
DS
167 copy.total_cpu_warn =
168 atomic_load_explicit(&a->total_cpu_warn, memory_order_seq_cst);
169 copy.total_wall_warn =
170 atomic_load_explicit(&a->total_wall_warn, memory_order_seq_cst);
fbcac826
QY
171 copy.cpu.total =
172 atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
173 copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
174 copy.real.total =
175 atomic_load_explicit(&a->real.total, memory_order_seq_cst);
176 copy.real.max =
177 atomic_load_explicit(&a->real.max, memory_order_seq_cst);
178 copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
179 copy.funcname = a->funcname;
180
181 if (!(copy.types & *filter))
d62a17ae 182 return;
fbcac826
QY
183
184 vty_out_cpu_thread_history(vty, &copy);
185 totals->total_active += copy.total_active;
186 totals->total_calls += copy.total_calls;
9b8e01ca
DS
187 totals->total_cpu_warn += copy.total_cpu_warn;
188 totals->total_wall_warn += copy.total_wall_warn;
fbcac826
QY
189 totals->real.total += copy.real.total;
190 if (totals->real.max < copy.real.max)
191 totals->real.max = copy.real.max;
192 totals->cpu.total += copy.cpu.total;
193 if (totals->cpu.max < copy.cpu.max)
194 totals->cpu.max = copy.cpu.max;
e04ab74d 195}
196
fbcac826 197static void cpu_record_print(struct vty *vty, uint8_t filter)
e04ab74d 198{
d62a17ae 199 struct cpu_thread_history tmp;
200 void *args[3] = {&tmp, vty, &filter};
201 struct thread_master *m;
202 struct listnode *ln;
203
45f01188
DL
204 if (!cputime_enabled)
205 vty_out(vty,
206 "\n"
207 "Collecting CPU time statistics is currently disabled. Following statistics\n"
208 "will be zero or may display data from when collection was enabled. Use the\n"
209 " \"service cputime-stats\" command to start collecting data.\n"
210 "\nCounters and wallclock times are always maintained and should be accurate.\n");
211
0d6f7fd6 212 memset(&tmp, 0, sizeof(tmp));
d62a17ae 213 tmp.funcname = "TOTAL";
214 tmp.types = filter;
215
00dffa8c 216 frr_with_mutex(&masters_mtx) {
d62a17ae 217 for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
218 const char *name = m->name ? m->name : "main";
219
220 char underline[strlen(name) + 1];
221 memset(underline, '-', sizeof(underline));
4f113d60 222 underline[sizeof(underline) - 1] = '\0';
d62a17ae 223
224 vty_out(vty, "\n");
225 vty_out(vty, "Showing statistics for pthread %s\n",
226 name);
227 vty_out(vty, "-------------------------------%s\n",
228 underline);
84d951d0 229 vty_out(vty, "%30s %18s %18s\n", "",
d62a17ae 230 "CPU (user+system):", "Real (wall-clock):");
231 vty_out(vty,
232 "Active Runtime(ms) Invoked Avg uSec Max uSecs");
233 vty_out(vty, " Avg uSec Max uSecs");
9b8e01ca 234 vty_out(vty, " CPU_Warn Wall_Warn Type Thread\n");
d62a17ae 235
236 if (m->cpu_record->count)
237 hash_iterate(
238 m->cpu_record,
e3b78da8 239 (void (*)(struct hash_bucket *,
d62a17ae 240 void *))cpu_record_hash_print,
241 args);
242 else
243 vty_out(vty, "No data to display yet.\n");
244
245 vty_out(vty, "\n");
246 }
247 }
d62a17ae 248
249 vty_out(vty, "\n");
250 vty_out(vty, "Total thread statistics\n");
251 vty_out(vty, "-------------------------\n");
84d951d0 252 vty_out(vty, "%30s %18s %18s\n", "",
d62a17ae 253 "CPU (user+system):", "Real (wall-clock):");
254 vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
9b8e01ca 255 vty_out(vty, " Avg uSec Max uSecs CPU_Warn Wall_Warn");
d62a17ae 256 vty_out(vty, " Type Thread\n");
257
258 if (tmp.total_calls > 0)
259 vty_out_cpu_thread_history(vty, &tmp);
e04ab74d 260}
261
e3b78da8 262static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
e276eb82 263{
fbcac826 264 uint8_t *filter = args[0];
d62a17ae 265 struct hash *cpu_record = args[1];
266
267 struct cpu_thread_history *a = bucket->data;
62f44022 268
d62a17ae 269 if (!(a->types & *filter))
270 return;
f48f65d2 271
d62a17ae 272 hash_release(cpu_record, bucket->data);
e276eb82
PJ
273}
274
fbcac826 275static void cpu_record_clear(uint8_t filter)
e276eb82 276{
fbcac826 277 uint8_t *tmp = &filter;
d62a17ae 278 struct thread_master *m;
279 struct listnode *ln;
280
00dffa8c 281 frr_with_mutex(&masters_mtx) {
d62a17ae 282 for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
00dffa8c 283 frr_with_mutex(&m->mtx) {
d62a17ae 284 void *args[2] = {tmp, m->cpu_record};
285 hash_iterate(
286 m->cpu_record,
e3b78da8 287 (void (*)(struct hash_bucket *,
d62a17ae 288 void *))cpu_record_hash_clear,
289 args);
290 }
d62a17ae 291 }
292 }
62f44022
QY
293}
294
fbcac826 295static uint8_t parse_filter(const char *filterstr)
62f44022 296{
d62a17ae 297 int i = 0;
298 int filter = 0;
299
300 while (filterstr[i] != '\0') {
301 switch (filterstr[i]) {
302 case 'r':
303 case 'R':
304 filter |= (1 << THREAD_READ);
305 break;
306 case 'w':
307 case 'W':
308 filter |= (1 << THREAD_WRITE);
309 break;
310 case 't':
311 case 'T':
312 filter |= (1 << THREAD_TIMER);
313 break;
314 case 'e':
315 case 'E':
316 filter |= (1 << THREAD_EVENT);
317 break;
318 case 'x':
319 case 'X':
320 filter |= (1 << THREAD_EXECUTE);
321 break;
322 default:
323 break;
324 }
325 ++i;
326 }
327 return filter;
62f44022
QY
328}
329
ee4dcee8
DL
330DEFUN_NOSH (show_thread_cpu,
331 show_thread_cpu_cmd,
332 "show thread cpu [FILTER]",
333 SHOW_STR
334 "Thread information\n"
335 "Thread CPU usage\n"
336 "Display filter (rwtex)\n")
62f44022 337{
fbcac826 338 uint8_t filter = (uint8_t)-1U;
d62a17ae 339 int idx = 0;
340
341 if (argv_find(argv, argc, "FILTER", &idx)) {
342 filter = parse_filter(argv[idx]->arg);
343 if (!filter) {
344 vty_out(vty,
3efd0893 345				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
d62a17ae 346 argv[idx]->arg);
347 return CMD_WARNING;
348 }
349 }
350
351 cpu_record_print(vty, filter);
352 return CMD_SUCCESS;
e276eb82 353}
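/*
 * Illustrative vtysh usage of the command above (an editor-added sketch, not
 * part of the original source):
 *
 *     show thread cpu          ! statistics for all task types
 *     show thread cpu tw       ! timer and write tasks only
 *
 * The optional FILTER argument is any combination of the letters r/w/t/e/x,
 * matched case-insensitively by parse_filter() above.
 */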
45f01188
DL
354
355DEFPY (service_cputime_stats,
356 service_cputime_stats_cmd,
357 "[no] service cputime-stats",
358 NO_STR
359 "Set up miscellaneous service\n"
360 "Collect CPU usage statistics\n")
361{
362 cputime_enabled = !no;
363 return CMD_SUCCESS;
364}
365
366DEFPY (service_cputime_warning,
367 service_cputime_warning_cmd,
368 "[no] service cputime-warning (1-4294967295)",
369 NO_STR
370 "Set up miscellaneous service\n"
371 "Warn for tasks exceeding CPU usage threshold\n"
372 "Warning threshold in milliseconds\n")
373{
374 if (no)
375 cputime_threshold = 0;
376 else
377 cputime_threshold = cputime_warning * 1000;
378 return CMD_SUCCESS;
379}
380
381ALIAS (service_cputime_warning,
382 no_service_cputime_warning_cmd,
383 "no service cputime-warning",
384 NO_STR
385 "Set up miscellaneous service\n"
386 "Warn for tasks exceeding CPU usage threshold\n")
387
388DEFPY (service_walltime_warning,
389 service_walltime_warning_cmd,
390 "[no] service walltime-warning (1-4294967295)",
391 NO_STR
392 "Set up miscellaneous service\n"
393 "Warn for tasks exceeding total wallclock threshold\n"
394 "Warning threshold in milliseconds\n")
395{
396 if (no)
397 walltime_threshold = 0;
398 else
399 walltime_threshold = walltime_warning * 1000;
400 return CMD_SUCCESS;
401}
402
403ALIAS (service_walltime_warning,
404 no_service_walltime_warning_cmd,
405 "no service walltime-warning",
406 NO_STR
407 "Set up miscellaneous service\n"
408 "Warn for tasks exceeding total wallclock threshold\n")
e276eb82 409
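/*
 * Illustrative configuration for the commands above (an editor-added sketch;
 * the threshold values are arbitrary examples):
 *
 *     configure terminal
 *      service cputime-stats
 *      service cputime-warning 5000     ! warn if a task burns > 5s of CPU time
 *      service walltime-warning 10000   ! warn if a task runs > 10s wall-clock
 *
 * Thresholds are entered in milliseconds and stored in microseconds, hence
 * the "* 1000" in the handlers above.
 */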
8872626b
DS
410static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
411{
412 const char *name = m->name ? m->name : "main";
413 char underline[strlen(name) + 1];
a0b36ae6 414 struct thread *thread;
8872626b
DS
415 uint32_t i;
416
417 memset(underline, '-', sizeof(underline));
418 underline[sizeof(underline) - 1] = '\0';
419
420 vty_out(vty, "\nShowing poll FD's for %s\n", name);
421 vty_out(vty, "----------------------%s\n", underline);
6c19478a
DS
422 vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
423 m->fd_limit);
a0b36ae6
DS
424 for (i = 0; i < m->handler.pfdcount; i++) {
425 vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
426 m->handler.pfds[i].fd, m->handler.pfds[i].events,
8872626b 427 m->handler.pfds[i].revents);
a0b36ae6
DS
428
429 if (m->handler.pfds[i].events & POLLIN) {
430 thread = m->read[m->handler.pfds[i].fd];
431
432 if (!thread)
433 vty_out(vty, "ERROR ");
434 else
60a3efec 435 vty_out(vty, "%s ", thread->xref->funcname);
a0b36ae6
DS
436 } else
437 vty_out(vty, " ");
438
439 if (m->handler.pfds[i].events & POLLOUT) {
440 thread = m->write[m->handler.pfds[i].fd];
441
442 if (!thread)
443 vty_out(vty, "ERROR\n");
444 else
60a3efec 445 vty_out(vty, "%s\n", thread->xref->funcname);
a0b36ae6
DS
446 } else
447 vty_out(vty, "\n");
448 }
8872626b
DS
449}
450
ee4dcee8
DL
451DEFUN_NOSH (show_thread_poll,
452 show_thread_poll_cmd,
453 "show thread poll",
454 SHOW_STR
455 "Thread information\n"
456 "Show poll FD's and information\n")
8872626b
DS
457{
458 struct listnode *node;
459 struct thread_master *m;
460
00dffa8c 461 frr_with_mutex(&masters_mtx) {
8872626b
DS
462 for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
463 show_thread_poll_helper(vty, m);
464 }
465 }
8872626b
DS
466
467 return CMD_SUCCESS;
468}
469
470
49d41a26
DS
471DEFUN (clear_thread_cpu,
472 clear_thread_cpu_cmd,
473 "clear thread cpu [FILTER]",
62f44022 474 "Clear stored data in all pthreads\n"
49d41a26
DS
475 "Thread information\n"
476 "Thread CPU usage\n"
477 "Display filter (rwtexb)\n")
e276eb82 478{
fbcac826 479 uint8_t filter = (uint8_t)-1U;
d62a17ae 480 int idx = 0;
481
482 if (argv_find(argv, argc, "FILTER", &idx)) {
483 filter = parse_filter(argv[idx]->arg);
484 if (!filter) {
485 vty_out(vty,
3efd0893 486				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
d62a17ae 487 argv[idx]->arg);
488 return CMD_WARNING;
489 }
490 }
491
492 cpu_record_clear(filter);
493 return CMD_SUCCESS;
e276eb82 494}
6b0655a2 495
d62a17ae 496void thread_cmd_init(void)
0b84f294 497{
d62a17ae 498 install_element(VIEW_NODE, &show_thread_cpu_cmd);
8872626b 499 install_element(VIEW_NODE, &show_thread_poll_cmd);
d62a17ae 500 install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
45f01188
DL
501
502 install_element(CONFIG_NODE, &service_cputime_stats_cmd);
503 install_element(CONFIG_NODE, &service_cputime_warning_cmd);
504 install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
505 install_element(CONFIG_NODE, &service_walltime_warning_cmd);
506 install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);
0b84f294 507}
62f44022
QY
508/* CLI end ------------------------------------------------------------------ */
509
0b84f294 510
d62a17ae 511static void cancelreq_del(void *cr)
63ccb9cb 512{
d62a17ae 513 XFREE(MTYPE_TMP, cr);
63ccb9cb
QY
514}
515
e0bebc7c 516/* initializer, only ever called once */
4d762f26 517static void initializer(void)
e0bebc7c 518{
d62a17ae 519 pthread_key_create(&thread_current, NULL);
e0bebc7c
QY
520}
521
d62a17ae 522struct thread_master *thread_master_create(const char *name)
718e3744 523{
d62a17ae 524 struct thread_master *rv;
525 struct rlimit limit;
526
527 pthread_once(&init_once, &initializer);
528
529 rv = XCALLOC(MTYPE_THREAD_MASTER, sizeof(struct thread_master));
d62a17ae 530
531 /* Initialize master mutex */
532 pthread_mutex_init(&rv->mtx, NULL);
533 pthread_cond_init(&rv->cancel_cond, NULL);
534
535 /* Set name */
7ffcd8bd
QY
536 name = name ? name : "default";
537 rv->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
d62a17ae 538
539 /* Initialize I/O task data structures */
1a9f340b
MS
540
541 /* Use configured limit if present, ulimit otherwise. */
542 rv->fd_limit = frr_get_fd_limit();
543 if (rv->fd_limit == 0) {
544 getrlimit(RLIMIT_NOFILE, &limit);
545 rv->fd_limit = (int)limit.rlim_cur;
546 }
547
a6f235f3
DS
548 rv->read = XCALLOC(MTYPE_THREAD_POLL,
549 sizeof(struct thread *) * rv->fd_limit);
550
551 rv->write = XCALLOC(MTYPE_THREAD_POLL,
552 sizeof(struct thread *) * rv->fd_limit);
d62a17ae 553
7ffcd8bd
QY
554 char tmhashname[strlen(name) + 32];
555 snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
556 name);
bd74dc61 557 rv->cpu_record = hash_create_size(
d8b87afe 558 8, (unsigned int (*)(const void *))cpu_record_hash_key,
74df8d6d 559 (bool (*)(const void *, const void *))cpu_record_hash_cmp,
7ffcd8bd 560 tmhashname);
d62a17ae 561
c284542b
DL
562 thread_list_init(&rv->event);
563 thread_list_init(&rv->ready);
564 thread_list_init(&rv->unuse);
27d29ced 565 thread_timer_list_init(&rv->timer);
d62a17ae 566
567 /* Initialize thread_fetch() settings */
568 rv->spin = true;
569 rv->handle_signals = true;
570
571 /* Set pthread owner, should be updated by actual owner */
572 rv->owner = pthread_self();
573 rv->cancel_req = list_new();
574 rv->cancel_req->del = cancelreq_del;
575 rv->canceled = true;
576
577 /* Initialize pipe poker */
578 pipe(rv->io_pipe);
579 set_nonblocking(rv->io_pipe[0]);
580 set_nonblocking(rv->io_pipe[1]);
581
582 /* Initialize data structures for poll() */
583 rv->handler.pfdsize = rv->fd_limit;
584 rv->handler.pfdcount = 0;
585 rv->handler.pfds = XCALLOC(MTYPE_THREAD_MASTER,
586 sizeof(struct pollfd) * rv->handler.pfdsize);
587 rv->handler.copy = XCALLOC(MTYPE_THREAD_MASTER,
588 sizeof(struct pollfd) * rv->handler.pfdsize);
589
eff09c66 590 /* add to list of threadmasters */
00dffa8c 591 frr_with_mutex(&masters_mtx) {
eff09c66
QY
592 if (!masters)
593 masters = list_new();
594
d62a17ae 595 listnode_add(masters, rv);
596 }
d62a17ae 597
598 return rv;
718e3744 599}
600
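/*
 * Illustrative sketch (editor-added; the handler name and daemon context are
 * assumptions): the typical lifecycle of a thread_master is to create it,
 * schedule an initial task, then spin on thread_fetch() and thread_call()
 * (the latter appears further down in this file) until nothing is left to
 * run.  FRR daemons normally get this loop from frr_run() rather than
 * writing it by hand.
 *
 *     struct thread_master *master = thread_master_create("example");
 *     struct thread fetch;
 *
 *     thread_add_event(master, startup_handler, NULL, 0, NULL);
 *     while (thread_fetch(master, &fetch))
 *             thread_call(&fetch);
 *     thread_master_free(master);
 */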
d8a8a8de
QY
601void thread_master_set_name(struct thread_master *master, const char *name)
602{
00dffa8c 603 frr_with_mutex(&master->mtx) {
0a22ddfb 604 XFREE(MTYPE_THREAD_MASTER, master->name);
d8a8a8de
QY
605 master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
606 }
d8a8a8de
QY
607}
608
6ed04aa2
DS
609#define THREAD_UNUSED_DEPTH 10
610
718e3744 611/* Move thread to unuse list. */
d62a17ae 612static void thread_add_unuse(struct thread_master *m, struct thread *thread)
718e3744 613{
6655966d
RZ
614 pthread_mutex_t mtxc = thread->mtx;
615
d62a17ae 616 assert(m != NULL && thread != NULL);
d62a17ae 617
d62a17ae 618 thread->hist->total_active--;
6ed04aa2
DS
619 memset(thread, 0, sizeof(struct thread));
620 thread->type = THREAD_UNUSED;
621
6655966d
RZ
622 /* Restore the thread mutex context. */
623 thread->mtx = mtxc;
624
c284542b
DL
625 if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
626 thread_list_add_tail(&m->unuse, thread);
6655966d
RZ
627 return;
628 }
629
630 thread_free(m, thread);
718e3744 631}
632
    633/* Free all unused threads. */
c284542b
DL
634static void thread_list_free(struct thread_master *m,
635 struct thread_list_head *list)
718e3744 636{
d62a17ae 637 struct thread *t;
d62a17ae 638
c284542b 639 while ((t = thread_list_pop(list)))
6655966d 640 thread_free(m, t);
718e3744 641}
642
d62a17ae 643static void thread_array_free(struct thread_master *m,
644 struct thread **thread_array)
308d14ae 645{
d62a17ae 646 struct thread *t;
647 int index;
648
649 for (index = 0; index < m->fd_limit; ++index) {
650 t = thread_array[index];
651 if (t) {
652 thread_array[index] = NULL;
6655966d 653 thread_free(m, t);
d62a17ae 654 }
655 }
a6f235f3 656 XFREE(MTYPE_THREAD_POLL, thread_array);
308d14ae
DV
657}
658
495f0b13
DS
659/*
660 * thread_master_free_unused
661 *
    662 * As threads are finished with, they are put on the
    663 * unuse list for later reuse.
    664 * If we are shutting down, free up the unused threads
    665 * so we can see if we forgot to shut anything off.
666 */
d62a17ae 667void thread_master_free_unused(struct thread_master *m)
495f0b13 668{
00dffa8c 669 frr_with_mutex(&m->mtx) {
d62a17ae 670 struct thread *t;
c284542b 671 while ((t = thread_list_pop(&m->unuse)))
6655966d 672 thread_free(m, t);
d62a17ae 673 }
495f0b13
DS
674}
675
718e3744 676/* Stop thread scheduler. */
d62a17ae 677void thread_master_free(struct thread_master *m)
718e3744 678{
27d29ced
DL
679 struct thread *t;
680
00dffa8c 681 frr_with_mutex(&masters_mtx) {
d62a17ae 682 listnode_delete(masters, m);
eff09c66 683 if (masters->count == 0) {
6a154c88 684 list_delete(&masters);
eff09c66 685 }
d62a17ae 686 }
d62a17ae 687
688 thread_array_free(m, m->read);
689 thread_array_free(m, m->write);
27d29ced
DL
690 while ((t = thread_timer_list_pop(&m->timer)))
691 thread_free(m, t);
d62a17ae 692 thread_list_free(m, &m->event);
693 thread_list_free(m, &m->ready);
694 thread_list_free(m, &m->unuse);
695 pthread_mutex_destroy(&m->mtx);
33844bbe 696 pthread_cond_destroy(&m->cancel_cond);
d62a17ae 697 close(m->io_pipe[0]);
698 close(m->io_pipe[1]);
6a154c88 699 list_delete(&m->cancel_req);
1a0a92ea 700 m->cancel_req = NULL;
d62a17ae 701
702 hash_clean(m->cpu_record, cpu_record_hash_free);
703 hash_free(m->cpu_record);
704 m->cpu_record = NULL;
705
0a22ddfb 706 XFREE(MTYPE_THREAD_MASTER, m->name);
d62a17ae 707 XFREE(MTYPE_THREAD_MASTER, m->handler.pfds);
708 XFREE(MTYPE_THREAD_MASTER, m->handler.copy);
709 XFREE(MTYPE_THREAD_MASTER, m);
718e3744 710}
711
78ca0342
CF
    712/* Return remaining time in milliseconds. */
713unsigned long thread_timer_remain_msec(struct thread *thread)
718e3744 714{
d62a17ae 715 int64_t remain;
1189d95f 716
00dffa8c 717 frr_with_mutex(&thread->mtx) {
78ca0342 718 remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
d62a17ae 719 }
1189d95f 720
d62a17ae 721 return remain < 0 ? 0 : remain;
718e3744 722}
723
78ca0342
CF
    724/* Return remaining time in seconds. */
725unsigned long thread_timer_remain_second(struct thread *thread)
726{
727 return thread_timer_remain_msec(thread) / 1000LL;
728}
729
d62a17ae 730struct timeval thread_timer_remain(struct thread *thread)
6ac44687 731{
d62a17ae 732 struct timeval remain;
00dffa8c 733 frr_with_mutex(&thread->mtx) {
d62a17ae 734 monotime_until(&thread->u.sands, &remain);
735 }
d62a17ae 736 return remain;
6ac44687
CF
737}
738
0447957e
AK
739static int time_hhmmss(char *buf, int buf_size, long sec)
740{
741 long hh;
742 long mm;
743 int wr;
744
642ac49d 745 assert(buf_size >= 8);
0447957e
AK
746
747 hh = sec / 3600;
748 sec %= 3600;
749 mm = sec / 60;
750 sec %= 60;
751
752 wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);
753
754 return wr != 8;
755}
756
757char *thread_timer_to_hhmmss(char *buf, int buf_size,
758 struct thread *t_timer)
759{
760 if (t_timer) {
761 time_hhmmss(buf, buf_size,
762 thread_timer_remain_second(t_timer));
763 } else {
764 snprintf(buf, buf_size, "--:--:--");
765 }
766 return buf;
767}
768
718e3744 769/* Get new thread. */
d7c0a89a 770static struct thread *thread_get(struct thread_master *m, uint8_t type,
d62a17ae 771 int (*func)(struct thread *), void *arg,
60a3efec 772 const struct xref_threadsched *xref)
718e3744 773{
c284542b 774 struct thread *thread = thread_list_pop(&m->unuse);
d62a17ae 775 struct cpu_thread_history tmp;
776
777 if (!thread) {
778 thread = XCALLOC(MTYPE_THREAD, sizeof(struct thread));
779 /* mutex only needs to be initialized at struct creation. */
780 pthread_mutex_init(&thread->mtx, NULL);
781 m->alloc++;
782 }
783
784 thread->type = type;
785 thread->add_type = type;
786 thread->master = m;
787 thread->arg = arg;
d62a17ae 788 thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
789 thread->ref = NULL;
790
791 /*
    792	 * So, if the passed-in funcname is not what we have
    793	 * stored, that means the thread->hist needs to be
794 * updated. We keep the last one around in unused
795 * under the assumption that we are probably
796 * going to immediately allocate the same
797 * type of thread.
798 * This hopefully saves us some serious
799 * hash_get lookups.
800 */
60a3efec
DL
801 if ((thread->xref && thread->xref->funcname != xref->funcname)
802 || thread->func != func) {
d62a17ae 803 tmp.func = func;
60a3efec 804 tmp.funcname = xref->funcname;
d62a17ae 805 thread->hist =
806 hash_get(m->cpu_record, &tmp,
807 (void *(*)(void *))cpu_record_hash_alloc);
808 }
809 thread->hist->total_active++;
810 thread->func = func;
60a3efec 811 thread->xref = xref;
d62a17ae 812
813 return thread;
718e3744 814}
815
6655966d
RZ
816static void thread_free(struct thread_master *master, struct thread *thread)
817{
818 /* Update statistics. */
819 assert(master->alloc > 0);
820 master->alloc--;
821
822 /* Free allocated resources. */
823 pthread_mutex_destroy(&thread->mtx);
824 XFREE(MTYPE_THREAD, thread);
825}
826
d81ca9a3
MS
827static int fd_poll(struct thread_master *m, const struct timeval *timer_wait,
828 bool *eintr_p)
209a72a6 829{
d81ca9a3
MS
830 sigset_t origsigs;
831 unsigned char trash[64];
832 nfds_t count = m->handler.copycount;
833
d279ef57
DS
834 /*
835 * If timer_wait is null here, that means poll() should block
836 * indefinitely, unless the thread_master has overridden it by setting
d62a17ae 837 * ->selectpoll_timeout.
d279ef57 838 *
d62a17ae 839 * If the value is positive, it specifies the maximum number of
d279ef57
DS
840 * milliseconds to wait. If the timeout is -1, it specifies that
841 * we should never wait and always return immediately even if no
842 * event is detected. If the value is zero, the behavior is default.
843 */
d62a17ae 844 int timeout = -1;
845
846 /* number of file descriptors with events */
847 int num;
848
849 if (timer_wait != NULL
850 && m->selectpoll_timeout == 0) // use the default value
851 timeout = (timer_wait->tv_sec * 1000)
852 + (timer_wait->tv_usec / 1000);
853 else if (m->selectpoll_timeout > 0) // use the user's timeout
854 timeout = m->selectpoll_timeout;
855 else if (m->selectpoll_timeout
856 < 0) // effect a poll (return immediately)
857 timeout = 0;
858
0bdeb5e5 859 zlog_tls_buffer_flush();
3e41733f
DL
860 rcu_read_unlock();
861 rcu_assert_read_unlocked();
862
d62a17ae 863 /* add poll pipe poker */
d81ca9a3
MS
864 assert(count + 1 < m->handler.pfdsize);
865 m->handler.copy[count].fd = m->io_pipe[0];
866 m->handler.copy[count].events = POLLIN;
867 m->handler.copy[count].revents = 0x00;
868
869 /* We need to deal with a signal-handling race here: we
870 * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
871 * that may arrive just before we enter poll(). We will block the
872 * key signals, then check whether any have arrived - if so, we return
873 * before calling poll(). If not, we'll re-enable the signals
874 * in the ppoll() call.
875 */
876
877 sigemptyset(&origsigs);
878 if (m->handle_signals) {
879 /* Main pthread that handles the app signals */
880 if (frr_sigevent_check(&origsigs)) {
881 /* Signal to process - restore signal mask and return */
882 pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
883 num = -1;
884 *eintr_p = true;
885 goto done;
886 }
887 } else {
888 /* Don't make any changes for the non-main pthreads */
889 pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
890 }
d62a17ae 891
d81ca9a3
MS
892#if defined(HAVE_PPOLL)
893 struct timespec ts, *tsp;
894
895 if (timeout >= 0) {
896 ts.tv_sec = timeout / 1000;
897 ts.tv_nsec = (timeout % 1000) * 1000000;
898 tsp = &ts;
899 } else
900 tsp = NULL;
901
902 num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
903 pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
904#else
905 /* Not ideal - there is a race after we restore the signal mask */
906 pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
907 num = poll(m->handler.copy, count + 1, timeout);
908#endif
d62a17ae 909
d81ca9a3
MS
910done:
911
912 if (num < 0 && errno == EINTR)
913 *eintr_p = true;
914
915 if (num > 0 && m->handler.copy[count].revents != 0 && num--)
d62a17ae 916 while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
917 ;
918
3e41733f
DL
919 rcu_read_lock();
920
d62a17ae 921 return num;
209a72a6
DS
922}
923
718e3744 924/* Add new read thread. */
ee1455dd
IR
925void _thread_add_read_write(const struct xref_threadsched *xref,
926 struct thread_master *m,
927 int (*func)(struct thread *), void *arg, int fd,
928 struct thread **t_ptr)
718e3744 929{
60a3efec 930 int dir = xref->thread_type;
d62a17ae 931 struct thread *thread = NULL;
1ef14bee 932 struct thread **thread_array;
d62a17ae 933
abf96a87 934 if (dir == THREAD_READ)
6c3aa850
DL
935 frrtrace(9, frr_libfrr, schedule_read, m,
936 xref->funcname, xref->xref.file, xref->xref.line,
937 t_ptr, fd, 0, arg, 0);
abf96a87 938 else
6c3aa850
DL
939 frrtrace(9, frr_libfrr, schedule_write, m,
940 xref->funcname, xref->xref.file, xref->xref.line,
941 t_ptr, fd, 0, arg, 0);
abf96a87 942
188acbb9
DS
943 assert(fd >= 0);
944 if (fd >= m->fd_limit)
945 assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
946
00dffa8c
DL
947 frr_with_mutex(&m->mtx) {
948 if (t_ptr && *t_ptr)
949 // thread is already scheduled; don't reschedule
950 break;
d62a17ae 951
952 /* default to a new pollfd */
953 nfds_t queuepos = m->handler.pfdcount;
954
1ef14bee
DS
955 if (dir == THREAD_READ)
956 thread_array = m->read;
957 else
958 thread_array = m->write;
959
d62a17ae 960 /* if we already have a pollfd for our file descriptor, find and
961 * use it */
962 for (nfds_t i = 0; i < m->handler.pfdcount; i++)
963 if (m->handler.pfds[i].fd == fd) {
964 queuepos = i;
1ef14bee
DS
965
966#ifdef DEV_BUILD
967 /*
968 * What happens if we have a thread already
969 * created for this event?
970 */
971 if (thread_array[fd])
972 assert(!"Thread already scheduled for file descriptor");
973#endif
d62a17ae 974 break;
975 }
976
977 /* make sure we have room for this fd + pipe poker fd */
978 assert(queuepos + 1 < m->handler.pfdsize);
979
60a3efec 980 thread = thread_get(m, dir, func, arg, xref);
d62a17ae 981
982 m->handler.pfds[queuepos].fd = fd;
983 m->handler.pfds[queuepos].events |=
984 (dir == THREAD_READ ? POLLIN : POLLOUT);
985
986 if (queuepos == m->handler.pfdcount)
987 m->handler.pfdcount++;
988
989 if (thread) {
00dffa8c 990 frr_with_mutex(&thread->mtx) {
d62a17ae 991 thread->u.fd = fd;
1ef14bee 992 thread_array[thread->u.fd] = thread;
d62a17ae 993 }
d62a17ae 994
995 if (t_ptr) {
996 *t_ptr = thread;
997 thread->ref = t_ptr;
998 }
999 }
1000
1001 AWAKEN(m);
1002 }
718e3744 1003}
1004
ee1455dd
IR
1005static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
1006 struct thread_master *m,
1007 int (*func)(struct thread *), void *arg,
1008 struct timeval *time_relative,
1009 struct thread **t_ptr)
718e3744 1010{
d62a17ae 1011 struct thread *thread;
96fe578a 1012 struct timeval t;
d62a17ae 1013
1014 assert(m != NULL);
1015
d62a17ae 1016 assert(time_relative);
1017
6c3aa850
DL
1018 frrtrace(9, frr_libfrr, schedule_timer, m,
1019 xref->funcname, xref->xref.file, xref->xref.line,
c7bb4f00 1020 t_ptr, 0, 0, arg, (long)time_relative->tv_sec);
abf96a87 1021
96fe578a
MS
1022 /* Compute expiration/deadline time. */
1023 monotime(&t);
1024 timeradd(&t, time_relative, &t);
1025
00dffa8c
DL
1026 frr_with_mutex(&m->mtx) {
1027 if (t_ptr && *t_ptr)
d279ef57 1028 /* thread is already scheduled; don't reschedule */
ee1455dd 1029 return;
d62a17ae 1030
4322dea7 1031 thread = thread_get(m, THREAD_TIMER, func, arg, xref);
d62a17ae 1032
00dffa8c 1033 frr_with_mutex(&thread->mtx) {
96fe578a 1034 thread->u.sands = t;
27d29ced 1035 thread_timer_list_add(&m->timer, thread);
d62a17ae 1036 if (t_ptr) {
1037 *t_ptr = thread;
1038 thread->ref = t_ptr;
1039 }
1040 }
d62a17ae 1041
96fe578a
MS
1042 /* The timer list is sorted - if this new timer
1043 * might change the time we'll wait for, give the pthread
1044 * a chance to re-compute.
1045 */
1046 if (thread_timer_list_first(&m->timer) == thread)
1047 AWAKEN(m);
d62a17ae 1048 }
9e867fe6 1049}
1050
98c91ac6 1051
1052/* Add timer event thread. */
ee1455dd
IR
1053void _thread_add_timer(const struct xref_threadsched *xref,
1054 struct thread_master *m, int (*func)(struct thread *),
1055 void *arg, long timer, struct thread **t_ptr)
9e867fe6 1056{
d62a17ae 1057 struct timeval trel;
9e867fe6 1058
d62a17ae 1059 assert(m != NULL);
9e867fe6 1060
d62a17ae 1061 trel.tv_sec = timer;
1062 trel.tv_usec = 0;
9e867fe6 1063
ee1455dd 1064 _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
98c91ac6 1065}
9e867fe6 1066
98c91ac6 1067/* Add timer event thread with "millisecond" resolution */
ee1455dd
IR
1068void _thread_add_timer_msec(const struct xref_threadsched *xref,
1069 struct thread_master *m,
1070 int (*func)(struct thread *), void *arg, long timer,
1071 struct thread **t_ptr)
98c91ac6 1072{
d62a17ae 1073 struct timeval trel;
9e867fe6 1074
d62a17ae 1075 assert(m != NULL);
718e3744 1076
d62a17ae 1077 trel.tv_sec = timer / 1000;
1078 trel.tv_usec = 1000 * (timer % 1000);
98c91ac6 1079
ee1455dd 1080 _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
a48b4e6d 1081}
1082
4322dea7 1083/* Add timer event thread with "timeval" resolution */
ee1455dd
IR
1084void _thread_add_timer_tv(const struct xref_threadsched *xref,
1085 struct thread_master *m, int (*func)(struct thread *),
1086 void *arg, struct timeval *tv, struct thread **t_ptr)
d03c4cbd 1087{
ee1455dd 1088 _thread_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
d03c4cbd
DL
1089}
1090
718e3744 1091/* Add simple event thread. */
ee1455dd
IR
1092void _thread_add_event(const struct xref_threadsched *xref,
1093 struct thread_master *m, int (*func)(struct thread *),
1094 void *arg, int val, struct thread **t_ptr)
718e3744 1095{
00dffa8c 1096 struct thread *thread = NULL;
d62a17ae 1097
6c3aa850
DL
1098 frrtrace(9, frr_libfrr, schedule_event, m,
1099 xref->funcname, xref->xref.file, xref->xref.line,
c7bb4f00 1100 t_ptr, 0, val, arg, 0);
abf96a87 1101
d62a17ae 1102 assert(m != NULL);
1103
00dffa8c
DL
1104 frr_with_mutex(&m->mtx) {
1105 if (t_ptr && *t_ptr)
d279ef57 1106 /* thread is already scheduled; don't reschedule */
00dffa8c 1107 break;
d62a17ae 1108
60a3efec 1109 thread = thread_get(m, THREAD_EVENT, func, arg, xref);
00dffa8c 1110 frr_with_mutex(&thread->mtx) {
d62a17ae 1111 thread->u.val = val;
c284542b 1112 thread_list_add_tail(&m->event, thread);
d62a17ae 1113 }
d62a17ae 1114
1115 if (t_ptr) {
1116 *t_ptr = thread;
1117 thread->ref = t_ptr;
1118 }
1119
1120 AWAKEN(m);
1121 }
718e3744 1122}
1123
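/*
 * Illustrative sketch (editor-added; the callback names, "conn" object and fd
 * are assumptions): callers normally schedule work through the thread_add_*
 * wrapper macros in thread.h, which supply the xref argument of the
 * _thread_add_* functions above.  Passing a struct thread ** back-reference
 * lets the library NULL the pointer when the task runs or is cancelled, so it
 * can later be cancelled safely.
 *
 *     struct thread *t_read = NULL, *t_timer = NULL;
 *
 *     thread_add_read(master, read_handler, conn, conn->fd, &t_read);
 *     thread_add_timer_msec(master, timeout_handler, conn, 5000, &t_timer);
 *
 *     ...and later, e.g. when the connection closes:
 *
 *     thread_cancel(&t_read);
 *     thread_cancel(&t_timer);
 */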
63ccb9cb
QY
1124/* Thread cancellation ------------------------------------------------------ */
1125
8797240e
QY
1126/**
1127 * NOT's out the .events field of pollfd corresponding to the given file
1128 * descriptor. The event to be NOT'd is passed in the 'state' parameter.
1129 *
1130 * This needs to happen for both copies of pollfd's. See 'thread_fetch'
1131 * implementation for details.
1132 *
1133 * @param master
1134 * @param fd
1135 * @param state the event to cancel. One or more (OR'd together) of the
1136 * following:
1137 * - POLLIN
1138 * - POLLOUT
1139 */
a9318a32
MS
1140static void thread_cancel_rw(struct thread_master *master, int fd, short state,
1141 int idx_hint)
0a95a0d0 1142{
42d74538
QY
1143 bool found = false;
1144
d62a17ae 1145 /* find the index of corresponding pollfd */
1146 nfds_t i;
1147
a9318a32
MS
1148 /* Cancel POLLHUP too just in case some bozo set it */
1149 state |= POLLHUP;
1150
1151 /* Some callers know the index of the pfd already */
1152 if (idx_hint >= 0) {
1153 i = idx_hint;
1154 found = true;
1155 } else {
1156 /* Have to look for the fd in the pfd array */
1157 for (i = 0; i < master->handler.pfdcount; i++)
1158 if (master->handler.pfds[i].fd == fd) {
1159 found = true;
1160 break;
1161 }
1162 }
42d74538
QY
1163
1164 if (!found) {
1165 zlog_debug(
1166 "[!] Received cancellation request for nonexistent rw job");
1167 zlog_debug("[!] threadmaster: %s | fd: %d",
996c9314 1168 master->name ? master->name : "", fd);
42d74538
QY
1169 return;
1170 }
d62a17ae 1171
1172 /* NOT out event. */
1173 master->handler.pfds[i].events &= ~(state);
1174
1175 /* If all events are canceled, delete / resize the pollfd array. */
1176 if (master->handler.pfds[i].events == 0) {
1177 memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
1178 (master->handler.pfdcount - i - 1)
1179 * sizeof(struct pollfd));
1180 master->handler.pfdcount--;
e985cda0
S
1181 master->handler.pfds[master->handler.pfdcount].fd = 0;
1182 master->handler.pfds[master->handler.pfdcount].events = 0;
d62a17ae 1183 }
1184
1185 /* If we have the same pollfd in the copy, perform the same operations,
1186 * otherwise return. */
1187 if (i >= master->handler.copycount)
1188 return;
1189
1190 master->handler.copy[i].events &= ~(state);
1191
1192 if (master->handler.copy[i].events == 0) {
1193 memmove(master->handler.copy + i, master->handler.copy + i + 1,
1194 (master->handler.copycount - i - 1)
1195 * sizeof(struct pollfd));
1196 master->handler.copycount--;
e985cda0
S
1197 master->handler.copy[master->handler.copycount].fd = 0;
1198 master->handler.copy[master->handler.copycount].events = 0;
d62a17ae 1199 }
0a95a0d0
DS
1200}
1201
a9318a32
MS
1202/*
1203 * Process task cancellation given a task argument: iterate through the
1204 * various lists of tasks, looking for any that match the argument.
1205 */
1206static void cancel_arg_helper(struct thread_master *master,
1207 const struct cancel_req *cr)
1208{
1209 struct thread *t;
1210 nfds_t i;
1211 int fd;
1212 struct pollfd *pfd;
1213
1214 /* We're only processing arg-based cancellations here. */
1215 if (cr->eventobj == NULL)
1216 return;
1217
1218 /* First process the ready lists. */
1219 frr_each_safe(thread_list, &master->event, t) {
1220 if (t->arg != cr->eventobj)
1221 continue;
1222 thread_list_del(&master->event, t);
1223 if (t->ref)
1224 *t->ref = NULL;
1225 thread_add_unuse(master, t);
1226 }
1227
1228 frr_each_safe(thread_list, &master->ready, t) {
1229 if (t->arg != cr->eventobj)
1230 continue;
1231 thread_list_del(&master->ready, t);
1232 if (t->ref)
1233 *t->ref = NULL;
1234 thread_add_unuse(master, t);
1235 }
1236
1237 /* If requested, stop here and ignore io and timers */
1238 if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
1239 return;
1240
1241 /* Check the io tasks */
1242 for (i = 0; i < master->handler.pfdcount;) {
1243 pfd = master->handler.pfds + i;
1244
1245 if (pfd->events & POLLIN)
1246 t = master->read[pfd->fd];
1247 else
1248 t = master->write[pfd->fd];
1249
1250 if (t && t->arg == cr->eventobj) {
1251 fd = pfd->fd;
1252
1253 /* Found a match to cancel: clean up fd arrays */
1254 thread_cancel_rw(master, pfd->fd, pfd->events, i);
1255
1256 /* Clean up thread arrays */
1257 master->read[fd] = NULL;
1258 master->write[fd] = NULL;
1259
1260 /* Clear caller's ref */
1261 if (t->ref)
1262 *t->ref = NULL;
1263
1264 thread_add_unuse(master, t);
1265
1266 /* Don't increment 'i' since the cancellation will have
1267 * removed the entry from the pfd array
1268 */
1269 } else
1270 i++;
1271 }
1272
1273 /* Check the timer tasks */
1274 t = thread_timer_list_first(&master->timer);
1275 while (t) {
1276 struct thread *t_next;
1277
1278 t_next = thread_timer_list_next(&master->timer, t);
1279
1280 if (t->arg == cr->eventobj) {
1281 thread_timer_list_del(&master->timer, t);
1282 if (t->ref)
1283 *t->ref = NULL;
1284 thread_add_unuse(master, t);
1285 }
1286
1287 t = t_next;
1288 }
1289}
1290
1189d95f 1291/**
63ccb9cb 1292 * Process cancellation requests.
1189d95f 1293 *
63ccb9cb
QY
1294 * This may only be run from the pthread which owns the thread_master.
1295 *
1296 * @param master the thread master to process
1297 * @REQUIRE master->mtx
1189d95f 1298 */
d62a17ae 1299static void do_thread_cancel(struct thread_master *master)
718e3744 1300{
c284542b 1301 struct thread_list_head *list = NULL;
d62a17ae 1302 struct thread **thread_array = NULL;
1303 struct thread *thread;
1304
1305 struct cancel_req *cr;
1306 struct listnode *ln;
1307 for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
d279ef57 1308 /*
a9318a32
MS
1309 * If this is an event object cancellation, search
1310 * through task lists deleting any tasks which have the
1311 * specified argument - use this handy helper function.
d279ef57 1312 */
d62a17ae 1313 if (cr->eventobj) {
a9318a32 1314 cancel_arg_helper(master, cr);
d62a17ae 1315 continue;
1316 }
1317
d279ef57
DS
1318 /*
1319 * The pointer varies depending on whether the cancellation
1320 * request was made asynchronously or not. If it was, we
1321 * need to check whether the thread even exists anymore
1322 * before cancelling it.
1323 */
d62a17ae 1324 thread = (cr->thread) ? cr->thread : *cr->threadref;
1325
1326 if (!thread)
1327 continue;
1328
1329 /* Determine the appropriate queue to cancel the thread from */
1330 switch (thread->type) {
1331 case THREAD_READ:
a9318a32 1332 thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
d62a17ae 1333 thread_array = master->read;
1334 break;
1335 case THREAD_WRITE:
a9318a32 1336 thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
d62a17ae 1337 thread_array = master->write;
1338 break;
1339 case THREAD_TIMER:
27d29ced 1340 thread_timer_list_del(&master->timer, thread);
d62a17ae 1341 break;
1342 case THREAD_EVENT:
1343 list = &master->event;
1344 break;
1345 case THREAD_READY:
1346 list = &master->ready;
1347 break;
1348 default:
1349 continue;
1350 break;
1351 }
1352
27d29ced 1353 if (list) {
c284542b 1354 thread_list_del(list, thread);
d62a17ae 1355 } else if (thread_array) {
1356 thread_array[thread->u.fd] = NULL;
d62a17ae 1357 }
1358
1359 if (thread->ref)
1360 *thread->ref = NULL;
1361
1362 thread_add_unuse(thread->master, thread);
1363 }
1364
1365 /* Delete and free all cancellation requests */
41b21bfa
MS
1366 if (master->cancel_req)
1367 list_delete_all_node(master->cancel_req);
d62a17ae 1368
1369 /* Wake up any threads which may be blocked in thread_cancel_async() */
1370 master->canceled = true;
1371 pthread_cond_broadcast(&master->cancel_cond);
718e3744 1372}
1373
a9318a32
MS
1374/*
1375 * Helper function used for multiple flavors of arg-based cancellation.
1376 */
1377static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
1378{
1379 struct cancel_req *cr;
1380
1381 assert(m->owner == pthread_self());
1382
1383 /* Only worth anything if caller supplies an arg. */
1384 if (arg == NULL)
1385 return;
1386
1387 cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
1388
1389 cr->flags = flags;
1390
1391 frr_with_mutex(&m->mtx) {
1392 cr->eventobj = arg;
1393 listnode_add(m->cancel_req, cr);
1394 do_thread_cancel(m);
1395 }
1396}
1397
63ccb9cb
QY
1398/**
1399 * Cancel any events which have the specified argument.
1400 *
1401 * MT-Unsafe
1402 *
1403 * @param m the thread_master to cancel from
1404 * @param arg the argument passed when creating the event
1405 */
d62a17ae 1406void thread_cancel_event(struct thread_master *master, void *arg)
718e3744 1407{
a9318a32
MS
1408 cancel_event_helper(master, arg, 0);
1409}
d62a17ae 1410
a9318a32
MS
1411/*
1412 * Cancel ready tasks with an arg matching 'arg'
1413 *
1414 * MT-Unsafe
1415 *
1416 * @param m the thread_master to cancel from
1417 * @param arg the argument passed when creating the event
1418 */
1419void thread_cancel_event_ready(struct thread_master *m, void *arg)
1420{
1421
1422 /* Only cancel ready/event tasks */
1423 cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
63ccb9cb 1424}
1189d95f 1425
63ccb9cb
QY
1426/**
1427 * Cancel a specific task.
1428 *
1429 * MT-Unsafe
1430 *
1431 * @param thread task to cancel
1432 */
b3d6bc6e 1433void thread_cancel(struct thread **thread)
63ccb9cb 1434{
b3d6bc6e
MS
1435 struct thread_master *master;
1436
1437 if (thread == NULL || *thread == NULL)
1438 return;
1439
1440 master = (*thread)->master;
d62a17ae 1441
6c3aa850
DL
1442 frrtrace(9, frr_libfrr, thread_cancel, master,
1443 (*thread)->xref->funcname, (*thread)->xref->xref.file,
1444 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
b4d6e855 1445 (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
abf96a87 1446
6ed04aa2
DS
1447 assert(master->owner == pthread_self());
1448
00dffa8c 1449 frr_with_mutex(&master->mtx) {
d62a17ae 1450 struct cancel_req *cr =
1451 XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
b3d6bc6e 1452 cr->thread = *thread;
6ed04aa2
DS
1453 listnode_add(master->cancel_req, cr);
1454 do_thread_cancel(master);
d62a17ae 1455 }
b3d6bc6e
MS
1456
1457 *thread = NULL;
63ccb9cb 1458}
1189d95f 1459
63ccb9cb
QY
1460/**
1461 * Asynchronous cancellation.
1462 *
8797240e
QY
1463 * Called with either a struct thread ** or void * to an event argument,
1464 * this function posts the correct cancellation request and blocks until it is
1465 * serviced.
63ccb9cb
QY
1466 *
1467 * If the thread is currently running, execution blocks until it completes.
1468 *
8797240e
QY
1469 * The last two parameters are mutually exclusive, i.e. if you pass one the
1470 * other must be NULL.
1471 *
1472 * When the cancellation procedure executes on the target thread_master, the
1473 * thread * provided is checked for nullity. If it is null, the thread is
1474 * assumed to no longer exist and the cancellation request is a no-op. Thus
1475 * users of this API must pass a back-reference when scheduling the original
1476 * task.
1477 *
63ccb9cb
QY
1478 * MT-Safe
1479 *
8797240e
QY
1480 * @param master the thread master with the relevant event / task
1481 * @param thread pointer to thread to cancel
1482 * @param eventobj the event
63ccb9cb 1483 */
d62a17ae 1484void thread_cancel_async(struct thread_master *master, struct thread **thread,
1485 void *eventobj)
63ccb9cb 1486{
d62a17ae 1487 assert(!(thread && eventobj) && (thread || eventobj));
abf96a87
QY
1488
1489 if (thread && *thread)
c7bb4f00 1490 frrtrace(9, frr_libfrr, thread_cancel_async, master,
6c3aa850
DL
1491 (*thread)->xref->funcname, (*thread)->xref->xref.file,
1492 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
c7bb4f00
QY
1493 (*thread)->u.val, (*thread)->arg,
1494 (*thread)->u.sands.tv_sec);
abf96a87 1495 else
c7bb4f00
QY
1496 frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
1497 0, NULL, 0, 0, eventobj, 0);
abf96a87 1498
d62a17ae 1499 assert(master->owner != pthread_self());
1500
00dffa8c 1501 frr_with_mutex(&master->mtx) {
d62a17ae 1502 master->canceled = false;
1503
1504 if (thread) {
1505 struct cancel_req *cr =
1506 XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
1507 cr->threadref = thread;
1508 listnode_add(master->cancel_req, cr);
1509 } else if (eventobj) {
1510 struct cancel_req *cr =
1511 XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
1512 cr->eventobj = eventobj;
1513 listnode_add(master->cancel_req, cr);
1514 }
1515 AWAKEN(master);
1516
1517 while (!master->canceled)
1518 pthread_cond_wait(&master->cancel_cond, &master->mtx);
1519 }
50478845
MS
1520
1521 if (thread)
1522 *thread = NULL;
718e3744 1523}
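/*
 * Illustrative sketch for the function above (editor-added; the pthread split
 * and "conn" structure are assumptions): cancelling a task that belongs to a
 * thread_master owned by another pthread must go through
 * thread_cancel_async(), and the caller must have kept the back-reference
 * that was supplied when the task was scheduled.
 *
 *     On the owning pthread:
 *         thread_add_read(master, read_handler, conn, conn->fd, &conn->t_read);
 *
 *     On some other pthread, during teardown:
 *         thread_cancel_async(master, &conn->t_read, NULL);
 *
 * The call blocks until the owning pthread has serviced the request, after
 * which conn->t_read has been set to NULL.
 */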
63ccb9cb 1524/* ------------------------------------------------------------------------- */
718e3744 1525
27d29ced 1526static struct timeval *thread_timer_wait(struct thread_timer_list_head *timers,
d62a17ae 1527 struct timeval *timer_val)
718e3744 1528{
27d29ced
DL
1529 if (!thread_timer_list_count(timers))
1530 return NULL;
1531
1532 struct thread *next_timer = thread_timer_list_first(timers);
1533 monotime_until(&next_timer->u.sands, timer_val);
1534 return timer_val;
718e3744 1535}
718e3744 1536
d62a17ae 1537static struct thread *thread_run(struct thread_master *m, struct thread *thread,
1538 struct thread *fetch)
718e3744 1539{
d62a17ae 1540 *fetch = *thread;
1541 thread_add_unuse(m, thread);
1542 return fetch;
718e3744 1543}
1544
d62a17ae 1545static int thread_process_io_helper(struct thread_master *m,
45f3d590
DS
1546 struct thread *thread, short state,
1547 short actual_state, int pos)
5d4ccd4e 1548{
d62a17ae 1549 struct thread **thread_array;
1550
45f3d590
DS
1551 /*
1552 * poll() clears the .events field, but the pollfd array we
1553 * pass to poll() is a copy of the one used to schedule threads.
1554 * We need to synchronize state between the two here by applying
1555 * the same changes poll() made on the copy of the "real" pollfd
1556 * array.
1557 *
1558 * This cleans up a possible infinite loop where we refuse
1559 * to respond to a poll event but poll is insistent that
1560 * we should.
1561 */
1562 m->handler.pfds[pos].events &= ~(state);
1563
1564 if (!thread) {
1565 if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
1566 flog_err(EC_LIB_NO_THREAD,
1d5453d6 1567 "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
45f3d590 1568 m->handler.pfds[pos].fd, actual_state);
d62a17ae 1569 return 0;
45f3d590 1570 }
d62a17ae 1571
1572 if (thread->type == THREAD_READ)
1573 thread_array = m->read;
1574 else
1575 thread_array = m->write;
1576
1577 thread_array[thread->u.fd] = NULL;
c284542b 1578 thread_list_add_tail(&m->ready, thread);
d62a17ae 1579 thread->type = THREAD_READY;
45f3d590 1580
d62a17ae 1581 return 1;
5d4ccd4e
DS
1582}
1583
8797240e
QY
1584/**
1585 * Process I/O events.
1586 *
1587 * Walks through file descriptor array looking for those pollfds whose .revents
1588 * field has something interesting. Deletes any invalid file descriptors.
1589 *
1590 * @param m the thread master
1591 * @param num the number of active file descriptors (return value of poll())
1592 */
d62a17ae 1593static void thread_process_io(struct thread_master *m, unsigned int num)
0a95a0d0 1594{
d62a17ae 1595 unsigned int ready = 0;
1596 struct pollfd *pfds = m->handler.copy;
1597
1598 for (nfds_t i = 0; i < m->handler.copycount && ready < num; ++i) {
1599 /* no event for current fd? immediately continue */
1600 if (pfds[i].revents == 0)
1601 continue;
1602
1603 ready++;
1604
d279ef57
DS
1605 /*
1606 * Unless someone has called thread_cancel from another
1607 * pthread, the only thing that could have changed in
1608 * m->handler.pfds while we were asleep is the .events
1609 * field in a given pollfd. Barring thread_cancel() that
1610 * value should be a superset of the values we have in our
    1611		 * copy, so there's no need to update it. Similarly,
1612 * barring deletion, the fd should still be a valid index
1613 * into the master's pfds.
d142453d
DS
1614 *
1615 * We are including POLLERR here to do a READ event
    1616		 * because the read should fail and the
    1617		 * read function should handle it appropriately.
d279ef57 1618 */
d142453d 1619 if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
d62a17ae 1620 thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN,
45f3d590
DS
1621 pfds[i].revents, i);
1622 }
d62a17ae 1623 if (pfds[i].revents & POLLOUT)
1624 thread_process_io_helper(m, m->write[pfds[i].fd],
45f3d590 1625 POLLOUT, pfds[i].revents, i);
d62a17ae 1626
    1627		/* if one of our file descriptors is garbage, remove it
    1628		 * from both pfds and
    1629		 * update sizes and index */
1630 if (pfds[i].revents & POLLNVAL) {
1631 memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
1632 (m->handler.pfdcount - i - 1)
1633 * sizeof(struct pollfd));
1634 m->handler.pfdcount--;
e985cda0
S
1635 m->handler.pfds[m->handler.pfdcount].fd = 0;
1636 m->handler.pfds[m->handler.pfdcount].events = 0;
d62a17ae 1637
1638 memmove(pfds + i, pfds + i + 1,
1639 (m->handler.copycount - i - 1)
1640 * sizeof(struct pollfd));
1641 m->handler.copycount--;
e985cda0
S
1642 m->handler.copy[m->handler.copycount].fd = 0;
1643 m->handler.copy[m->handler.copycount].events = 0;
d62a17ae 1644
1645 i--;
1646 }
1647 }
718e3744 1648}
1649
8b70d0b0 1650/* Add all timers that have popped to the ready list. */
e7d9e44b 1651static unsigned int thread_process_timers(struct thread_master *m,
d62a17ae 1652 struct timeval *timenow)
a48b4e6d 1653{
d62a17ae 1654 struct thread *thread;
1655 unsigned int ready = 0;
1656
e7d9e44b 1657 while ((thread = thread_timer_list_first(&m->timer))) {
d62a17ae 1658 if (timercmp(timenow, &thread->u.sands, <))
e7d9e44b
MS
1659 break;
1660 thread_timer_list_pop(&m->timer);
d62a17ae 1661 thread->type = THREAD_READY;
e7d9e44b 1662 thread_list_add_tail(&m->ready, thread);
d62a17ae 1663 ready++;
1664 }
e7d9e44b 1665
d62a17ae 1666 return ready;
a48b4e6d 1667}
1668
2613abe6 1669/* process a list en masse, e.g. for event thread lists */
c284542b 1670static unsigned int thread_process(struct thread_list_head *list)
2613abe6 1671{
d62a17ae 1672 struct thread *thread;
d62a17ae 1673 unsigned int ready = 0;
1674
c284542b 1675 while ((thread = thread_list_pop(list))) {
d62a17ae 1676 thread->type = THREAD_READY;
c284542b 1677 thread_list_add_tail(&thread->master->ready, thread);
d62a17ae 1678 ready++;
1679 }
1680 return ready;
2613abe6
PJ
1681}
1682
1683
718e3744 1684/* Fetch next ready thread. */
d62a17ae 1685struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
718e3744 1686{
d62a17ae 1687 struct thread *thread = NULL;
1688 struct timeval now;
1689 struct timeval zerotime = {0, 0};
1690 struct timeval tv;
1691 struct timeval *tw = NULL;
d81ca9a3 1692 bool eintr_p = false;
d62a17ae 1693 int num = 0;
1694
1695 do {
1696 /* Handle signals if any */
1697 if (m->handle_signals)
7cc91e67 1698 frr_sigevent_process();
d62a17ae 1699
1700 pthread_mutex_lock(&m->mtx);
1701
1702 /* Process any pending cancellation requests */
1703 do_thread_cancel(m);
1704
e3c9529e
QY
1705 /*
1706 * Attempt to flush ready queue before going into poll().
1707 * This is performance-critical. Think twice before modifying.
1708 */
c284542b 1709 if ((thread = thread_list_pop(&m->ready))) {
e3c9529e
QY
1710 fetch = thread_run(m, thread, fetch);
1711 if (fetch->ref)
1712 *fetch->ref = NULL;
1713 pthread_mutex_unlock(&m->mtx);
5e822957
DS
1714 if (!m->ready_run_loop)
1715 GETRUSAGE(&m->last_getrusage);
1716 m->ready_run_loop = true;
e3c9529e
QY
1717 break;
1718 }
1719
5e822957 1720 m->ready_run_loop = false;
e3c9529e
QY
1721 /* otherwise, tick through scheduling sequence */
1722
bca37d17
QY
1723 /*
1724 * Post events to ready queue. This must come before the
1725 * following block since events should occur immediately
1726 */
d62a17ae 1727 thread_process(&m->event);
1728
bca37d17
QY
1729 /*
1730 * If there are no tasks on the ready queue, we will poll()
1731 * until a timer expires or we receive I/O, whichever comes
1732 * first. The strategy for doing this is:
d62a17ae 1733 *
1734 * - If there are events pending, set the poll() timeout to zero
1735 * - If there are no events pending, but there are timers
d279ef57
DS
1736 * pending, set the timeout to the smallest remaining time on
1737 * any timer.
d62a17ae 1738 * - If there are neither timers nor events pending, but there
d279ef57 1739 * are file descriptors pending, block indefinitely in poll()
d62a17ae 1740 * - If nothing is pending, it's time for the application to die
1741 *
1742 * In every case except the last, we need to hit poll() at least
bca37d17
QY
1743 * once per loop to avoid starvation by events
1744 */
c284542b 1745 if (!thread_list_count(&m->ready))
27d29ced 1746 tw = thread_timer_wait(&m->timer, &tv);
d62a17ae 1747
c284542b
DL
1748 if (thread_list_count(&m->ready) ||
1749 (tw && !timercmp(tw, &zerotime, >)))
d62a17ae 1750 tw = &zerotime;
1751
1752 if (!tw && m->handler.pfdcount == 0) { /* die */
1753 pthread_mutex_unlock(&m->mtx);
1754 fetch = NULL;
1755 break;
1756 }
1757
bca37d17
QY
1758 /*
1759 * Copy pollfd array + # active pollfds in it. Not necessary to
1760 * copy the array size as this is fixed.
1761 */
d62a17ae 1762 m->handler.copycount = m->handler.pfdcount;
1763 memcpy(m->handler.copy, m->handler.pfds,
1764 m->handler.copycount * sizeof(struct pollfd));
1765
e3c9529e
QY
1766 pthread_mutex_unlock(&m->mtx);
1767 {
d81ca9a3
MS
1768 eintr_p = false;
1769 num = fd_poll(m, tw, &eintr_p);
e3c9529e
QY
1770 }
1771 pthread_mutex_lock(&m->mtx);
d764d2cc 1772
e3c9529e
QY
1773 /* Handle any errors received in poll() */
1774 if (num < 0) {
d81ca9a3 1775 if (eintr_p) {
d62a17ae 1776 pthread_mutex_unlock(&m->mtx);
e3c9529e
QY
1777 /* loop around to signal handler */
1778 continue;
d62a17ae 1779 }
1780
e3c9529e 1781 /* else die */
450971aa 1782 flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s",
9ef9495e 1783 safe_strerror(errno));
e3c9529e
QY
1784 pthread_mutex_unlock(&m->mtx);
1785 fetch = NULL;
1786 break;
bca37d17 1787 }
d62a17ae 1788
1789 /* Post timers to ready queue. */
1790 monotime(&now);
e7d9e44b 1791 thread_process_timers(m, &now);
d62a17ae 1792
1793 /* Post I/O to ready queue. */
1794 if (num > 0)
1795 thread_process_io(m, num);
1796
d62a17ae 1797 pthread_mutex_unlock(&m->mtx);
1798
1799 } while (!thread && m->spin);
1800
1801 return fetch;
718e3744 1802}
1803
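/* Illustrative sketch (not part of the original source): a daemon's main
 * loop typically drives the scheduler with exactly this pattern, fetching
 * one ready task at a time and running it with full accounting.  "master"
 * is assumed to be the daemon's struct thread_master.
 */
static void example_main_loop(struct thread_master *master)
{
	struct thread fetched;

	/* thread_fetch() blocks in poll() until something is runnable and
	 * returns NULL only when nothing is left to schedule.
	 */
	while (thread_fetch(master, &fetched))
		thread_call(&fetched);
}
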
d62a17ae 1804static unsigned long timeval_elapsed(struct timeval a, struct timeval b)
62f44022 1805{
d62a17ae 1806 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
1807 + (a.tv_usec - b.tv_usec));
62f44022
QY
1808}
1809
d62a17ae 1810unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
1811 unsigned long *cputime)
718e3744 1812{
6418e2d3
DL
1813#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
1814 *cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
1815 + (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
1816#else
d62a17ae 1817 /* This is 'user + sys' time. */
1818 *cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
1819 + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
6418e2d3 1820#endif
d62a17ae 1821 return timeval_elapsed(now->real, start->real);
8b70d0b0 1822}
1823
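/* Illustrative sketch (not part of the original source): measuring an
 * arbitrary code section with the same primitives thread_call() uses.
 * RUSAGE_T and GETRUSAGE() come from thread.h; do_expensive_work() is a
 * hypothetical placeholder for the code being measured.
 */
static void example_measure_section(void)
{
	RUSAGE_T before, after;
	unsigned long walltime, cputime;

	GETRUSAGE(&before);
	do_expensive_work();		/* hypothetical workload */
	GETRUSAGE(&after);

	/* both results are reported in microseconds */
	walltime = thread_consumed_time(&after, &before, &cputime);
	zlog_debug("section took %luus wall, %luus cpu", walltime, cputime);
}
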
50596be0
DS
 1824/* We should aim to yield after the thread's configured yield time,
 1825 which defaults to THREAD_YIELD_TIME_SLOT.
8b70d0b0 1826 Note: we are using real (wall clock) time for this calculation.
1827 It could be argued that CPU time may make more sense in certain
1828 contexts. The things to consider are whether the thread may have
1829 blocked (in which case wall time increases, but CPU time does not),
1830 or whether the system is heavily loaded with other processes competing
d62a17ae 1831 for CPU time. On balance, wall clock time seems to make sense.
8b70d0b0 1832 Plus it has the added benefit that gettimeofday should be faster
1833 than calling getrusage. */
d62a17ae 1834int thread_should_yield(struct thread *thread)
718e3744 1835{
d62a17ae 1836 int result;
00dffa8c 1837 frr_with_mutex(&thread->mtx) {
d62a17ae 1838 result = monotime_since(&thread->real, NULL)
1839 > (int64_t)thread->yield;
1840 }
d62a17ae 1841 return result;
50596be0
DS
1842}
1843
d62a17ae 1844void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
50596be0 1845{
00dffa8c 1846 frr_with_mutex(&thread->mtx) {
d62a17ae 1847 thread->yield = yield_time;
1848 }
718e3744 1849}
1850
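/* Illustrative sketch (not part of the original source): a long-running
 * background task staying cooperative via thread_should_yield().
 * struct work_queue_ctx, more_work() and process_one_item() are
 * hypothetical; the re-scheduling call uses the real thread_add_event()
 * API from thread.h.
 */
static int example_background_task(struct thread *t)
{
	struct work_queue_ctx *ctx = THREAD_ARG(t);

	while (more_work(ctx)) {
		process_one_item(ctx);

		/* Once we have run past the task's yield time (wall clock),
		 * hand the loop back and resume from a fresh event.
		 */
		if (thread_should_yield(t)) {
			thread_add_event(t->master, example_background_task,
					 ctx, 0, NULL);
			break;
		}
	}
	return 0;
}
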
d62a17ae 1851void thread_getrusage(RUSAGE_T *r)
db9c0df9 1852{
6418e2d3
DL
1853 monotime(&r->real);
1854 if (!cputime_enabled) {
1855 memset(&r->cpu, 0, sizeof(r->cpu));
1856 return;
1857 }
1858
1859#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
1860 /* not currently implemented in Linux's vDSO, but maybe at some point
1861 * in the future?
1862 */
1863 clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
1864#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
231db9a6
DS
1865#if defined RUSAGE_THREAD
1866#define FRR_RUSAGE RUSAGE_THREAD
1867#else
1868#define FRR_RUSAGE RUSAGE_SELF
1869#endif
6418e2d3
DL
1870 getrusage(FRR_RUSAGE, &(r->cpu));
1871#endif
db9c0df9
PJ
1872}
1873
fbcac826
QY
1874/*
1875 * Call a thread.
1876 *
1877 * This function will atomically update the thread's usage history. At present
1878 * this is the only spot where usage history is written. Nevertheless the code
1879 * has been written such that the introduction of writers in the future should
1880 * not need to update it provided the writers atomically perform only the
1881 * operations done here, i.e. updating the total and maximum times. In
1882 * particular, the maximum real and cpu times must be monotonically increasing
1883 * or this code is not correct.
1884 */
d62a17ae 1885void thread_call(struct thread *thread)
718e3744 1886{
d62a17ae 1887 RUSAGE_T before, after;
cc8b13a0 1888
45f01188
DL
1889 /* if the thread being called is the CLI, it may change cputime_enabled
1890 * ("service cputime-stats" command), which can result in nonsensical
1891 * and very confusing warnings
1892 */
1893 bool cputime_enabled_here = cputime_enabled;
1894
5e822957
DS
1895 if (thread->master->ready_run_loop)
1896 before = thread->master->last_getrusage;
1897 else
1898 GETRUSAGE(&before);
1899
d62a17ae 1900 thread->real = before.real;
718e3744 1901
6c3aa850
DL
1902 frrtrace(9, frr_libfrr, thread_call, thread->master,
1903 thread->xref->funcname, thread->xref->xref.file,
1904 thread->xref->xref.line, NULL, thread->u.fd,
c7bb4f00 1905 thread->u.val, thread->arg, thread->u.sands.tv_sec);
abf96a87 1906
d62a17ae 1907 pthread_setspecific(thread_current, thread);
1908 (*thread->func)(thread);
1909 pthread_setspecific(thread_current, NULL);
718e3744 1910
d62a17ae 1911 GETRUSAGE(&after);
5e822957 1912 thread->master->last_getrusage = after;
718e3744 1913
45f01188
DL
1914 unsigned long walltime, cputime;
1915 unsigned long exp;
fbcac826 1916
45f01188
DL
1917 walltime = thread_consumed_time(&after, &before, &cputime);
1918
1919 /* update walltime */
1920 atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
fbcac826
QY
1921 memory_order_seq_cst);
1922 exp = atomic_load_explicit(&thread->hist->real.max,
1923 memory_order_seq_cst);
45f01188 1924 while (exp < walltime
fbcac826 1925 && !atomic_compare_exchange_weak_explicit(
45f01188
DL
1926 &thread->hist->real.max, &exp, walltime,
1927 memory_order_seq_cst, memory_order_seq_cst))
fbcac826
QY
1928 ;
1929
45f01188
DL
1930 if (cputime_enabled_here && cputime_enabled) {
1931 /* update cputime */
1932 atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
1933 memory_order_seq_cst);
1934 exp = atomic_load_explicit(&thread->hist->cpu.max,
1935 memory_order_seq_cst);
1936 while (exp < cputime
1937 && !atomic_compare_exchange_weak_explicit(
1938 &thread->hist->cpu.max, &exp, cputime,
1939 memory_order_seq_cst, memory_order_seq_cst))
1940 ;
1941 }
fbcac826
QY
1942
1943 atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
1944 memory_order_seq_cst);
1945 atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
1946 memory_order_seq_cst);
718e3744 1947
45f01188
DL
1948 if (cputime_enabled_here && cputime_enabled && cputime_threshold
1949 && cputime > cputime_threshold) {
d62a17ae 1950 /*
45f01188
DL
 1951		 * We have a CPU hog on our hands: the CPU time this task has
 1952		 * spent doing actual work (not sleeping) exceeds cputime_threshold.
d62a17ae 1953 * Whinge about it now, so we're aware this is yet another task
1954 * to fix.
1955 */
9b8e01ca
DS
1956 atomic_fetch_add_explicit(&thread->hist->total_cpu_warn,
1957 1, memory_order_seq_cst);
9ef9495e 1958 flog_warn(
039d547f
DS
1959 EC_LIB_SLOW_THREAD_CPU,
1960 "CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
1961 thread->xref->funcname, (unsigned long)thread->func,
45f01188
DL
1962 walltime / 1000, cputime / 1000);
1963
1964 } else if (walltime_threshold && walltime > walltime_threshold) {
039d547f 1965 /*
45f01188
DL
 1966		 * The wall-clock runtime of this task exceeds walltime_threshold,
 1967		 * but its CPU time did not trip the CPU warning above.  Whine
 1968		 * about it, since this could imply some sort of scheduling issue.
039d547f 1969 */
9b8e01ca
DS
1970 atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
1971 1, memory_order_seq_cst);
039d547f
DS
1972 flog_warn(
1973 EC_LIB_SLOW_THREAD_WALL,
1974 "STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
60a3efec 1975 thread->xref->funcname, (unsigned long)thread->func,
45f01188 1976 walltime / 1000, cputime / 1000);
d62a17ae 1977 }
718e3744 1978}
1979
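/* Illustrative sketch (not part of the original source): the warnings in
 * thread_call() are controlled by the globals declared near the top of
 * this file, which are normally driven by the CLI ("service cputime-stats"
 * toggles cputime_enabled).  A daemon could, for example, tighten the
 * wall-clock warning while keeping per-task CPU accounting on; values are
 * in microseconds, matching thread_consumed_time().
 */
static void example_tune_task_warnings(void)
{
	/* warn when a single task holds the loop for more than 2s wall clock */
	walltime_threshold = 2 * 1000000UL;

	/* keep collecting per-task CPU time and warn above 1s of CPU */
	cputime_enabled = true;
	cputime_threshold = 1 * 1000000UL;
}
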
1980/* Execute thread */
60a3efec
DL
1981void _thread_execute(const struct xref_threadsched *xref,
1982 struct thread_master *m, int (*func)(struct thread *),
1983 void *arg, int val)
718e3744 1984{
c4345fbf 1985 struct thread *thread;
718e3744 1986
c4345fbf 1987 /* Get or allocate new thread to execute. */
00dffa8c 1988 frr_with_mutex(&m->mtx) {
60a3efec 1989 thread = thread_get(m, THREAD_EVENT, func, arg, xref);
9c7753e4 1990
c4345fbf 1991 /* Set its event value. */
00dffa8c 1992 frr_with_mutex(&thread->mtx) {
c4345fbf
RZ
1993 thread->add_type = THREAD_EXECUTE;
1994 thread->u.val = val;
1995 thread->ref = &thread;
1996 }
c4345fbf 1997 }
f7c62e11 1998
c4345fbf
RZ
1999 /* Execute thread doing all accounting. */
2000 thread_call(thread);
9c7753e4 2001
c4345fbf
RZ
2002 /* Give back or free thread. */
2003 thread_add_unuse(m, thread);
718e3744 2004}
1543c387
MS
2005
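/* Illustrative sketch (not part of the original source): callers normally
 * go through the thread_execute() wrapper macro from thread.h, which fills
 * in the xref and runs the callback synchronously with full accounting.
 * handle_immediate() is a hypothetical callback.
 */
static int handle_immediate(struct thread *t)
{
	zlog_debug("ran synchronously with val=%d", THREAD_VAL(t));
	return 0;
}

static void example_execute(struct thread_master *m)
{
	/* unlike thread_add_event(), this does not go through the ready
	 * queue; the callback has already returned when we get here
	 */
	thread_execute(m, handle_immediate, NULL, 42);
}
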
2006/* Debug signal mask - if 'sigs' is NULL, use current effective mask. */
2007void debug_signals(const sigset_t *sigs)
2008{
2009 int i, found;
2010 sigset_t tmpsigs;
2011 char buf[300];
2012
2013 /*
2014 * We're only looking at the non-realtime signals here, so we need
2015 * some limit value. Platform differences mean at some point we just
2016 * need to pick a reasonable value.
2017 */
2018#if defined SIGRTMIN
2019# define LAST_SIGNAL SIGRTMIN
2020#else
2021# define LAST_SIGNAL 32
2022#endif
2023
2024
2025 if (sigs == NULL) {
2026 sigemptyset(&tmpsigs);
2027 pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs);
2028 sigs = &tmpsigs;
2029 }
2030
2031 found = 0;
2032 buf[0] = '\0';
2033
2034 for (i = 0; i < LAST_SIGNAL; i++) {
2035 char tmp[20];
2036
2037 if (sigismember(sigs, i) > 0) {
2038 if (found > 0)
2039 strlcat(buf, ",", sizeof(buf));
2040 snprintf(tmp, sizeof(tmp), "%d", i);
2041 strlcat(buf, tmp, sizeof(buf));
2042 found++;
2043 }
2044 }
2045
2046 if (found == 0)
2047 snprintf(buf, sizeof(buf), "<none>");
2048
2049 zlog_debug("%s: %s", __func__, buf);
2050}
a505383d
DS
2051
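/* Illustrative sketch (not part of the original source): dumping the
 * signal mask a helper pthread inherited, before and after blocking
 * SIGINT/SIGTERM in it.
 */
static void example_mask_and_dump(void)
{
	sigset_t blockme;

	debug_signals(NULL);		/* mask as inherited */

	sigemptyset(&blockme);
	sigaddset(&blockme, SIGINT);
	sigaddset(&blockme, SIGTERM);
	pthread_sigmask(SIG_BLOCK, &blockme, NULL);

	debug_signals(&blockme);	/* just the two we blocked */
}
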
2052bool thread_is_scheduled(struct thread *thread)
2053{
2054 if (thread == NULL)
2055 return false;
2056
2057 return true;
2058}
f59e6882
DL
2059
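/* Illustrative sketch (not part of the original source): the usual caller
 * pattern is to guard re-scheduling on a stored task pointer.  struct
 * peer_ctx and peer_read() are hypothetical; t_read is the struct thread *
 * the scheduler keeps up to date through the ref argument (cleared in
 * thread_fetch() above when the task runs, or on cancellation).
 */
struct peer_ctx {
	int fd;
	struct thread *t_read;
};

static int peer_read(struct thread *t);	/* hypothetical handler */

static void example_ensure_read(struct thread_master *master,
				struct peer_ctx *peer)
{
	if (!thread_is_scheduled(peer->t_read))
		thread_add_read(master, peer_read, peer, peer->fd,
				&peer->t_read);
}
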
2060static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
2061 const struct thread *thread)
2062{
2063 static const char * const types[] = {
2064 [THREAD_READ] = "read",
2065 [THREAD_WRITE] = "write",
2066 [THREAD_TIMER] = "timer",
2067 [THREAD_EVENT] = "event",
2068 [THREAD_READY] = "ready",
2069 [THREAD_UNUSED] = "unused",
2070 [THREAD_EXECUTE] = "exec",
2071 };
2072 ssize_t rv = 0;
2073 char info[16] = "";
2074
2075 if (!thread)
2076 return bputs(buf, "{(thread *)NULL}");
2077
2078 rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);
2079
2080 if (thread->type < array_size(types) && types[thread->type])
2081 rv += bprintfrr(buf, " %-6s", types[thread->type]);
2082 else
2083 rv += bprintfrr(buf, " INVALID(%u)", thread->type);
2084
2085 switch (thread->type) {
2086 case THREAD_READ:
2087 case THREAD_WRITE:
2088 snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
2089 break;
2090
2091 case THREAD_TIMER:
2092 snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
2093 break;
2094 }
2095
2096 rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,
2097 thread->xref->funcname, thread->xref->dest,
2098 thread->xref->xref.file, thread->xref->xref.line);
2099 return rv;
2100}
2101
54929fd3 2102printfrr_ext_autoreg_p("TH", printfrr_thread);
f59e6882
DL
2103static ssize_t printfrr_thread(struct fbuf *buf, struct printfrr_eargs *ea,
2104 const void *ptr)
2105{
2106 const struct thread *thread = ptr;
2107 struct timespec remain = {};
2108
2109 if (ea->fmt[0] == 'D') {
2110 ea->fmt++;
2111 return printfrr_thread_dbg(buf, ea, thread);
2112 }
2113
2114 if (!thread) {
2115 /* need to jump over time formatting flag characters in the
2116 * input format string, i.e. adjust ea->fmt!
2117 */
2118 printfrr_time(buf, ea, &remain,
2119 TIMEFMT_TIMER_DEADLINE | TIMEFMT_SKIP);
2120 return bputch(buf, '-');
2121 }
2122
2123 TIMEVAL_TO_TIMESPEC(&thread->u.sands, &remain);
2124 return printfrr_time(buf, ea, &remain, TIMEFMT_TIMER_DEADLINE);
2125}
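
/* Illustrative sketch (not part of the original source): with the "TH"
 * printfrr extension registered above, tasks can be logged directly.
 * "%pTHD" prints the debug form for any task; plain "%pTH" prints the
 * remaining time for a timer task (or '-' for a NULL pointer).
 */
static void example_log_task(const struct thread *timer_task)
{
	zlog_debug("task detail: %pTHD", timer_task);
	zlog_debug("timer fires in %pTH", timer_task);
}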