// SPDX-License-Identifier: GPL-2.0-or-later
/* Thread management routine
 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
 */

#include <zebra.h>
#include <sys/resource.h>

#include "frratomic.h"
#include "frr_pthread.h"
#include "lib_errors.h"
#include "libfrr_trace.h"

DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
DEFINE_MTYPE_STATIC(LIB, EVENT_MASTER, "Thread master");
DEFINE_MTYPE_STATIC(LIB, EVENT_POLL, "Thread Poll Info");
DEFINE_MTYPE_STATIC(LIB, EVENT_STATS, "Thread stats");

DECLARE_LIST(thread_list, struct event, threaditem);

struct cancel_req {
	int flags;
	struct event *thread;
	void *eventobj;
	struct event **threadref;
};

/* Flags for task cancellation */
#define EVENT_CANCEL_FLAG_READY 0x01

static int thread_timer_cmp(const struct event *a, const struct event *b)
{
	if (a->u.sands.tv_sec < b->u.sands.tv_sec)
		return -1;
	if (a->u.sands.tv_sec > b->u.sands.tv_sec)
		return 1;
	if (a->u.sands.tv_usec < b->u.sands.tv_usec)
		return -1;
	if (a->u.sands.tv_usec > b->u.sands.tv_usec)
		return 1;
	return 0;
}

DECLARE_HEAP(thread_timer_list, struct event, timeritem, thread_timer_cmp);
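
/* Illustrative sketch (not part of this file): because thread_timer_cmp
 * orders the heap by ascending deadline, the scheduler can peek at the
 * soonest expiry in O(1):
 *
 *	struct event *next = thread_timer_list_first(&m->timer);
 *
 *	if (next)
 *		monotime_until(&next->u.sands, &remaining);
 *
 * thread_timer_wait() further below uses exactly this to size the poll()
 * timeout.
 */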

#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif

#define AWAKEN(m)                                                              \
	do {                                                                   \
		const unsigned char wakebyte = 0x01;                           \
		write(m->io_pipe[1], &wakebyte, 1);                            \
	} while (0)

/* control variable for initializer */
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
pthread_key_t thread_current;

static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *masters;

static void thread_free(struct thread_master *master, struct event *thread);

#ifndef EXCLUDE_CPU_TIME
#define EXCLUDE_CPU_TIME 0
#endif
#ifndef CONSUMED_TIME_CHECK
#define CONSUMED_TIME_CHECK 5000000
#endif

bool cputime_enabled = !EXCLUDE_CPU_TIME;
unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
unsigned long walltime_threshold = CONSUMED_TIME_CHECK;

/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"

static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
	int size = sizeof(a->func);

	return jhash(&a->func, size, 0);
}

static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
				const struct cpu_thread_history *b)
{
	return a->func == b->func;
}

static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
{
	struct cpu_thread_history *new;

	new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_thread_history));
	new->func = a->func;
	new->funcname = a->funcname;
	return new;
}

static void cpu_record_hash_free(void *a)
{
	struct cpu_thread_history *hist = a;

	XFREE(MTYPE_EVENT_STATS, hist);
}

static void vty_out_cpu_thread_history(struct vty *vty,
				       struct cpu_thread_history *a)
{
	vty_out(vty,
		"%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
		a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
		a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
		(a->real.total / a->total_calls), a->real.max,
		a->total_cpu_warn, a->total_wall_warn, a->total_starv_warn);
	vty_out(vty, " %c%c%c%c%c %s\n",
		a->types & (1 << EVENT_READ) ? 'R' : ' ',
		a->types & (1 << EVENT_WRITE) ? 'W' : ' ',
		a->types & (1 << EVENT_TIMER) ? 'T' : ' ',
		a->types & (1 << EVENT_EVENT) ? 'E' : ' ',
		a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
}

static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
{
	struct cpu_thread_history *totals = args[0];
	struct cpu_thread_history copy;
	struct vty *vty = args[1];
	uint8_t *filter = args[2];

	struct cpu_thread_history *a = bucket->data;

	copy.total_active =
		atomic_load_explicit(&a->total_active, memory_order_seq_cst);
	copy.total_calls =
		atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
	copy.total_cpu_warn =
		atomic_load_explicit(&a->total_cpu_warn, memory_order_seq_cst);
	copy.total_wall_warn =
		atomic_load_explicit(&a->total_wall_warn, memory_order_seq_cst);
	copy.total_starv_warn = atomic_load_explicit(&a->total_starv_warn,
						     memory_order_seq_cst);
	copy.cpu.total =
		atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
	copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
	copy.real.total =
		atomic_load_explicit(&a->real.total, memory_order_seq_cst);
	copy.real.max =
		atomic_load_explicit(&a->real.max, memory_order_seq_cst);
	copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
	copy.funcname = a->funcname;

	if (!(copy.types & *filter))
		return;

	vty_out_cpu_thread_history(vty, &copy);
	totals->total_active += copy.total_active;
	totals->total_calls += copy.total_calls;
	totals->total_cpu_warn += copy.total_cpu_warn;
	totals->total_wall_warn += copy.total_wall_warn;
	totals->total_starv_warn += copy.total_starv_warn;
	totals->real.total += copy.real.total;
	if (totals->real.max < copy.real.max)
		totals->real.max = copy.real.max;
	totals->cpu.total += copy.cpu.total;
	if (totals->cpu.max < copy.cpu.max)
		totals->cpu.max = copy.cpu.max;
}

static void cpu_record_print(struct vty *vty, uint8_t filter)
{
	struct cpu_thread_history tmp;
	void *args[3] = {&tmp, vty, &filter};
	struct thread_master *m;
	struct listnode *ln;

	if (!cputime_enabled)
		vty_out(vty,
			"\n"
			"Collecting CPU time statistics is currently disabled. Following statistics\n"
			"will be zero or may display data from when collection was enabled. Use the\n"
			"  \"service cputime-stats\"  command to start collecting data.\n"
			"\nCounters and wallclock times are always maintained and should be accurate.\n");

	memset(&tmp, 0, sizeof(tmp));
	tmp.funcname = "TOTAL";
	tmp.types = filter;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			const char *name = m->name ? m->name : "main";
			char underline[strlen(name) + 1];

			memset(underline, '-', sizeof(underline));
			underline[sizeof(underline) - 1] = '\0';

			vty_out(vty, "\n");
			vty_out(vty, "Showing statistics for pthread %s\n",
				name);
			vty_out(vty, "-------------------------------%s\n",
				underline);
			vty_out(vty, "%30s %18s %18s\n", "",
				"CPU (user+system):", "Real (wall-clock):");
			vty_out(vty,
				"Active   Runtime(ms)   Invoked Avg uSec Max uSecs");
			vty_out(vty, " Avg uSec Max uSecs");
			vty_out(vty,
				"  CPU_Warn Wall_Warn Starv_Warn   Type  Thread\n");

			if (m->cpu_record->count)
				hash_iterate(
					m->cpu_record,
					(void (*)(struct hash_bucket *,
						  void *))cpu_record_hash_print,
					args);
			else
				vty_out(vty, "No data to display yet.\n");

			vty_out(vty, "\n");
		}
	}

	vty_out(vty, "\n");
	vty_out(vty, "Total thread statistics\n");
	vty_out(vty, "-------------------------\n");
	vty_out(vty, "%30s %18s %18s\n", "",
		"CPU (user+system):", "Real (wall-clock):");
	vty_out(vty, "Active   Runtime(ms)   Invoked Avg uSec Max uSecs");
	vty_out(vty, " Avg uSec Max uSecs  CPU_Warn Wall_Warn");
	vty_out(vty, "   Type  Thread\n");

	if (tmp.total_calls > 0)
		vty_out_cpu_thread_history(vty, &tmp);
}

static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
{
	uint8_t *filter = args[0];
	struct hash *cpu_record = args[1];

	struct cpu_thread_history *a = bucket->data;

	if (!(a->types & *filter))
		return;

	hash_release(cpu_record, bucket->data);
}

static void cpu_record_clear(uint8_t filter)
{
	uint8_t *tmp = &filter;
	struct thread_master *m;
	struct listnode *ln;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			frr_with_mutex (&m->mtx) {
				void *args[2] = {tmp, m->cpu_record};

				hash_iterate(
					m->cpu_record,
					(void (*)(struct hash_bucket *,
						  void *))cpu_record_hash_clear,
					args);
			}
		}
	}
}

static uint8_t parse_filter(const char *filterstr)
{
	int i = 0;
	int filter = 0;

	while (filterstr[i] != '\0') {
		switch (filterstr[i]) {
		case 'r':
		case 'R':
			filter |= (1 << EVENT_READ);
			break;
		case 'w':
		case 'W':
			filter |= (1 << EVENT_WRITE);
			break;
		case 't':
		case 'T':
			filter |= (1 << EVENT_TIMER);
			break;
		case 'e':
		case 'E':
			filter |= (1 << EVENT_EVENT);
			break;
		case 'x':
		case 'X':
			filter |= (1 << EVENT_EXECUTE);
			break;
		default:
			break;
		}
		++i;
	}
	return filter;
}

DEFUN_NOSH (show_thread_cpu,
	    show_thread_cpu_cmd,
	    "show thread cpu [FILTER]",
	    SHOW_STR
	    "Thread information\n"
	    "Thread CPU usage\n"
	    "Display filter (rwtex)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEXB'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_print(vty, filter);
	return CMD_SUCCESS;
}
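
/* Example vtysh usage (illustrative): "show thread cpu t" limits the output
 * to timer tasks, "show thread cpu rw" to I/O tasks; with no filter all of
 * R/W/T/E/X are shown.  The same letters apply to "clear thread cpu" below.
 */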

DEFPY (service_cputime_stats,
       service_cputime_stats_cmd,
       "[no] service cputime-stats",
       NO_STR
       "Set up miscellaneous service\n"
       "Collect CPU usage statistics\n")
{
	cputime_enabled = !no;
	return CMD_SUCCESS;
}

DEFPY (service_cputime_warning,
       service_cputime_warning_cmd,
       "[no] service cputime-warning (1-4294967295)",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding CPU usage threshold\n"
       "Warning threshold in milliseconds\n")
{
	if (no)
		cputime_threshold = 0;
	else
		cputime_threshold = cputime_warning * 1000;
	return CMD_SUCCESS;
}

ALIAS (service_cputime_warning,
       no_service_cputime_warning_cmd,
       "no service cputime-warning",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding CPU usage threshold\n")

DEFPY (service_walltime_warning,
       service_walltime_warning_cmd,
       "[no] service walltime-warning (1-4294967295)",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding total wallclock threshold\n"
       "Warning threshold in milliseconds\n")
{
	if (no)
		walltime_threshold = 0;
	else
		walltime_threshold = walltime_warning * 1000;
	return CMD_SUCCESS;
}

ALIAS (service_walltime_warning,
       no_service_walltime_warning_cmd,
       "no service walltime-warning",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding total wallclock threshold\n")

static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
{
	const char *name = m->name ? m->name : "main";
	char underline[strlen(name) + 1];
	struct event *thread;
	uint32_t i;

	memset(underline, '-', sizeof(underline));
	underline[sizeof(underline) - 1] = '\0';

	vty_out(vty, "\nShowing poll FD's for %s\n", name);
	vty_out(vty, "----------------------%s\n", underline);
	vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
		m->fd_limit);
	for (i = 0; i < m->handler.pfdcount; i++) {
		vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
			m->handler.pfds[i].fd, m->handler.pfds[i].events,
			m->handler.pfds[i].revents);

		if (m->handler.pfds[i].events & POLLIN) {
			thread = m->read[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR ");
			else
				vty_out(vty, "%s ", thread->xref->funcname);
		} else
			vty_out(vty, " ");

		if (m->handler.pfds[i].events & POLLOUT) {
			thread = m->write[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR\n");
			else
				vty_out(vty, "%s\n", thread->xref->funcname);
		} else
			vty_out(vty, "\n");
	}
}

DEFUN_NOSH (show_thread_poll,
	    show_thread_poll_cmd,
	    "show thread poll",
	    SHOW_STR
	    "Thread information\n"
	    "Show poll FD's and information\n")
{
	struct listnode *node;
	struct thread_master *m;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
			show_thread_poll_helper(vty, m);
		}
	}

	return CMD_SUCCESS;
}

DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data in all pthreads\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEXB'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_clear(filter);
	return CMD_SUCCESS;
}

static void show_thread_timers_helper(struct vty *vty, struct thread_master *m)
{
	const char *name = m->name ? m->name : "main";
	char underline[strlen(name) + 1];
	struct event *thread;

	memset(underline, '-', sizeof(underline));
	underline[sizeof(underline) - 1] = '\0';

	vty_out(vty, "\nShowing timers for %s\n", name);
	vty_out(vty, "-------------------%s\n", underline);

	frr_each (thread_timer_list, &m->timer, thread) {
		vty_out(vty, "  %-50s%pTH\n", thread->hist->funcname, thread);
	}
}

DEFPY_NOSH (show_thread_timers,
	    show_thread_timers_cmd,
	    "show thread timers",
	    SHOW_STR
	    "Thread information\n"
	    "Show all timers and how long they have in the system\n")
{
	struct listnode *node;
	struct thread_master *m;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m))
			show_thread_timers_helper(vty, m);
	}

	return CMD_SUCCESS;
}

void thread_cmd_init(void)
{
	install_element(VIEW_NODE, &show_thread_cpu_cmd);
	install_element(VIEW_NODE, &show_thread_poll_cmd);
	install_element(ENABLE_NODE, &clear_thread_cpu_cmd);

	install_element(CONFIG_NODE, &service_cputime_stats_cmd);
	install_element(CONFIG_NODE, &service_cputime_warning_cmd);
	install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
	install_element(CONFIG_NODE, &service_walltime_warning_cmd);
	install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);

	install_element(VIEW_NODE, &show_thread_timers_cmd);
}
/* CLI end ------------------------------------------------------------------ */

static void cancelreq_del(void *cr)
{
	XFREE(MTYPE_TMP, cr);
}

/* initializer, only ever called once */
static void initializer(void)
{
	pthread_key_create(&thread_current, NULL);
}

struct thread_master *thread_master_create(const char *name)
{
	struct thread_master *rv;
	struct rlimit limit;

	pthread_once(&init_once, &initializer);

	rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct thread_master));

	/* Initialize master mutex */
	pthread_mutex_init(&rv->mtx, NULL);
	pthread_cond_init(&rv->cancel_cond, NULL);

	/* Set name */
	name = name ? name : "default";
	rv->name = XSTRDUP(MTYPE_EVENT_MASTER, name);

	/* Initialize I/O task data structures */

	/* Use configured limit if present, ulimit otherwise. */
	rv->fd_limit = frr_get_fd_limit();
	if (rv->fd_limit == 0) {
		getrlimit(RLIMIT_NOFILE, &limit);
		rv->fd_limit = (int)limit.rlim_cur;
	}

	rv->read = XCALLOC(MTYPE_EVENT_POLL,
			   sizeof(struct event *) * rv->fd_limit);

	rv->write = XCALLOC(MTYPE_EVENT_POLL,
			    sizeof(struct event *) * rv->fd_limit);

	char tmhashname[strlen(name) + 32];

	snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
		 name);
	rv->cpu_record = hash_create_size(
		8, (unsigned int (*)(const void *))cpu_record_hash_key,
		(bool (*)(const void *, const void *))cpu_record_hash_cmp,
		tmhashname);

	thread_list_init(&rv->event);
	thread_list_init(&rv->ready);
	thread_list_init(&rv->unuse);
	thread_timer_list_init(&rv->timer);

	/* Initialize thread_fetch() settings */
	rv->spin = true;
	rv->handle_signals = true;

	/* Set pthread owner, should be updated by actual owner */
	rv->owner = pthread_self();
	rv->cancel_req = list_new();
	rv->cancel_req->del = cancelreq_del;
	rv->canceled = true;

	/* Initialize pipe poker */
	pipe(rv->io_pipe);
	set_nonblocking(rv->io_pipe[0]);
	set_nonblocking(rv->io_pipe[1]);

	/* Initialize data structures for poll() */
	rv->handler.pfdsize = rv->fd_limit;
	rv->handler.pfdcount = 0;
	rv->handler.pfds = XCALLOC(MTYPE_EVENT_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);
	rv->handler.copy = XCALLOC(MTYPE_EVENT_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);

	/* add to list of threadmasters */
	frr_with_mutex (&masters_mtx) {
		if (!masters)
			masters = list_new();

		listnode_add(masters, rv);
	}

	return rv;
}
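
/* Illustrative sketch (not part of this library): a daemon typically creates
 * one master per pthread and tears it down at shutdown:
 *
 *	struct thread_master *master = thread_master_create("main");
 *
 *	... schedule tasks, run the fetch/call loop shown further below ...
 *
 *	thread_master_free_unused(master);
 *	thread_master_free(master);
 */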

void thread_master_set_name(struct thread_master *master, const char *name)
{
	frr_with_mutex (&master->mtx) {
		XFREE(MTYPE_EVENT_MASTER, master->name);
		master->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
	}
}

#define EVENT_UNUSED_DEPTH 10

/* Move thread to unuse list. */
static void thread_add_unuse(struct thread_master *m, struct event *thread)
{
	pthread_mutex_t mtxc = thread->mtx;

	assert(m != NULL && thread != NULL);

	thread->hist->total_active--;
	memset(thread, 0, sizeof(struct event));
	thread->type = EVENT_UNUSED;

	/* Restore the thread mutex context. */
	thread->mtx = mtxc;

	if (thread_list_count(&m->unuse) < EVENT_UNUSED_DEPTH) {
		thread_list_add_tail(&m->unuse, thread);
		return;
	}

	thread_free(m, thread);
}

/* Free all unused thread. */
static void thread_list_free(struct thread_master *m,
			     struct thread_list_head *list)
{
	struct event *t;

	while ((t = thread_list_pop(list)))
		thread_free(m, t);
}

static void thread_array_free(struct thread_master *m,
			      struct event **thread_array)
{
	struct event *t;
	int index;

	for (index = 0; index < m->fd_limit; ++index) {
		t = thread_array[index];
		if (t) {
			thread_array[index] = NULL;
			thread_free(m, t);
		}
	}
	XFREE(MTYPE_EVENT_POLL, thread_array);
}

/*
 * thread_master_free_unused
 *
 * As threads are finished with they are put on the
 * unuse list for later reuse.
 * If we are shutting down, Free up unused threads
 * So we can see if we forget to shut anything off
 */
void thread_master_free_unused(struct thread_master *m)
{
	frr_with_mutex (&m->mtx) {
		struct event *t;

		while ((t = thread_list_pop(&m->unuse)))
			thread_free(m, t);
	}
}

/* Stop thread scheduler. */
void thread_master_free(struct thread_master *m)
{
	struct event *t;

	frr_with_mutex (&masters_mtx) {
		listnode_delete(masters, m);
		if (masters->count == 0)
			list_delete(&masters);
	}

	thread_array_free(m, m->read);
	thread_array_free(m, m->write);
	while ((t = thread_timer_list_pop(&m->timer)))
		thread_free(m, t);
	thread_list_free(m, &m->event);
	thread_list_free(m, &m->ready);
	thread_list_free(m, &m->unuse);
	pthread_mutex_destroy(&m->mtx);
	pthread_cond_destroy(&m->cancel_cond);
	close(m->io_pipe[0]);
	close(m->io_pipe[1]);
	list_delete(&m->cancel_req);
	m->cancel_req = NULL;

	hash_clean_and_free(&m->cpu_record, cpu_record_hash_free);

	XFREE(MTYPE_EVENT_MASTER, m->name);
	XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
	XFREE(MTYPE_EVENT_MASTER, m->handler.copy);
	XFREE(MTYPE_EVENT_MASTER, m);
}

/* Return remain time in milliseconds. */
unsigned long thread_timer_remain_msec(struct event *thread)
{
	int64_t remain;

	if (!thread_is_scheduled(thread))
		return 0;

	frr_with_mutex (&thread->mtx) {
		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
	}

	return remain < 0 ? 0 : remain;
}

/* Return remain time in seconds. */
unsigned long thread_timer_remain_second(struct event *thread)
{
	return thread_timer_remain_msec(thread) / 1000LL;
}

struct timeval thread_timer_remain(struct event *thread)
{
	struct timeval remain;

	frr_with_mutex (&thread->mtx) {
		monotime_until(&thread->u.sands, &remain);
	}
	return remain;
}

static int time_hhmmss(char *buf, int buf_size, long sec)
{
	long hh;
	long mm;
	int wr;

	assert(buf_size >= 8);

	hh = sec / 3600;
	sec %= 3600;
	mm = sec / 60;
	sec %= 60;

	wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);

	return wr != 8;
}

char *thread_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
	if (t_timer) {
		time_hhmmss(buf, buf_size,
			    thread_timer_remain_second(t_timer));
	} else {
		snprintf(buf, buf_size, "--:--:--");
	}
	return buf;
}

/* Get new thread.  */
static struct event *thread_get(struct thread_master *m, uint8_t type,
				void (*func)(struct event *), void *arg,
				const struct xref_threadsched *xref)
{
	struct event *thread = thread_list_pop(&m->unuse);
	struct cpu_thread_history tmp;

	if (!thread) {
		thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
		/* mutex only needs to be initialized at struct creation. */
		pthread_mutex_init(&thread->mtx, NULL);
		m->alloc++;
	}

	thread->type = type;
	thread->add_type = type;
	thread->master = m;
	thread->arg = arg;
	thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
	thread->ref = NULL;
	thread->ignore_timer_late = false;

	/*
	 * So if the passed in funcname is not what we have
	 * stored that means the thread->hist needs to be
	 * updated.  We keep the last one around in unused
	 * under the assumption that we are probably
	 * going to immediately allocate the same
	 * one up again.
	 *
	 * This hopefully saves us some serious
	 * hash_get lookups.
	 */
	if ((thread->xref && thread->xref->funcname != xref->funcname)
	    || thread->func != func) {
		tmp.func = func;
		tmp.funcname = xref->funcname;
		thread->hist =
			hash_get(m->cpu_record, &tmp,
				 (void *(*)(void *))cpu_record_hash_alloc);
	}
	thread->hist->total_active++;
	thread->func = func;
	thread->xref = xref;

	return thread;
}

static void thread_free(struct thread_master *master, struct event *thread)
{
	/* Update statistics. */
	assert(master->alloc > 0);
	master->alloc--;

	/* Free allocated resources. */
	pthread_mutex_destroy(&thread->mtx);
	XFREE(MTYPE_THREAD, thread);
}

static int fd_poll(struct thread_master *m, const struct timeval *timer_wait,
		   bool *eintr_p)
{
	sigset_t origsigs;
	unsigned char trash[64];
	nfds_t count = m->handler.copycount;

	/*
	 * If timer_wait is null here, that means poll() should block
	 * indefinitely, unless the thread_master has overridden it by setting
	 * ->selectpoll_timeout.
	 *
	 * If the value is positive, it specifies the maximum number of
	 * milliseconds to wait. If the timeout is -1, it specifies that
	 * we should never wait and always return immediately even if no
	 * event is detected. If the value is zero, the behavior is default.
	 */
	int timeout = -1;

	/* number of file descriptors with events */
	int num;

	if (timer_wait != NULL
	    && m->selectpoll_timeout == 0) // use the default value
		timeout = (timer_wait->tv_sec * 1000)
			  + (timer_wait->tv_usec / 1000);
	else if (m->selectpoll_timeout > 0) // use the user's timeout
		timeout = m->selectpoll_timeout;
	else if (m->selectpoll_timeout
		 < 0) // effect a poll (return immediately)
		timeout = 0;

	zlog_tls_buffer_flush();
	rcu_read_unlock();
	rcu_assert_read_unlocked();

	/* add poll pipe poker */
	assert(count + 1 < m->handler.pfdsize);
	m->handler.copy[count].fd = m->io_pipe[0];
	m->handler.copy[count].events = POLLIN;
	m->handler.copy[count].revents = 0x00;

	/* We need to deal with a signal-handling race here: we
	 * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
	 * that may arrive just before we enter poll(). We will block the
	 * key signals, then check whether any have arrived - if so, we return
	 * before calling poll(). If not, we'll re-enable the signals
	 * in the ppoll() call.
	 */

	sigemptyset(&origsigs);
	if (m->handle_signals) {
		/* Main pthread that handles the app signals */
		if (frr_sigevent_check(&origsigs)) {
			/* Signal to process - restore signal mask and return */
			pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
			num = -1;
			*eintr_p = true;
			goto done;
		}
	} else {
		/* Don't make any changes for the non-main pthreads */
		pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
	}

#if defined(HAVE_PPOLL)
	struct timespec ts, *tsp;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
#else
	/* Not ideal - there is a race after we restore the signal mask */
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
	num = poll(m->handler.copy, count + 1, timeout);
#endif

done:

	if (num < 0 && errno == EINTR)
		*eintr_p = true;

	if (num > 0 && m->handler.copy[count].revents != 0 && num--)
		while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
			;

	rcu_read_lock();

	return num;
}

/* Add new read thread. */
void _event_add_read_write(const struct xref_threadsched *xref,
			   struct thread_master *m,
			   void (*func)(struct event *), void *arg, int fd,
			   struct event **t_ptr)
{
	int dir = xref->event_type;
	struct event *thread = NULL;
	struct event **thread_array;

	if (dir == EVENT_READ)
		frrtrace(9, frr_libfrr, schedule_read, m,
			 xref->funcname, xref->xref.file, xref->xref.line,
			 t_ptr, fd, 0, arg, 0);
	else
		frrtrace(9, frr_libfrr, schedule_write, m,
			 xref->funcname, xref->xref.file, xref->xref.line,
			 t_ptr, fd, 0, arg, 0);

	if (fd >= m->fd_limit)
		assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");

	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			// thread is already scheduled; don't reschedule
			break;

		/* default to a new pollfd */
		nfds_t queuepos = m->handler.pfdcount;

		if (dir == EVENT_READ)
			thread_array = m->read;
		else
			thread_array = m->write;

		/* if we already have a pollfd for our file descriptor, find and
		 * use it */
		for (nfds_t i = 0; i < m->handler.pfdcount; i++)
			if (m->handler.pfds[i].fd == fd) {
				queuepos = i;

#ifdef DEV_BUILD
				/*
				 * What happens if we have a thread already
				 * created for this event?
				 */
				if (thread_array[fd])
					assert(!"Thread already scheduled for file descriptor");
#endif
				break;
			}

		/* make sure we have room for this fd + pipe poker fd */
		assert(queuepos + 1 < m->handler.pfdsize);

		thread = thread_get(m, dir, func, arg, xref);

		m->handler.pfds[queuepos].fd = fd;
		m->handler.pfds[queuepos].events |=
			(dir == EVENT_READ ? POLLIN : POLLOUT);

		if (queuepos == m->handler.pfdcount)
			m->handler.pfdcount++;

		if (thread) {
			frr_with_mutex (&thread->mtx) {
				thread->u.fd = fd;
				thread_array[thread->u.fd] = thread;
			}

			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		AWAKEN(m);
	}
}

static void _event_add_timer_timeval(const struct xref_threadsched *xref,
				     struct thread_master *m,
				     void (*func)(struct event *), void *arg,
				     struct timeval *time_relative,
				     struct event **t_ptr)
{
	struct event *thread;
	struct timeval t;

	assert(m != NULL);

	assert(time_relative);

	frrtrace(9, frr_libfrr, schedule_timer, m,
		 xref->funcname, xref->xref.file, xref->xref.line,
		 t_ptr, 0, 0, arg, (long)time_relative->tv_sec);

	/* Compute expiration/deadline time. */
	monotime(&t);
	timeradd(&t, time_relative, &t);

	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			return;

		thread = thread_get(m, EVENT_TIMER, func, arg, xref);

		frr_with_mutex (&thread->mtx) {
			thread->u.sands = t;
			thread_timer_list_add(&m->timer, thread);
			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		/* The timer list is sorted - if this new timer
		 * might change the time we'll wait for, give the pthread
		 * a chance to re-compute.
		 */
		if (thread_timer_list_first(&m->timer) == thread)
			AWAKEN(m);
	}
#define ONEYEAR2SEC (60 * 60 * 24 * 365)
	if (time_relative->tv_sec > ONEYEAR2SEC)
		flog_warn(
			EC_LIB_TIMER_TOO_LONG,
			"Timer: %pTHD is created with an expiration that is greater than 1 year",
			thread);
}

/* Add timer event thread. */
void _event_add_timer(const struct xref_threadsched *xref,
		      struct thread_master *m, void (*func)(struct event *),
		      void *arg, long timer, struct event **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer;
	trel.tv_usec = 0;

	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "millisecond" resolution */
void _event_add_timer_msec(const struct xref_threadsched *xref,
			   struct thread_master *m,
			   void (*func)(struct event *), void *arg, long timer,
			   struct event **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer / 1000;
	trel.tv_usec = 1000 * (timer % 1000);

	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "timeval" resolution */
void _event_add_timer_tv(const struct xref_threadsched *xref,
			 struct thread_master *m, void (*func)(struct event *),
			 void *arg, struct timeval *tv, struct event **t_ptr)
{
	_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}
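
/* Illustrative sketch: the timer family is likewise used via wrapper macros
 * (assumed event_add_timer()/event_add_timer_msec()); expire_cb/ctx are
 * placeholders:
 *
 *	struct event *t_expire = NULL;
 *
 *	event_add_timer(master, expire_cb, ctx, 30, &t_expire);
 *
 * The back-reference also guards against double-scheduling: the
 * _event_add_* functions above return early if *t_ptr is already set.
 */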

/* Add simple event thread. */
void _event_add_event(const struct xref_threadsched *xref,
		      struct thread_master *m, void (*func)(struct event *),
		      void *arg, int val, struct event **t_ptr)
{
	struct event *thread = NULL;

	frrtrace(9, frr_libfrr, schedule_event, m,
		 xref->funcname, xref->xref.file, xref->xref.line,
		 t_ptr, 0, val, arg, 0);

	assert(m != NULL);

	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			break;

		thread = thread_get(m, EVENT_EVENT, func, arg, xref);
		frr_with_mutex (&thread->mtx) {
			thread->u.val = val;
			thread_list_add_tail(&m->event, thread);
		}

		if (t_ptr) {
			*t_ptr = thread;
			thread->ref = t_ptr;
		}

		AWAKEN(m);
	}
}
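
/* Illustrative sketch: an immediate event is queued through the wrapper
 * macro (assumed event_add_event()); placeholder names:
 *
 *	event_add_event(master, process_queue, ctx, 0, &t_process);
 *
 * Such a task fires on the next pass of the scheduling loop rather than
 * waiting on poll() or a timer.
 */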

/* Thread cancellation ------------------------------------------------------ */

/**
 * NOT's out the .events field of pollfd corresponding to the given file
 * descriptor. The event to be NOT'd is passed in the 'state' parameter.
 *
 * This needs to happen for both copies of pollfd's. See 'thread_fetch'
 * implementation for details.
 *
 * @param master
 * @param fd
 * @param state the event to cancel. One or more (OR'd together) of the
 * following:
 *   - POLLIN
 *   - POLLOUT
 */
static void event_cancel_rw(struct thread_master *master, int fd, short state,
			    int idx_hint)
{
	bool found = false;

	/* find the index of corresponding pollfd */
	nfds_t i;

	/* Cancel POLLHUP too just in case some bozo set it */
	state |= POLLHUP;

	/* Some callers know the index of the pfd already */
	if (idx_hint >= 0) {
		i = idx_hint;
		found = true;
	} else {
		/* Have to look for the fd in the pfd array */
		for (i = 0; i < master->handler.pfdcount; i++)
			if (master->handler.pfds[i].fd == fd) {
				found = true;
				break;
			}
	}

	if (!found) {
		zlog_debug(
			"[!] Received cancellation request for nonexistent rw job");
		zlog_debug("[!] threadmaster: %s | fd: %d",
			   master->name ? master->name : "", fd);
		return;
	}

	/* NOT out event. */
	master->handler.pfds[i].events &= ~(state);

	/* If all events are canceled, delete / resize the pollfd array. */
	if (master->handler.pfds[i].events == 0) {
		memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
			(master->handler.pfdcount - i - 1)
				* sizeof(struct pollfd));
		master->handler.pfdcount--;
		master->handler.pfds[master->handler.pfdcount].fd = 0;
		master->handler.pfds[master->handler.pfdcount].events = 0;
	}

	/* If we have the same pollfd in the copy, perform the same operations,
	 * otherwise return. */
	if (i >= master->handler.copycount)
		return;

	master->handler.copy[i].events &= ~(state);

	if (master->handler.copy[i].events == 0) {
		memmove(master->handler.copy + i, master->handler.copy + i + 1,
			(master->handler.copycount - i - 1)
				* sizeof(struct pollfd));
		master->handler.copycount--;
		master->handler.copy[master->handler.copycount].fd = 0;
		master->handler.copy[master->handler.copycount].events = 0;
	}
}

/*
 * Process task cancellation given a task argument: iterate through the
 * various lists of tasks, looking for any that match the argument.
 */
static void cancel_arg_helper(struct thread_master *master,
			      const struct cancel_req *cr)
{
	struct event *t;
	nfds_t i;
	int fd;
	struct pollfd *pfd;

	/* We're only processing arg-based cancellations here. */
	if (cr->eventobj == NULL)
		return;

	/* First process the ready lists. */
	frr_each_safe(thread_list, &master->event, t) {
		if (t->arg != cr->eventobj)
			continue;
		thread_list_del(&master->event, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	frr_each_safe(thread_list, &master->ready, t) {
		if (t->arg != cr->eventobj)
			continue;
		thread_list_del(&master->ready, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	/* If requested, stop here and ignore io and timers */
	if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
		return;

	/* Check the io tasks */
	for (i = 0; i < master->handler.pfdcount;) {
		pfd = master->handler.pfds + i;

		if (pfd->events & POLLIN)
			t = master->read[pfd->fd];
		else
			t = master->write[pfd->fd];

		if (t && t->arg == cr->eventobj) {
			fd = pfd->fd;

			/* Found a match to cancel: clean up fd arrays */
			event_cancel_rw(master, pfd->fd, pfd->events, i);

			/* Clean up thread arrays */
			master->read[fd] = NULL;
			master->write[fd] = NULL;

			/* Clear caller's ref */
			if (t->ref)
				*t->ref = NULL;

			thread_add_unuse(master, t);

			/* Don't increment 'i' since the cancellation will have
			 * removed the entry from the pfd array
			 */
		} else
			i++;
	}

	/* Check the timer tasks */
	t = thread_timer_list_first(&master->timer);
	while (t) {
		struct event *t_next;

		t_next = thread_timer_list_next(&master->timer, t);

		if (t->arg == cr->eventobj) {
			thread_timer_list_del(&master->timer, t);
			if (t->ref)
				*t->ref = NULL;
			thread_add_unuse(master, t);
		}

		t = t_next;
	}
}

/**
 * Process cancellation requests.
 *
 * This may only be run from the pthread which owns the thread_master.
 *
 * @param master the thread master to process
 * @REQUIRE master->mtx
 */
static void do_event_cancel(struct thread_master *master)
{
	struct thread_list_head *list = NULL;
	struct event **thread_array = NULL;
	struct event *thread;
	struct cancel_req *cr;
	struct listnode *ln;

	for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
		/*
		 * If this is an event object cancellation, search
		 * through task lists deleting any tasks which have the
		 * specified argument - use this handy helper function.
		 */
		if (cr->eventobj) {
			cancel_arg_helper(master, cr);
			continue;
		}

		/*
		 * The pointer varies depending on whether the cancellation
		 * request was made asynchronously or not. If it was, we
		 * need to check whether the thread even exists anymore
		 * before cancelling it.
		 */
		thread = (cr->thread) ? cr->thread : *cr->threadref;

		if (!thread)
			continue;

		list = NULL;
		thread_array = NULL;

		/* Determine the appropriate queue to cancel the thread from */
		switch (thread->type) {
		case EVENT_READ:
			event_cancel_rw(master, thread->u.fd, POLLIN, -1);
			thread_array = master->read;
			break;
		case EVENT_WRITE:
			event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
			thread_array = master->write;
			break;
		case EVENT_TIMER:
			thread_timer_list_del(&master->timer, thread);
			break;
		case EVENT_EVENT:
			list = &master->event;
			break;
		case EVENT_READY:
			list = &master->ready;
			break;
		default:
			continue;
		}

		if (list) {
			thread_list_del(list, thread);
		} else if (thread_array) {
			thread_array[thread->u.fd] = NULL;
		}

		if (thread->ref)
			*thread->ref = NULL;

		thread_add_unuse(thread->master, thread);
	}

	/* Delete and free all cancellation requests */
	if (master->cancel_req)
		list_delete_all_node(master->cancel_req);

	/* Wake up any threads which may be blocked in event_cancel_async() */
	master->canceled = true;
	pthread_cond_broadcast(&master->cancel_cond);
}

/*
 * Helper function used for multiple flavors of arg-based cancellation.
 */
static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
{
	struct cancel_req *cr;

	assert(m->owner == pthread_self());

	/* Only worth anything if caller supplies an arg. */
	if (arg == NULL)
		return;

	cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));

	cr->flags = flags;

	frr_with_mutex (&m->mtx) {
		cr->eventobj = arg;
		listnode_add(m->cancel_req, cr);
		do_event_cancel(m);
	}
}

/*
 * Cancel any events which have the specified argument.
 *
 * MT-Unsafe
 *
 * @param m the thread_master to cancel from
 * @param arg the argument passed when creating the event
 */
void event_cancel_event(struct thread_master *master, void *arg)
{
	cancel_event_helper(master, arg, 0);
}

/*
 * Cancel ready tasks with an arg matching 'arg'
 *
 * MT-Unsafe
 *
 * @param m the thread_master to cancel from
 * @param arg the argument passed when creating the event
 */
void event_cancel_event_ready(struct thread_master *m, void *arg)
{

	/* Only cancel ready/event tasks */
	cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
}

/*
 * Cancel a specific task.
 *
 * MT-Unsafe
 *
 * @param thread task to cancel
 */
void event_cancel(struct event **thread)
{
	struct thread_master *master;

	if (thread == NULL || *thread == NULL)
		return;

	master = (*thread)->master;

	frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
		 (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
		 (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
		 (*thread)->u.sands.tv_sec);

	assert(master->owner == pthread_self());

	frr_with_mutex (&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->thread = *thread;
		listnode_add(master->cancel_req, cr);
		do_event_cancel(master);
	}

	*thread = NULL;
}
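
/* Illustrative usage: cancellation takes the address of the back-reference
 * so it can both remove the task and NULL the caller's pointer:
 *
 *	event_cancel(&t_read);	// safe even if t_read is already NULL
 *
 * Per the assert above, this may only run on the pthread owning the master.
 */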

/**
 * Asynchronous cancellation.
 *
 * Called with either a struct event ** or void * to an event argument,
 * this function posts the correct cancellation request and blocks until it is
 * serviced.
 *
 * If the thread is currently running, execution blocks until it completes.
 *
 * The last two parameters are mutually exclusive, i.e. if you pass one the
 * other must be NULL.
 *
 * When the cancellation procedure executes on the target thread_master, the
 * thread * provided is checked for nullity. If it is null, the thread is
 * assumed to no longer exist and the cancellation request is a no-op. Thus
 * users of this API must pass a back-reference when scheduling the original
 * task.
 *
 * MT-Safe
 *
 * @param master the thread master with the relevant event / task
 * @param thread pointer to thread to cancel
 * @param eventobj the event
 */
void event_cancel_async(struct thread_master *master, struct event **thread,
			void *eventobj)
{
	assert(!(thread && eventobj) && (thread || eventobj));

	if (thread && *thread)
		frrtrace(9, frr_libfrr, event_cancel_async, master,
			 (*thread)->xref->funcname, (*thread)->xref->xref.file,
			 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
			 (*thread)->u.val, (*thread)->arg,
			 (*thread)->u.sands.tv_sec);
	else
		frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
			 0, NULL, 0, 0, eventobj, 0);

	assert(master->owner != pthread_self());

	frr_with_mutex (&master->mtx) {
		master->canceled = false;

		if (thread) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->threadref = thread;
			listnode_add(master->cancel_req, cr);
		} else if (eventobj) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->eventobj = eventobj;
			listnode_add(master->cancel_req, cr);
		}
		AWAKEN(master);

		while (!master->canceled)
			pthread_cond_wait(&master->cancel_cond, &master->mtx);
	}

	if (thread)
		*thread = NULL;
}
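
/* Illustrative usage from another pthread (placeholder names):
 *
 *	event_cancel_async(peer_master, &peer_t_timer, NULL);	// by reference
 *	event_cancel_async(peer_master, NULL, peer);		// by argument
 *
 * Both forms block until the owning pthread services the request, which is
 * why the owner must never call this on its own master.
 */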

/* ------------------------------------------------------------------------- */

static struct timeval *thread_timer_wait(struct thread_timer_list_head *timers,
					 struct timeval *timer_val)
{
	if (!thread_timer_list_count(timers))
		return NULL;

	struct event *next_timer = thread_timer_list_first(timers);

	monotime_until(&next_timer->u.sands, timer_val);
	return timer_val;
}

static struct event *thread_run(struct thread_master *m, struct event *thread,
				struct event *fetch)
{
	*fetch = *thread;
	thread_add_unuse(m, thread);
	return fetch;
}

static int thread_process_io_helper(struct thread_master *m,
				    struct event *thread, short state,
				    short actual_state, int pos)
{
	struct event **thread_array;

	/*
	 * poll() clears the .events field, but the pollfd array we
	 * pass to poll() is a copy of the one used to schedule threads.
	 * We need to synchronize state between the two here by applying
	 * the same changes poll() made on the copy of the "real" pollfd
	 * array.
	 *
	 * This cleans up a possible infinite loop where we refuse
	 * to respond to a poll event but poll is insistent that
	 * we should.
	 */
	m->handler.pfds[pos].events &= ~(state);

	if (!thread) {
		if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
			flog_err(EC_LIB_NO_THREAD,
				 "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
				 m->handler.pfds[pos].fd, actual_state);
		return 0;
	}

	if (thread->type == EVENT_READ)
		thread_array = m->read;
	else
		thread_array = m->write;

	thread_array[thread->u.fd] = NULL;
	thread_list_add_tail(&m->ready, thread);
	thread->type = EVENT_READY;

	return 1;
}

/**
 * Process I/O events.
 *
 * Walks through file descriptor array looking for those pollfds whose .revents
 * field has something interesting. Deletes any invalid file descriptors.
 *
 * @param m the thread master
 * @param num the number of active file descriptors (return value of poll())
 */
static void thread_process_io(struct thread_master *m, unsigned int num)
{
	unsigned int ready = 0;
	struct pollfd *pfds = m->handler.copy;

	for (nfds_t i = 0; i < m->handler.copycount && ready < num; ++i) {
		/* no event for current fd? immediately continue */
		if (pfds[i].revents == 0)
			continue;

		ready++;

		/*
		 * Unless someone has called event_cancel from another
		 * pthread, the only thing that could have changed in
		 * m->handler.pfds while we were asleep is the .events
		 * field in a given pollfd. Barring event_cancel() that
		 * value should be a superset of the values we have in our
		 * copy, so there's no need to update it. Similarily,
		 * barring deletion, the fd should still be a valid index
		 * into the master's pfds.
		 *
		 * We are including POLLERR here to do a READ event
		 * this is because the read should fail and the
		 * read function should handle it appropriately
		 */
		if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
			thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN,
						 pfds[i].revents, i);
		}
		if (pfds[i].revents & POLLOUT)
			thread_process_io_helper(m, m->write[pfds[i].fd],
						 POLLOUT, pfds[i].revents, i);

		/* if one of our file descriptors is garbage, remove the same
		 * from both pfds + update sizes and index */
		if (pfds[i].revents & POLLNVAL) {
			memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
				(m->handler.pfdcount - i - 1)
					* sizeof(struct pollfd));
			m->handler.pfdcount--;
			m->handler.pfds[m->handler.pfdcount].fd = 0;
			m->handler.pfds[m->handler.pfdcount].events = 0;

			memmove(pfds + i, pfds + i + 1,
				(m->handler.copycount - i - 1)
					* sizeof(struct pollfd));
			m->handler.copycount--;
			m->handler.copy[m->handler.copycount].fd = 0;
			m->handler.copy[m->handler.copycount].events = 0;

			i--;
		}
	}
}

/* Add all timers that have popped to the ready list. */
static unsigned int thread_process_timers(struct thread_master *m,
					  struct timeval *timenow)
{
	struct timeval prev = *timenow;
	bool displayed = false;
	struct event *thread;
	unsigned int ready = 0;

	while ((thread = thread_timer_list_first(&m->timer))) {
		if (timercmp(timenow, &thread->u.sands, <))
			break;
		prev = thread->u.sands;
		prev.tv_sec += 4;
		/*
		 * If the timer would have popped 4 seconds in the
		 * past then we are in a situation where we are
		 * really getting behind on handling of events.
		 * Let's log it and do the right thing with it.
		 */
		if (timercmp(timenow, &prev, >)) {
			atomic_fetch_add_explicit(
				&thread->hist->total_starv_warn, 1,
				memory_order_seq_cst);
			if (!displayed && !thread->ignore_timer_late) {
				flog_warn(
					EC_LIB_STARVE_THREAD,
					"Thread Starvation: %pTHD was scheduled to pop greater than 4s ago",
					thread);
				displayed = true;
			}
		}

		thread_timer_list_pop(&m->timer);
		thread->type = EVENT_READY;
		thread_list_add_tail(&m->ready, thread);
		ready++;
	}

	return ready;
}

/* process a list en masse, e.g. for event thread lists */
static unsigned int thread_process(struct thread_list_head *list)
{
	struct event *thread;
	unsigned int ready = 0;

	while ((thread = thread_list_pop(list))) {
		thread->type = EVENT_READY;
		thread_list_add_tail(&thread->master->ready, thread);
		ready++;
	}
	return ready;
}

/* Fetch next ready thread. */
struct event *thread_fetch(struct thread_master *m, struct event *fetch)
{
	struct event *thread = NULL;
	struct timeval now;
	struct timeval zerotime = {0, 0};
	struct timeval tv;
	struct timeval *tw = NULL;
	bool eintr_p = false;
	int num = 0;

	do {
		/* Handle signals if any */
		if (m->handle_signals)
			frr_sigevent_process();

		pthread_mutex_lock(&m->mtx);

		/* Process any pending cancellation requests */
		do_event_cancel(m);

		/*
		 * Attempt to flush ready queue before going into poll().
		 * This is performance-critical. Think twice before modifying.
		 */
		if ((thread = thread_list_pop(&m->ready))) {
			fetch = thread_run(m, thread, fetch);
			if (fetch->ref)
				*fetch->ref = NULL;
			pthread_mutex_unlock(&m->mtx);
			if (!m->ready_run_loop)
				GETRUSAGE(&m->last_getrusage);
			m->ready_run_loop = true;
			break;
		}

		m->ready_run_loop = false;
		/* otherwise, tick through scheduling sequence */

		/*
		 * Post events to ready queue. This must come before the
		 * following block since events should occur immediately
		 */
		thread_process(&m->event);

		/*
		 * If there are no tasks on the ready queue, we will poll()
		 * until a timer expires or we receive I/O, whichever comes
		 * first. The strategy for doing this is:
		 *
		 * - If there are events pending, set the poll() timeout to zero
		 * - If there are no events pending, but there are timers
		 *   pending, set the timeout to the smallest remaining time on
		 *   any timer.
		 * - If there are neither timers nor events pending, but there
		 *   are file descriptors pending, block indefinitely in poll()
		 * - If nothing is pending, it's time for the application to die
		 *
		 * In every case except the last, we need to hit poll() at least
		 * once per loop to avoid starvation by events
		 */
		if (!thread_list_count(&m->ready))
			tw = thread_timer_wait(&m->timer, &tv);

		if (thread_list_count(&m->ready) ||
		    (tw && !timercmp(tw, &zerotime, >)))
			tw = &zerotime;

		if (!tw && m->handler.pfdcount == 0) { /* die */
			pthread_mutex_unlock(&m->mtx);
			fetch = NULL;
			break;
		}

		/*
		 * Copy pollfd array + # active pollfds in it. Not necessary to
		 * copy the array size as this is fixed.
		 */
		m->handler.copycount = m->handler.pfdcount;
		memcpy(m->handler.copy, m->handler.pfds,
		       m->handler.copycount * sizeof(struct pollfd));

		pthread_mutex_unlock(&m->mtx);

		eintr_p = false;
		num = fd_poll(m, tw, &eintr_p);

		pthread_mutex_lock(&m->mtx);

		/* Handle any errors received in poll() */
		if (num < 0) {
			if (eintr_p) {
				pthread_mutex_unlock(&m->mtx);
				/* loop around to signal handler */
				continue;
			}

			/* else die */
			flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s",
				 safe_strerror(errno));
			pthread_mutex_unlock(&m->mtx);
			fetch = NULL;
			break;
		}

		/* Post timers to ready queue. */
		monotime(&now);
		thread_process_timers(m, &now);

		/* Post I/O to ready queue. */
		if (num > 0)
			thread_process_io(m, num);

		pthread_mutex_unlock(&m->mtx);

	} while (!thread && m->spin);

	return fetch;
}
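
/* Illustrative sketch: thread_fetch() is the heart of the per-pthread main
 * loop; it returns NULL once nothing is scheduled anymore (or on a fatal
 * poll() error), so the canonical driver is simply:
 *
 *	struct event event;
 *
 *	while (thread_fetch(master, &event))
 *		thread_call(&event);
 */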

static unsigned long timeval_elapsed(struct timeval a, struct timeval b)
{
	return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
		+ (a.tv_usec - b.tv_usec));
}

unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
				   unsigned long *cputime)
{
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID

	/*
	 * FreeBSD appears to have an issue when calling clock_gettime
	 * with CLOCK_THREAD_CPUTIME_ID really close to each other
	 * occasionally the now time will be before the start time.
	 * This is not good and FRR is ending up with CPU HOG's
	 * when the subtraction wraps to very large numbers
	 *
	 * What we are going to do here is cheat a little bit
	 * and notice that this is a problem and just correct
	 * it so that it is impossible to happen
	 */
	if (start->cpu.tv_sec == now->cpu.tv_sec &&
	    start->cpu.tv_nsec > now->cpu.tv_nsec)
		now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
	else if (start->cpu.tv_sec > now->cpu.tv_sec) {
		now->cpu.tv_sec = start->cpu.tv_sec;
		now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
	}

	*cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
		   + (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
#else
	/* This is 'user + sys' time. */
	*cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
		   + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
#endif
	return timeval_elapsed(now->real, start->real);
}

/* We should aim to yield after yield milliseconds, which defaults
   to EVENT_YIELD_TIME_SLOT .
   Note: we are using real (wall clock) time for this calculation.
   It could be argued that CPU time may make more sense in certain
   contexts.  The things to consider are whether the thread may have
   blocked (in which case wall time increases, but CPU time does not),
   or whether the system is heavily loaded with other processes competing
   for CPU time.  On balance, wall clock time seems to make sense.
   Plus it has the added benefit that gettimeofday should be faster
   than calling getrusage. */
int thread_should_yield(struct event *thread)
{
	int result;

	frr_with_mutex (&thread->mtx) {
		result = monotime_since(&thread->real, NULL)
			 > (int64_t)thread->yield;
	}
	return result;
}
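
/* Illustrative sketch: a long-running task can cooperate with the scheduler
 * by checking this and rescheduling itself (the event_add_event() wrapper
 * macro and the work_* names are placeholders):
 *
 *	static void process_table(struct event *t)
 *	{
 *		struct work *w = t->arg;
 *
 *		while (work_remains(w)) {
 *			do_one_unit(w);
 *			if (thread_should_yield(t)) {
 *				event_add_event(t->master, process_table,
 *						w, 0, NULL);
 *				return;
 *			}
 *		}
 *	}
 */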

void thread_set_yield_time(struct event *thread, unsigned long yield_time)
{
	frr_with_mutex (&thread->mtx) {
		thread->yield = yield_time;
	}
}

void thread_getrusage(RUSAGE_T *r)
{
	monotime(&r->real);
	if (!cputime_enabled) {
		memset(&r->cpu, 0, sizeof(r->cpu));
		return;
	}

#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
	/* not currently implemented in Linux's vDSO, but maybe at some point
	 * in the future?
	 */
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
#if defined RUSAGE_THREAD
#define FRR_RUSAGE RUSAGE_THREAD
#else
#define FRR_RUSAGE RUSAGE_SELF
#endif
	getrusage(FRR_RUSAGE, &(r->cpu));
#endif
}

/*
 * This function will atomically update the thread's usage history. At present
 * this is the only spot where usage history is written. Nevertheless the code
 * has been written such that the introduction of writers in the future should
 * not need to update it provided the writers atomically perform only the
 * operations done here, i.e. updating the total and maximum times. In
 * particular, the maximum real and cpu times must be monotonically increasing
 * or this code is not correct.
 */
void thread_call(struct event *thread)
{
	RUSAGE_T before, after;

	/* if the thread being called is the CLI, it may change cputime_enabled
	 * ("service cputime-stats" command), which can result in nonsensical
	 * and very confusing warnings
	 */
	bool cputime_enabled_here = cputime_enabled;

	if (thread->master->ready_run_loop)
		before = thread->master->last_getrusage;
	else
		GETRUSAGE(&before);

	thread->real = before.real;

	frrtrace(9, frr_libfrr, thread_call, thread->master,
		 thread->xref->funcname, thread->xref->xref.file,
		 thread->xref->xref.line, NULL, thread->u.fd,
		 thread->u.val, thread->arg, thread->u.sands.tv_sec);

	pthread_setspecific(thread_current, thread);
	(*thread->func)(thread);
	pthread_setspecific(thread_current, NULL);

	GETRUSAGE(&after);
	thread->master->last_getrusage = after;

	unsigned long walltime, cputime;
	unsigned long exp;

	walltime = thread_consumed_time(&after, &before, &cputime);

	/* update walltime */
	atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
				  memory_order_seq_cst);
	exp = atomic_load_explicit(&thread->hist->real.max,
				   memory_order_seq_cst);
	while (exp < walltime
	       && !atomic_compare_exchange_weak_explicit(
		       &thread->hist->real.max, &exp, walltime,
		       memory_order_seq_cst, memory_order_seq_cst))
		;

	if (cputime_enabled_here && cputime_enabled) {
		/* update cputime */
		atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
					  memory_order_seq_cst);
		exp = atomic_load_explicit(&thread->hist->cpu.max,
					   memory_order_seq_cst);
		while (exp < cputime
		       && !atomic_compare_exchange_weak_explicit(
			       &thread->hist->cpu.max, &exp, cputime,
			       memory_order_seq_cst, memory_order_seq_cst))
			;
	}

	atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
				  memory_order_seq_cst);
	atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
				 memory_order_seq_cst);

	if (cputime_enabled_here && cputime_enabled && cputime_threshold
	    && cputime > cputime_threshold) {
		/*
		 * We have a CPU Hog on our hands.  The time FRR has spent
		 * doing actual work (not sleeping) is greater than 5 seconds.
		 * Whinge about it now, so we're aware this is yet another task
		 * to fix.
		 */
		atomic_fetch_add_explicit(&thread->hist->total_cpu_warn,
					  1, memory_order_seq_cst);
		flog_warn(
			EC_LIB_SLOW_THREAD_CPU,
			"CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
			thread->xref->funcname, (unsigned long)thread->func,
			walltime / 1000, cputime / 1000);

	} else if (walltime_threshold && walltime > walltime_threshold) {
		/*
		 * The runtime for a task is greater than 5 seconds, but the
		 * cpu time is under 5 seconds.  Let's whine about this because
		 * this could imply some sort of scheduling issue.
		 */
		atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
					  1, memory_order_seq_cst);
		flog_warn(
			EC_LIB_SLOW_THREAD_WALL,
			"STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
			thread->xref->funcname, (unsigned long)thread->func,
			walltime / 1000, cputime / 1000);
	}
}

/* Execute thread */
void _thread_execute(const struct xref_threadsched *xref,
		     struct thread_master *m, void (*func)(struct event *),
		     void *arg, int val)
{
	struct event *thread;

	/* Get or allocate new thread to execute. */
	frr_with_mutex (&m->mtx) {
		thread = thread_get(m, EVENT_EVENT, func, arg, xref);

		/* Set its event value. */
		frr_with_mutex (&thread->mtx) {
			thread->add_type = EVENT_EXECUTE;
			thread->u.val = val;
			thread->ref = &thread;
		}
	}

	/* Execute thread doing all accounting. */
	thread_call(thread);

	/* Give back or free thread. */
	thread_add_unuse(m, thread);
}
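
/* Illustrative note: the matching wrapper macro in the header (assumed
 * thread_execute()) runs a callback synchronously on the calling pthread
 * while still recording it in the CPU statistics:
 *
 *	thread_execute(master, flush_cb, ctx, 0);
 *
 * Unlike event_add_event(), nothing remains queued afterwards.
 */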

/* Debug signal mask - if 'sigs' is NULL, use current effective mask. */
void debug_signals(const sigset_t *sigs)
{
	int i, found;
	sigset_t tmpsigs;
	char buf[300];

	/*
	 * We're only looking at the non-realtime signals here, so we need
	 * some limit value. Platform differences mean at some point we just
	 * need to pick a reasonable value.
	 */
#if defined SIGRTMIN
#  define LAST_SIGNAL SIGRTMIN
#else
#  define LAST_SIGNAL 32
#endif

	if (sigs == NULL) {
		sigemptyset(&tmpsigs);
		pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs);
		sigs = &tmpsigs;
	}

	found = 0;
	buf[0] = '\0';

	for (i = 0; i < LAST_SIGNAL; i++) {
		char tmp[20];

		if (sigismember(sigs, i) > 0) {
			if (found > 0)
				strlcat(buf, ",", sizeof(buf));
			snprintf(tmp, sizeof(tmp), "%d", i);
			strlcat(buf, tmp, sizeof(buf));
			found++;
		}
	}

	if (found == 0)
		snprintf(buf, sizeof(buf), "<none>");

	zlog_debug("%s: %s", __func__, buf);
}

static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
				   const struct event *thread)
{
	static const char *const types[] = {
		[EVENT_READ] = "read", [EVENT_WRITE] = "write",
		[EVENT_TIMER] = "timer", [EVENT_EVENT] = "event",
		[EVENT_READY] = "ready", [EVENT_UNUSED] = "unused",
		[EVENT_EXECUTE] = "exec",
	};
	ssize_t rv = 0;
	char info[16] = "";

	if (!thread)
		return bputs(buf, "{(thread *)NULL}");

	rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);

	if (thread->type < array_size(types) && types[thread->type])
		rv += bprintfrr(buf, " %-6s", types[thread->type]);
	else
		rv += bprintfrr(buf, " INVALID(%u)", thread->type);

	switch (thread->type) {
	case EVENT_READ:
	case EVENT_WRITE:
		snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
		break;

	case EVENT_TIMER:
		snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
		break;
	}

	rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,
			thread->xref->funcname, thread->xref->dest,
			thread->xref->xref.file, thread->xref->xref.line);
	return rv;
}
);
2177 static ssize_t
printfrr_thread(struct fbuf
*buf
, struct printfrr_eargs
*ea
,
2180 const struct event
*thread
= ptr
;
2181 struct timespec remain
= {};
2183 if (ea
->fmt
[0] == 'D') {
2185 return printfrr_thread_dbg(buf
, ea
, thread
);
2189 /* need to jump over time formatting flag characters in the
2190 * input format string, i.e. adjust ea->fmt!
2192 printfrr_time(buf
, ea
, &remain
,
2193 TIMEFMT_TIMER_DEADLINE
| TIMEFMT_SKIP
);
2194 return bputch(buf
, '-');
2197 TIMEVAL_TO_TIMESPEC(&thread
->u
.sands
, &remain
);
2198 return printfrr_time(buf
, ea
, &remain
, TIMEFMT_TIMER_DEADLINE
);
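
/* Illustrative usage: these printfrr extensions let any zlog/vty format
 * print tasks directly, as the timer CLI above does:
 *
 *	zlog_debug("next expiry in %pTH", thread);	// remaining time
 *	zlog_debug("task details: %pTHD", thread);	// full debug dump
 */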