unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
/* CLI start ---------------------------------------------------------------- */
-#ifndef VTYSH_EXTRACT_PL
#include "lib/thread_clippy.c"
-#endif
static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
tmp.funcname = "TOTAL";
tmp.types = filter;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
struct thread_master *m;
struct listnode *ln;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
hash_iterate(
m->cpu_record,
struct listnode *node;
struct thread_master *m;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
show_thread_poll_helper(vty, m);
}
return CMD_SUCCESS;
}
+/*
+ * Print every timer currently scheduled on one thread master, together
+ * with the time remaining until it fires (the %pTH printfrr extension
+ * formats a struct thread *).  Helper for the "show thread timers" CLI.
+ */
+static void show_thread_timers_helper(struct vty *vty, struct thread_master *m)
+{
+	const char *name = m->name ? m->name : "main";
+	/* dashed underline sized to match the master's name */
+	char underline[strlen(name) + 1];
+	struct thread *thread;
+
+	memset(underline, '-', sizeof(underline));
+	underline[sizeof(underline) - 1] = '\0';
+
+	vty_out(vty, "\nShowing timers for %s\n", name);
+	vty_out(vty, "-------------------%s\n", underline);
+
+	/* m->timer is the master's sorted timer list */
+	frr_each (thread_timer_list, &m->timer, thread) {
+		vty_out(vty, " %-50s%pTH\n", thread->hist->funcname, thread);
+	}
+}
+
+/*
+ * CLI: "show thread timers" — dump the pending timers of every thread
+ * master in the process via show_thread_timers_helper().
+ */
+DEFPY_NOSH (show_thread_timers,
+	    show_thread_timers_cmd,
+	    "show thread timers",
+	    SHOW_STR
+	    "Thread information\n"
+	    "Show all timers and how long they have in the system\n")
+{
+	struct listnode *node;
+	struct thread_master *m;
+
+	/* the masters list is shared between pthreads; walk it locked */
+	frr_with_mutex (&masters_mtx) {
+		for (ALL_LIST_ELEMENTS_RO(masters, node, m))
+			show_thread_timers_helper(vty, m);
+	}
+
+	return CMD_SUCCESS;
+}
+
void thread_cmd_init(void)
{
install_element(VIEW_NODE, &show_thread_cpu_cmd);
install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
install_element(CONFIG_NODE, &service_walltime_warning_cmd);
install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);
+
+ install_element(VIEW_NODE, &show_thread_timers_cmd);
}
/* CLI end ------------------------------------------------------------------ */
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
if (!masters)
masters = list_new();
void thread_master_set_name(struct thread_master *master, const char *name)
{
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
XFREE(MTYPE_THREAD_MASTER, master->name);
master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
}
*/
void thread_master_free_unused(struct thread_master *m)
{
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
struct thread *t;
while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
{
struct thread *t;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
listnode_delete(masters, m);
if (masters->count == 0) {
list_delete(&masters);
XFREE(MTYPE_THREAD_MASTER, m);
}
-/* Return remain time in miliseconds. */
+/* Return remain time in milliseconds. */
unsigned long thread_timer_remain_msec(struct thread *thread)
{
int64_t remain;
- frr_with_mutex(&thread->mtx) {
+ if (!thread_is_scheduled(thread))
+ return 0;
+
+ frr_with_mutex (&thread->mtx) {
remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
}
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
return remain;
if (fd >= m->fd_limit)
assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
// thread is already scheduled; don't reschedule
break;
m->handler.pfdcount++;
if (thread) {
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.fd = fd;
thread_array[thread->u.fd] = thread;
}
monotime(&t);
timeradd(&t, time_relative, &t);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
return;
thread = thread_get(m, THREAD_TIMER, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.sands = t;
thread_timer_list_add(&m->timer, thread);
if (t_ptr) {
assert(m != NULL);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
break;
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
}
struct thread_list_head *list = NULL;
struct thread **thread_array = NULL;
struct thread *thread;
-
struct cancel_req *cr;
struct listnode *ln;
+
for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
/*
* If this is an event object cancellation, search
if (!thread)
continue;
+ list = NULL;
+ thread_array = NULL;
+
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
cr->flags = flags;
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
cr->eventobj = arg;
listnode_add(m->cancel_req, cr);
do_thread_cancel(m);
assert(master->owner == pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = *thread;
assert(master->owner != pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
master->canceled = false;
if (thread) {
unsigned long *cputime)
{
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+
+#ifdef __FreeBSD__
+ /*
+ * FreeBSD appears to have an issue when calling clock_gettime
+ * with CLOCK_THREAD_CPUTIME_ID really close to each other
+	 * occasionally the now time will be before the start time.
+	 * This is not good and FRR is ending up with CPU HOGs
+	 * when the subtraction wraps to very large numbers
+ *
+ * What we are going to do here is cheat a little bit
+ * and notice that this is a problem and just correct
+ * it so that it is impossible to happen
+ */
+ if (start->cpu.tv_sec == now->cpu.tv_sec &&
+ start->cpu.tv_nsec > now->cpu.tv_nsec)
+ now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
+ else if (start->cpu.tv_sec > now->cpu.tv_sec) {
+ now->cpu.tv_sec = start->cpu.tv_sec;
+ now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
+ }
+#endif
*cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
+ (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
#else
int thread_should_yield(struct thread *thread)
{
int result;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->yield = yield_time;
}
}
struct thread *thread;
/* Get or allocate new thread to execute. */
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
/* Set its event value. */
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->add_type = THREAD_EXECUTE;
thread->u.val = val;
thread->ref = &thread;
zlog_debug("%s: %s", __func__, buf);
}
-bool thread_is_scheduled(struct thread *thread)
-{
- if (thread == NULL)
- return false;
-
- return true;
-}
-
static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
const struct thread *thread)
{