unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
/* CLI start ---------------------------------------------------------------- */
-#ifndef VTYSH_EXTRACT_PL
#include "lib/thread_clippy.c"
-#endif
static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
tmp.funcname = "TOTAL";
tmp.types = filter;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
struct thread_master *m;
struct listnode *ln;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
hash_iterate(
m->cpu_record,
struct listnode *node;
struct thread_master *m;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
show_thread_poll_helper(vty, m);
}
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
if (!masters)
masters = list_new();
void thread_master_set_name(struct thread_master *master, const char *name)
{
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
XFREE(MTYPE_THREAD_MASTER, master->name);
master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
}
*/
void thread_master_free_unused(struct thread_master *m)
{
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
struct thread *t;
while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
{
struct thread *t;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
listnode_delete(masters, m);
if (masters->count == 0) {
list_delete(&masters);
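
The masters list manipulated above is maintained entirely by thread_master_create() and thread_master_free(); callers never touch it directly. For orientation, a minimal lifecycle sketch using the standard lib/thread.h fetch/call loop, assuming a single-threaded consumer; the "example" name and the bare main() are illustrative only (real daemons normally drive this loop through frr_run()).

#include "thread.h"

int main(void)
{
	struct thread_master *master = thread_master_create("example");
	struct thread fetched;

	/* register initial timers, events and read/write handlers here */

	while (thread_fetch(master, &fetched))
		thread_call(&fetched);

	thread_master_free(master);
	return 0;
}
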
{
int64_t remain;
- frr_with_mutex(&thread->mtx) {
+ if (!thread_is_scheduled(thread))
+ return 0;
+
+ frr_with_mutex (&thread->mtx) {
remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
}
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
return remain;
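
The thread_is_scheduled() guard added to the millisecond variant means a timer that is not currently pending reports 0 remaining instead of reading a stale u.sands. A caller-side sketch of that behaviour; t_keepalive and keepalive_msec_left() are hypothetical names, while thread_is_scheduled() and thread_timer_remain_msec() are the lib/thread.h functions involved in this hunk.

#include "thread.h"

static struct thread *t_keepalive;

unsigned long keepalive_msec_left(void)
{
	/* mirrors the internal guard: a cleared or already-fired timer
	 * pointer yields 0 rather than a stale deadline */
	if (!thread_is_scheduled(t_keepalive))
		return 0;

	return thread_timer_remain_msec(t_keepalive);
}
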
if (fd >= m->fd_limit)
assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
// thread is already scheduled; don't reschedule
break;
m->handler.pfdcount++;
if (thread) {
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.fd = fd;
thread_array[thread->u.fd] = thread;
}
monotime(&t);
timeradd(&t, time_relative, &t);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
return;
thread = thread_get(m, THREAD_TIMER, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.sands = t;
thread_timer_list_add(&m->timer, thread);
if (t_ptr) {
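
The `t_ptr && *t_ptr` early-outs here and in the read/write and event paths encode the usual caller contract: keep one persistent struct thread * per pending job and pass its address, and a second add while the first is still pending becomes a no-op. A caller-side sketch of that contract with invented names (t_reconnect, try_reconnect, connection_lost); thread_add_timer() is the real wrapper that lands in the timer path shown here, and the handler is written with the void return type of newer lib/thread.h (older releases use int).

#include "thread.h"

static struct thread *t_reconnect;

static void try_reconnect(struct thread *t)
{
	/* the scheduler clears the caller's pointer (via thread->ref)
	 * before the handler runs, so re-arming from inside it works */
	thread_add_timer(t->master, try_reconnect, NULL, 5, &t_reconnect);
}

void connection_lost(struct thread_master *master)
{
	/* safe on every error path: if t_reconnect is still pending, the
	 * add returns without scheduling a duplicate */
	thread_add_timer(master, try_reconnect, NULL, 5, &t_reconnect);
}
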
assert(m != NULL);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
break;
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
}
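
Several hunks above break or return from inside a frr_with_mutex block (the fd add, the timer add, the event add). That usage is only safe if the macro releases the mutex on any exit from the block, not just at the closing brace. The stand-in WITH_MUTEX below is written for this sketch with the GCC/Clang cleanup attribute and is not FRR's definition (that lives in lib/frr_pthread.h); it only demonstrates one shape under which the pattern seen here is correct.

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* runs the body exactly once; the cleanup attribute unlocks on any exit
 * from the block, including break and return */
#define WITH_MUTEX(m)                                                         \
	for (pthread_mutex_t *_held __attribute__((                           \
		     unused, cleanup(unlock_cleanup))) =                       \
		     (pthread_mutex_lock(m), (m)),                             \
			     *_once = NULL;                                    \
	     _once == NULL; _once = (void *)1)

static pthread_mutex_t counter_mtx = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static int bump(int limit)
{
	int after = -1;

	WITH_MUTEX (&counter_mtx) {
		if (counter >= limit)
			break;	/* the mutex is still released */
		after = ++counter;
	}
	return after;
}

int main(void)
{
	printf("%d\n", bump(10));
	return 0;
}
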
struct thread_list_head *list = NULL;
struct thread **thread_array = NULL;
struct thread *thread;
-
struct cancel_req *cr;
struct listnode *ln;
+
for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
/*
* If this is an event object cancellation, search
if (!thread)
continue;
+ list = NULL;
+ thread_array = NULL;
+
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
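
Resetting list and thread_array at the top of each iteration matters because the switch that follows only assigns them for some thread types; without the reset, a cancel request that does not hit one of those assignments would inherit whichever queue the previous request resolved to. A stand-alone illustration of that hazard (not FRR code; the item types and strings are invented):

#include <stdio.h>

struct item {
	int type;	/* 0: tracked on a list, 1: tracked elsewhere */
};

int main(void)
{
	struct item items[] = { { 0 }, { 1 } };
	const char *list = NULL;

	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		list = NULL;	/* without this, item 1 reuses item 0's list */

		switch (items[i].type) {
		case 0:
			list = "timer queue";
			break;
		case 1:
			/* this type is not kept on any list */
			break;
		}

		printf("item %zu -> %s\n", i, list ? list : "(no list)");
	}
	return 0;
}
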
cr->flags = flags;
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
cr->eventobj = arg;
listnode_add(m->cancel_req, cr);
do_thread_cancel(m);
assert(master->owner == pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = *thread;
assert(master->owner != pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
master->canceled = false;
if (thread) {
int thread_should_yield(struct thread *thread)
{
int result;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->yield = yield_time;
}
}
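
thread_should_yield() and thread_set_yield_time() exist so a long-running handler can bound how much wall-clock time it consumes before handing the pthread back to the scheduler. A hypothetical job showing the intended pattern; struct bulk_job and the function names are invented, THREAD_ARG() and thread_add_event() are real lib/thread.h entry points, and the handler again uses the newer void return type (older releases return int).

#include "thread.h"

struct bulk_job {
	struct thread_master *master;
	long next;		/* resume position */
	long total;
};

static void process_one(struct bulk_job *job, long idx)
{
	/* ... expensive per-item work ... */
}

static void bulk_job_run(struct thread *t)
{
	struct bulk_job *job = THREAD_ARG(t);

	while (job->next < job->total) {
		process_one(job, job->next++);

		/* true once this handler has run longer than its yield
		 * budget (thread->yield, adjustable via
		 * thread_set_yield_time()); reschedule the remainder so
		 * other events on this pthread get a turn */
		if (thread_should_yield(t)) {
			thread_add_event(job->master, bulk_job_run, job, 0,
					 NULL);
			return;
		}
	}
}
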
struct thread *thread;
/* Get or allocate new thread to execute. */
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
/* Set its event value. */
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->add_type = THREAD_EXECUTE;
thread->u.val = val;
thread->ref = &thread;