#include <zebra.h>
#include <sys/resource.h>
-#include "event.h"
+#include "frrevent.h"
#include "memory.h"
#include "frrcu.h"
#include "log.h"
#include "libfrr.h"
DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
-DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master");
-DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info");
-DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats");
+DEFINE_MTYPE_STATIC(LIB, EVENT_MASTER, "Thread master");
+DEFINE_MTYPE_STATIC(LIB, EVENT_POLL, "Thread Poll Info");
+DEFINE_MTYPE_STATIC(LIB, EVENT_STATS, "Thread stats");
-DECLARE_LIST(thread_list, struct event, threaditem);
+DECLARE_LIST(event_list, struct event, eventitem);
struct cancel_req {
int flags;
/* Flags for task cancellation */
#define EVENT_CANCEL_FLAG_READY 0x01
-static int thread_timer_cmp(const struct event *a, const struct event *b)
+static int event_timer_cmp(const struct event *a, const struct event *b)
{
if (a->u.sands.tv_sec < b->u.sands.tv_sec)
return -1;
return 0;
}
-DECLARE_HEAP(thread_timer_list, struct event, timeritem, thread_timer_cmp);
+DECLARE_HEAP(event_timer_list, struct event, timeritem, event_timer_cmp);
#if defined(__APPLE__)
#include <mach/mach.h>
do { \
const unsigned char wakebyte = 0x01; \
write(m->io_pipe[1], &wakebyte, 1); \
- } while (0);
+ } while (0)
/* control variable for initializer */
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *masters;
-static void thread_free(struct thread_master *master, struct event *thread);
+static void thread_free(struct event_loop *master, struct event *thread);
#ifndef EXCLUDE_CPU_TIME
#define EXCLUDE_CPU_TIME 0
/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"
-static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
+static unsigned int cpu_record_hash_key(const struct cpu_event_history *a)
{
int size = sizeof(a->func);
return jhash(&a->func, size, 0);
}
-static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
- const struct cpu_thread_history *b)
+static bool cpu_record_hash_cmp(const struct cpu_event_history *a,
+ const struct cpu_event_history *b)
{
return a->func == b->func;
}
-static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
+static void *cpu_record_hash_alloc(struct cpu_event_history *a)
{
- struct cpu_thread_history *new;
- new = XCALLOC(MTYPE_THREAD_STATS, sizeof(struct cpu_thread_history));
+ struct cpu_event_history *new;
+
+ new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_event_history));
new->func = a->func;
new->funcname = a->funcname;
return new;
static void cpu_record_hash_free(void *a)
{
- struct cpu_thread_history *hist = a;
+ struct cpu_event_history *hist = a;
- XFREE(MTYPE_THREAD_STATS, hist);
+ XFREE(MTYPE_EVENT_STATS, hist);
}
-static void vty_out_cpu_thread_history(struct vty *vty,
- struct cpu_thread_history *a)
+static void vty_out_cpu_event_history(struct vty *vty,
+ struct cpu_event_history *a)
{
vty_out(vty,
"%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
(a->real.total / a->total_calls), a->real.max,
a->total_cpu_warn, a->total_wall_warn, a->total_starv_warn);
vty_out(vty, " %c%c%c%c%c %s\n",
- a->types & (1 << THREAD_READ) ? 'R' : ' ',
- a->types & (1 << THREAD_WRITE) ? 'W' : ' ',
- a->types & (1 << THREAD_TIMER) ? 'T' : ' ',
- a->types & (1 << THREAD_EVENT) ? 'E' : ' ',
- a->types & (1 << THREAD_EXECUTE) ? 'X' : ' ', a->funcname);
+ a->types & (1 << EVENT_READ) ? 'R' : ' ',
+ a->types & (1 << EVENT_WRITE) ? 'W' : ' ',
+ a->types & (1 << EVENT_TIMER) ? 'T' : ' ',
+ a->types & (1 << EVENT_EVENT) ? 'E' : ' ',
+ a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
}
static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
{
- struct cpu_thread_history *totals = args[0];
- struct cpu_thread_history copy;
+ struct cpu_event_history *totals = args[0];
+ struct cpu_event_history copy;
struct vty *vty = args[1];
uint8_t *filter = args[2];
- struct cpu_thread_history *a = bucket->data;
+ struct cpu_event_history *a = bucket->data;
copy.total_active =
atomic_load_explicit(&a->total_active, memory_order_seq_cst);
if (!(copy.types & *filter))
return;
- vty_out_cpu_thread_history(vty, &copy);
+ vty_out_cpu_event_history(vty, &copy);
totals->total_active += copy.total_active;
totals->total_calls += copy.total_calls;
totals->total_cpu_warn += copy.total_cpu_warn;
static void cpu_record_print(struct vty *vty, uint8_t filter)
{
- struct cpu_thread_history tmp;
+ struct cpu_event_history tmp;
void *args[3] = {&tmp, vty, &filter};
- struct thread_master *m;
+ struct event_loop *m;
struct listnode *ln;
if (!cputime_enabled)
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
-
char underline[strlen(name) + 1];
+
memset(underline, '-', sizeof(underline));
underline[sizeof(underline) - 1] = '\0';
vty_out(vty, " Type Thread\n");
if (tmp.total_calls > 0)
- vty_out_cpu_thread_history(vty, &tmp);
+ vty_out_cpu_event_history(vty, &tmp);
}
static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
uint8_t *filter = args[0];
struct hash *cpu_record = args[1];
- struct cpu_thread_history *a = bucket->data;
+ struct cpu_event_history *a = bucket->data;
if (!(a->types & *filter))
return;
static void cpu_record_clear(uint8_t filter)
{
uint8_t *tmp = &filter;
- struct thread_master *m;
+ struct event_loop *m;
struct listnode *ln;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
frr_with_mutex (&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
+
hash_iterate(
m->cpu_record,
(void (*)(struct hash_bucket *,
switch (filterstr[i]) {
case 'r':
case 'R':
- filter |= (1 << THREAD_READ);
+ filter |= (1 << EVENT_READ);
break;
case 'w':
case 'W':
- filter |= (1 << THREAD_WRITE);
+ filter |= (1 << EVENT_WRITE);
break;
case 't':
case 'T':
- filter |= (1 << THREAD_TIMER);
+ filter |= (1 << EVENT_TIMER);
break;
case 'e':
case 'E':
- filter |= (1 << THREAD_EVENT);
+ filter |= (1 << EVENT_EVENT);
break;
case 'x':
case 'X':
- filter |= (1 << THREAD_EXECUTE);
+ filter |= (1 << EVENT_EXECUTE);
break;
default:
break;
"Set up miscellaneous service\n"
"Warn for tasks exceeding total wallclock threshold\n")
-static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
+static void show_thread_poll_helper(struct vty *vty, struct event_loop *m)
{
const char *name = m->name ? m->name : "main";
char underline[strlen(name) + 1];
"Show poll FD's and information\n")
{
struct listnode *node;
- struct thread_master *m;
+ struct event_loop *m;
frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
+ for (ALL_LIST_ELEMENTS_RO(masters, node, m))
show_thread_poll_helper(vty, m);
- }
}
return CMD_SUCCESS;
return CMD_SUCCESS;
}
-static void show_thread_timers_helper(struct vty *vty, struct thread_master *m)
+static void show_thread_timers_helper(struct vty *vty, struct event_loop *m)
{
const char *name = m->name ? m->name : "main";
char underline[strlen(name) + 1];
vty_out(vty, "\nShowing timers for %s\n", name);
vty_out(vty, "-------------------%s\n", underline);
- frr_each (thread_timer_list, &m->timer, thread) {
+ frr_each (event_timer_list, &m->timer, thread) {
vty_out(vty, " %-50s%pTH\n", thread->hist->funcname, thread);
}
}
"Show all timers and how long they have in the system\n")
{
struct listnode *node;
- struct thread_master *m;
+ struct event_loop *m;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m))
return CMD_SUCCESS;
}
-void thread_cmd_init(void)
+void event_cmd_init(void)
{
install_element(VIEW_NODE, &show_thread_cpu_cmd);
install_element(VIEW_NODE, &show_thread_poll_cmd);
pthread_key_create(&thread_current, NULL);
}
-struct thread_master *thread_master_create(const char *name)
+struct event_loop *event_master_create(const char *name)
{
- struct thread_master *rv;
+ struct event_loop *rv;
struct rlimit limit;
pthread_once(&init_once, &initializer);
- rv = XCALLOC(MTYPE_THREAD_MASTER, sizeof(struct thread_master));
+ rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct event_loop));
/* Initialize master mutex */
pthread_mutex_init(&rv->mtx, NULL);
/* Set name */
name = name ? name : "default";
- rv->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
+ rv->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
/* Initialize I/O task data structures */
rv->fd_limit = (int)limit.rlim_cur;
}
- rv->read = XCALLOC(MTYPE_THREAD_POLL,
+ rv->read = XCALLOC(MTYPE_EVENT_POLL,
sizeof(struct event *) * rv->fd_limit);
- rv->write = XCALLOC(MTYPE_THREAD_POLL,
+ rv->write = XCALLOC(MTYPE_EVENT_POLL,
sizeof(struct event *) * rv->fd_limit);
char tmhashname[strlen(name) + 32];
+
snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
name);
rv->cpu_record = hash_create_size(
(bool (*)(const void *, const void *))cpu_record_hash_cmp,
tmhashname);
- thread_list_init(&rv->event);
- thread_list_init(&rv->ready);
- thread_list_init(&rv->unuse);
- thread_timer_list_init(&rv->timer);
+ event_list_init(&rv->event);
+ event_list_init(&rv->ready);
+ event_list_init(&rv->unuse);
+ event_timer_list_init(&rv->timer);
- /* Initialize thread_fetch() settings */
+ /* Initialize event_fetch() settings */
rv->spin = true;
rv->handle_signals = true;
/* Initialize data structures for poll() */
rv->handler.pfdsize = rv->fd_limit;
rv->handler.pfdcount = 0;
- rv->handler.pfds = XCALLOC(MTYPE_THREAD_MASTER,
+ rv->handler.pfds = XCALLOC(MTYPE_EVENT_MASTER,
sizeof(struct pollfd) * rv->handler.pfdsize);
- rv->handler.copy = XCALLOC(MTYPE_THREAD_MASTER,
+ rv->handler.copy = XCALLOC(MTYPE_EVENT_MASTER,
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
return rv;
}
-void thread_master_set_name(struct thread_master *master, const char *name)
+void event_master_set_name(struct event_loop *master, const char *name)
{
frr_with_mutex (&master->mtx) {
- XFREE(MTYPE_THREAD_MASTER, master->name);
- master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
+ XFREE(MTYPE_EVENT_MASTER, master->name);
+ master->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
}
}
-#define THREAD_UNUSED_DEPTH 10
+#define EVENT_UNUSED_DEPTH 10
/* Move thread to unuse list. */
-static void thread_add_unuse(struct thread_master *m, struct event *thread)
+static void thread_add_unuse(struct event_loop *m, struct event *thread)
{
pthread_mutex_t mtxc = thread->mtx;
thread->hist->total_active--;
memset(thread, 0, sizeof(struct event));
- thread->type = THREAD_UNUSED;
+ thread->type = EVENT_UNUSED;
/* Restore the thread mutex context. */
thread->mtx = mtxc;
- if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
- thread_list_add_tail(&m->unuse, thread);
+ if (event_list_count(&m->unuse) < EVENT_UNUSED_DEPTH) {
+ event_list_add_tail(&m->unuse, thread);
return;
}
}
/* Free all unused thread. */
-static void thread_list_free(struct thread_master *m,
- struct thread_list_head *list)
+static void thread_list_free(struct event_loop *m, struct event_list_head *list)
{
struct event *t;
- while ((t = thread_list_pop(list)))
+ while ((t = event_list_pop(list)))
thread_free(m, t);
}
-static void thread_array_free(struct thread_master *m,
- struct event **thread_array)
+static void thread_array_free(struct event_loop *m, struct event **thread_array)
{
struct event *t;
int index;
thread_free(m, t);
}
}
- XFREE(MTYPE_THREAD_POLL, thread_array);
+ XFREE(MTYPE_EVENT_POLL, thread_array);
}
/*
- * thread_master_free_unused
+ * event_master_free_unused
*
* As threads are finished with they are put on the
* unuse list for later reuse.
* If we are shutting down, Free up unused threads
* So we can see if we forget to shut anything off
*/
-void thread_master_free_unused(struct thread_master *m)
+void event_master_free_unused(struct event_loop *m)
{
frr_with_mutex (&m->mtx) {
struct event *t;
- while ((t = thread_list_pop(&m->unuse)))
+
+ while ((t = event_list_pop(&m->unuse)))
thread_free(m, t);
}
}
/* Stop thread scheduler. */
-void thread_master_free(struct thread_master *m)
+void event_master_free(struct event_loop *m)
{
struct event *t;
frr_with_mutex (&masters_mtx) {
listnode_delete(masters, m);
- if (masters->count == 0) {
+ if (masters->count == 0)
list_delete(&masters);
- }
}
thread_array_free(m, m->read);
thread_array_free(m, m->write);
- while ((t = thread_timer_list_pop(&m->timer)))
+ while ((t = event_timer_list_pop(&m->timer)))
thread_free(m, t);
thread_list_free(m, &m->event);
thread_list_free(m, &m->ready);
hash_clean_and_free(&m->cpu_record, cpu_record_hash_free);
- XFREE(MTYPE_THREAD_MASTER, m->name);
- XFREE(MTYPE_THREAD_MASTER, m->handler.pfds);
- XFREE(MTYPE_THREAD_MASTER, m->handler.copy);
- XFREE(MTYPE_THREAD_MASTER, m);
+ XFREE(MTYPE_EVENT_MASTER, m->name);
+ XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
+ XFREE(MTYPE_EVENT_MASTER, m->handler.copy);
+ XFREE(MTYPE_EVENT_MASTER, m);
}
/* Return remain time in milliseconds. */
-unsigned long thread_timer_remain_msec(struct event *thread)
+unsigned long event_timer_remain_msec(struct event *thread)
{
int64_t remain;
- if (!thread_is_scheduled(thread))
+ if (!event_is_scheduled(thread))
return 0;
frr_with_mutex (&thread->mtx) {
}
/* Return remain time in seconds. */
-unsigned long thread_timer_remain_second(struct event *thread)
+unsigned long event_timer_remain_second(struct event *thread)
{
- return thread_timer_remain_msec(thread) / 1000LL;
+ return event_timer_remain_msec(thread) / 1000LL;
}
-struct timeval thread_timer_remain(struct event *thread)
+struct timeval event_timer_remain(struct event *thread)
{
struct timeval remain;
+
frr_with_mutex (&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
return wr != 8;
}
-char *thread_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
+char *event_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
- if (t_timer) {
- time_hhmmss(buf, buf_size,
- thread_timer_remain_second(t_timer));
- } else {
+ if (t_timer)
+ time_hhmmss(buf, buf_size, event_timer_remain_second(t_timer));
+ else
snprintf(buf, buf_size, "--:--:--");
- }
+
return buf;
}
/* Get new thread. */
-static struct event *thread_get(struct thread_master *m, uint8_t type,
+static struct event *thread_get(struct event_loop *m, uint8_t type,
void (*func)(struct event *), void *arg,
- const struct xref_threadsched *xref)
+ const struct xref_eventsched *xref)
{
- struct event *thread = thread_list_pop(&m->unuse);
- struct cpu_thread_history tmp;
+ struct event *thread = event_list_pop(&m->unuse);
+ struct cpu_event_history tmp;
if (!thread) {
thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
thread->add_type = type;
thread->master = m;
thread->arg = arg;
- thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
+ thread->yield = EVENT_YIELD_TIME_SLOT; /* default */
thread->ref = NULL;
thread->ignore_timer_late = false;
return thread;
}
-static void thread_free(struct thread_master *master, struct event *thread)
+static void thread_free(struct event_loop *master, struct event *thread)
{
/* Update statistics. */
assert(master->alloc > 0);
XFREE(MTYPE_THREAD, thread);
}
-static int fd_poll(struct thread_master *m, const struct timeval *timer_wait,
+static int fd_poll(struct event_loop *m, const struct timeval *timer_wait,
bool *eintr_p)
{
sigset_t origsigs;
/*
* If timer_wait is null here, that means poll() should block
- * indefinitely, unless the thread_master has overridden it by setting
+ * indefinitely, unless the event_master has overridden it by setting
* ->selectpoll_timeout.
*
* If the value is positive, it specifies the maximum number of
/* number of file descriptors with events */
int num;
- if (timer_wait != NULL
- && m->selectpoll_timeout == 0) // use the default value
+ if (timer_wait != NULL && m->selectpoll_timeout == 0) {
+ /* use the default value */
timeout = (timer_wait->tv_sec * 1000)
+ (timer_wait->tv_usec / 1000);
- else if (m->selectpoll_timeout > 0) // use the user's timeout
+ } else if (m->selectpoll_timeout > 0) {
+ /* use the user's timeout */
timeout = m->selectpoll_timeout;
- else if (m->selectpoll_timeout
- < 0) // effect a poll (return immediately)
+ } else if (m->selectpoll_timeout < 0) {
+ /* effect a poll (return immediately) */
timeout = 0;
+ }
zlog_tls_buffer_flush();
rcu_read_unlock();
}
/* Add new read thread. */
-void _event_add_read_write(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct event *), void *arg, int fd,
- struct event **t_ptr)
+void _event_add_read_write(const struct xref_eventsched *xref,
+ struct event_loop *m, void (*func)(struct event *),
+ void *arg, int fd, struct event **t_ptr)
{
- int dir = xref->thread_type;
+ int dir = xref->event_type;
struct event *thread = NULL;
struct event **thread_array;
- if (dir == THREAD_READ)
+ if (dir == EVENT_READ)
frrtrace(9, frr_libfrr, schedule_read, m,
xref->funcname, xref->xref.file, xref->xref.line,
t_ptr, fd, 0, arg, 0);
assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
frr_with_mutex (&m->mtx) {
+ /* Thread is already scheduled; don't reschedule */
if (t_ptr && *t_ptr)
- // thread is already scheduled; don't reschedule
break;
/* default to a new pollfd */
nfds_t queuepos = m->handler.pfdcount;
- if (dir == THREAD_READ)
+ if (dir == EVENT_READ)
thread_array = m->read;
else
thread_array = m->write;
- /* if we already have a pollfd for our file descriptor, find and
- * use it */
+ /*
+ * if we already have a pollfd for our file descriptor, find and
+ * use it
+ */
for (nfds_t i = 0; i < m->handler.pfdcount; i++)
if (m->handler.pfds[i].fd == fd) {
queuepos = i;
m->handler.pfds[queuepos].fd = fd;
m->handler.pfds[queuepos].events |=
- (dir == THREAD_READ ? POLLIN : POLLOUT);
+ (dir == EVENT_READ ? POLLIN : POLLOUT);
if (queuepos == m->handler.pfdcount)
m->handler.pfdcount++;
}
}
-static void _event_add_timer_timeval(const struct xref_threadsched *xref,
- struct thread_master *m,
+static void _event_add_timer_timeval(const struct xref_eventsched *xref,
+ struct event_loop *m,
void (*func)(struct event *), void *arg,
struct timeval *time_relative,
struct event **t_ptr)
/* thread is already scheduled; don't reschedule */
return;
- thread = thread_get(m, THREAD_TIMER, func, arg, xref);
+ thread = thread_get(m, EVENT_TIMER, func, arg, xref);
frr_with_mutex (&thread->mtx) {
thread->u.sands = t;
- thread_timer_list_add(&m->timer, thread);
+ event_timer_list_add(&m->timer, thread);
if (t_ptr) {
*t_ptr = thread;
thread->ref = t_ptr;
* might change the time we'll wait for, give the pthread
* a chance to re-compute.
*/
- if (thread_timer_list_first(&m->timer) == thread)
+ if (event_timer_list_first(&m->timer) == thread)
AWAKEN(m);
}
#define ONEYEAR2SEC (60 * 60 * 24 * 365)
/* Add timer event thread. */
-void _event_add_timer(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, long timer, struct event **t_ptr)
+void _event_add_timer(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, long timer,
+ struct event **t_ptr)
{
struct timeval trel;
}
/* Add timer event thread with "millisecond" resolution */
-void _event_add_timer_msec(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct event *), void *arg, long timer,
- struct event **t_ptr)
+void _event_add_timer_msec(const struct xref_eventsched *xref,
+ struct event_loop *m, void (*func)(struct event *),
+ void *arg, long timer, struct event **t_ptr)
{
struct timeval trel;
}
/* Add timer event thread with "timeval" resolution */
-void _event_add_timer_tv(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
+void _event_add_timer_tv(const struct xref_eventsched *xref,
+ struct event_loop *m, void (*func)(struct event *),
void *arg, struct timeval *tv, struct event **t_ptr)
{
_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}
/* Add simple event thread. */
-void _event_add_event(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, int val, struct event **t_ptr)
+void _event_add_event(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, int val,
+ struct event **t_ptr)
{
struct event *thread = NULL;
/* thread is already scheduled; don't reschedule */
break;
- thread = thread_get(m, THREAD_EVENT, func, arg, xref);
+ thread = thread_get(m, EVENT_EVENT, func, arg, xref);
frr_with_mutex (&thread->mtx) {
thread->u.val = val;
- thread_list_add_tail(&m->event, thread);
+ event_list_add_tail(&m->event, thread);
}
if (t_ptr) {
* NOT's out the .events field of pollfd corresponding to the given file
* descriptor. The event to be NOT'd is passed in the 'state' parameter.
*
- * This needs to happen for both copies of pollfd's. See 'thread_fetch'
+ * This needs to happen for both copies of pollfd's. See 'event_fetch'
* implementation for details.
*
* @param master
* - POLLIN
* - POLLOUT
*/
-static void event_cancel_rw(struct thread_master *master, int fd, short state,
+static void event_cancel_rw(struct event_loop *master, int fd, short state,
int idx_hint)
{
bool found = false;
master->handler.pfds[master->handler.pfdcount].events = 0;
}
- /* If we have the same pollfd in the copy, perform the same operations,
- * otherwise return. */
+ /*
+ * If we have the same pollfd in the copy, perform the same operations,
+ * otherwise return.
+ */
if (i >= master->handler.copycount)
return;
* sizeof(struct pollfd));
master->handler.copycount--;
master->handler.copy[master->handler.copycount].fd = 0;
- master->handler.copy[master->handler.copycount].events = 0;
+ master->handler.copy[master->handler.copycount].events = 0;
}
}
* Process task cancellation given a task argument: iterate through the
* various lists of tasks, looking for any that match the argument.
*/
-static void cancel_arg_helper(struct thread_master *master,
+static void cancel_arg_helper(struct event_loop *master,
const struct cancel_req *cr)
{
struct event *t;
return;
/* First process the ready lists. */
- frr_each_safe(thread_list, &master->event, t) {
+ frr_each_safe (event_list, &master->event, t) {
if (t->arg != cr->eventobj)
continue;
- thread_list_del(&master->event, t);
+ event_list_del(&master->event, t);
if (t->ref)
*t->ref = NULL;
thread_add_unuse(master, t);
}
- frr_each_safe(thread_list, &master->ready, t) {
+ frr_each_safe (event_list, &master->ready, t) {
if (t->arg != cr->eventobj)
continue;
- thread_list_del(&master->ready, t);
+ event_list_del(&master->ready, t);
if (t->ref)
*t->ref = NULL;
thread_add_unuse(master, t);
}
/* Check the timer tasks */
- t = thread_timer_list_first(&master->timer);
+ t = event_timer_list_first(&master->timer);
while (t) {
struct event *t_next;
- t_next = thread_timer_list_next(&master->timer, t);
+ t_next = event_timer_list_next(&master->timer, t);
if (t->arg == cr->eventobj) {
- thread_timer_list_del(&master->timer, t);
+ event_timer_list_del(&master->timer, t);
if (t->ref)
*t->ref = NULL;
thread_add_unuse(master, t);
/**
* Process cancellation requests.
*
- * This may only be run from the pthread which owns the thread_master.
+ * This may only be run from the pthread which owns the event_master.
*
* @param master the thread master to process
* @REQUIRE master->mtx
*/
-static void do_event_cancel(struct thread_master *master)
+static void do_event_cancel(struct event_loop *master)
{
- struct thread_list_head *list = NULL;
+ struct event_list_head *list = NULL;
struct event **thread_array = NULL;
struct event *thread;
struct cancel_req *cr;
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
- case THREAD_READ:
+ case EVENT_READ:
event_cancel_rw(master, thread->u.fd, POLLIN, -1);
thread_array = master->read;
break;
- case THREAD_WRITE:
+ case EVENT_WRITE:
event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
thread_array = master->write;
break;
- case THREAD_TIMER:
- thread_timer_list_del(&master->timer, thread);
+ case EVENT_TIMER:
+ event_timer_list_del(&master->timer, thread);
break;
- case THREAD_EVENT:
+ case EVENT_EVENT:
list = &master->event;
break;
- case THREAD_READY:
+ case EVENT_READY:
list = &master->ready;
break;
- default:
+ case EVENT_UNUSED:
+ case EVENT_EXECUTE:
continue;
break;
}
- if (list) {
- thread_list_del(list, thread);
- } else if (thread_array) {
+ if (list)
+ event_list_del(list, thread);
+ else if (thread_array)
thread_array[thread->u.fd] = NULL;
- }
if (thread->ref)
*thread->ref = NULL;
/*
* Helper function used for multiple flavors of arg-based cancellation.
*/
-static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
+static void cancel_event_helper(struct event_loop *m, void *arg, int flags)
{
struct cancel_req *cr;
*
* MT-Unsafe
*
- * @param m the thread_master to cancel from
+ * @param m the event_master to cancel from
* @param arg the argument passed when creating the event
*/
-void event_cancel_event(struct thread_master *master, void *arg)
+void event_cancel_event(struct event_loop *master, void *arg)
{
cancel_event_helper(master, arg, 0);
}
*
* MT-Unsafe
*
- * @param m the thread_master to cancel from
+ * @param m the event_master to cancel from
* @param arg the argument passed when creating the event
*/
-void event_cancel_event_ready(struct thread_master *m, void *arg)
+void event_cancel_event_ready(struct event_loop *m, void *arg)
{
/* Only cancel ready/event tasks */
*/
void event_cancel(struct event **thread)
{
- struct thread_master *master;
+ struct event_loop *master;
if (thread == NULL || *thread == NULL)
return;
* The last two parameters are mutually exclusive, i.e. if you pass one the
* other must be NULL.
*
- * When the cancellation procedure executes on the target thread_master, the
+ * When the cancellation procedure executes on the target event_master, the
* thread * provided is checked for nullity. If it is null, the thread is
* assumed to no longer exist and the cancellation request is a no-op. Thus
* users of this API must pass a back-reference when scheduling the original
* @param thread pointer to thread to cancel
* @param eventobj the event
*/
-void event_cancel_async(struct thread_master *master, struct event **thread,
+void event_cancel_async(struct event_loop *master, struct event **thread,
void *eventobj)
{
assert(!(thread && eventobj) && (thread || eventobj));
}
/* ------------------------------------------------------------------------- */
-static struct timeval *thread_timer_wait(struct thread_timer_list_head *timers,
+static struct timeval *thread_timer_wait(struct event_timer_list_head *timers,
struct timeval *timer_val)
{
- if (!thread_timer_list_count(timers))
+ if (!event_timer_list_count(timers))
return NULL;
- struct event *next_timer = thread_timer_list_first(timers);
+ struct event *next_timer = event_timer_list_first(timers);
+
monotime_until(&next_timer->u.sands, timer_val);
return timer_val;
}
-static struct event *thread_run(struct thread_master *m, struct event *thread,
+static struct event *thread_run(struct event_loop *m, struct event *thread,
struct event *fetch)
{
*fetch = *thread;
return fetch;
}
-static int thread_process_io_helper(struct thread_master *m,
- struct event *thread, short state,
- short actual_state, int pos)
+static int thread_process_io_helper(struct event_loop *m, struct event *thread,
+ short state, short actual_state, int pos)
{
struct event **thread_array;
return 0;
}
- if (thread->type == THREAD_READ)
+ if (thread->type == EVENT_READ)
thread_array = m->read;
else
thread_array = m->write;
thread_array[thread->u.fd] = NULL;
- thread_list_add_tail(&m->ready, thread);
- thread->type = THREAD_READY;
+ event_list_add_tail(&m->ready, thread);
+ thread->type = EVENT_READY;
return 1;
}
* @param m the thread master
* @param num the number of active file descriptors (return value of poll())
*/
-static void thread_process_io(struct thread_master *m, unsigned int num)
+static void thread_process_io(struct event_loop *m, unsigned int num)
{
unsigned int ready = 0;
struct pollfd *pfds = m->handler.copy;
thread_process_io_helper(m, m->write[pfds[i].fd],
POLLOUT, pfds[i].revents, i);
- /* if one of our file descriptors is garbage, remove the same
- * from
- * both pfds + update sizes and index */
+ /*
+ * if one of our file descriptors is garbage, remove the same
+ * from both pfds + update sizes and index
+ */
if (pfds[i].revents & POLLNVAL) {
memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
(m->handler.pfdcount - i - 1)
}
/* Add all timers that have popped to the ready list. */
-static unsigned int thread_process_timers(struct thread_master *m,
+static unsigned int thread_process_timers(struct event_loop *m,
struct timeval *timenow)
{
struct timeval prev = *timenow;
struct event *thread;
unsigned int ready = 0;
- while ((thread = thread_timer_list_first(&m->timer))) {
+ while ((thread = event_timer_list_first(&m->timer))) {
if (timercmp(timenow, &thread->u.sands, <))
break;
prev = thread->u.sands;
}
}
- thread_timer_list_pop(&m->timer);
- thread->type = THREAD_READY;
- thread_list_add_tail(&m->ready, thread);
+ event_timer_list_pop(&m->timer);
+ thread->type = EVENT_READY;
+ event_list_add_tail(&m->ready, thread);
ready++;
}
}
/* process a list en masse, e.g. for event thread lists */
-static unsigned int thread_process(struct thread_list_head *list)
+static unsigned int thread_process(struct event_list_head *list)
{
struct event *thread;
unsigned int ready = 0;
- while ((thread = thread_list_pop(list))) {
- thread->type = THREAD_READY;
- thread_list_add_tail(&thread->master->ready, thread);
+ while ((thread = event_list_pop(list))) {
+ thread->type = EVENT_READY;
+ event_list_add_tail(&thread->master->ready, thread);
ready++;
}
return ready;
/* Fetch next ready thread. */
-struct event *thread_fetch(struct thread_master *m, struct event *fetch)
+struct event *event_fetch(struct event_loop *m, struct event *fetch)
{
struct event *thread = NULL;
struct timeval now;
* Attempt to flush ready queue before going into poll().
* This is performance-critical. Think twice before modifying.
*/
- if ((thread = thread_list_pop(&m->ready))) {
+ if ((thread = event_list_pop(&m->ready))) {
fetch = thread_run(m, thread, fetch);
if (fetch->ref)
*fetch->ref = NULL;
* In every case except the last, we need to hit poll() at least
* once per loop to avoid starvation by events
*/
- if (!thread_list_count(&m->ready))
+ if (!event_list_count(&m->ready))
tw = thread_timer_wait(&m->timer, &tv);
- if (thread_list_count(&m->ready) ||
- (tw && !timercmp(tw, &zerotime, >)))
+ if (event_list_count(&m->ready) ||
+ (tw && !timercmp(tw, &zerotime, >)))
tw = &zerotime;
if (!tw && m->handler.pfdcount == 0) { /* die */
+ (a.tv_usec - b.tv_usec));
}
-unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
- unsigned long *cputime)
+unsigned long event_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
+ unsigned long *cputime)
{
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
return timeval_elapsed(now->real, start->real);
}
-/* We should aim to yield after yield milliseconds, which defaults
- to THREAD_YIELD_TIME_SLOT .
- Note: we are using real (wall clock) time for this calculation.
- It could be argued that CPU time may make more sense in certain
- contexts. The things to consider are whether the thread may have
- blocked (in which case wall time increases, but CPU time does not),
- or whether the system is heavily loaded with other processes competing
- for CPU time. On balance, wall clock time seems to make sense.
- Plus it has the added benefit that gettimeofday should be faster
- than calling getrusage. */
-int thread_should_yield(struct event *thread)
+/*
+ * We should aim to yield after yield milliseconds, which defaults
+ * to EVENT_YIELD_TIME_SLOT .
+ * Note: we are using real (wall clock) time for this calculation.
+ * It could be argued that CPU time may make more sense in certain
+ * contexts. The things to consider are whether the thread may have
+ * blocked (in which case wall time increases, but CPU time does not),
+ * or whether the system is heavily loaded with other processes competing
+ * for CPU time. On balance, wall clock time seems to make sense.
+ * Plus it has the added benefit that gettimeofday should be faster
+ * than calling getrusage.
+ */
+int event_should_yield(struct event *thread)
{
int result;
+
frr_with_mutex (&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
return result;
}
-void thread_set_yield_time(struct event *thread, unsigned long yield_time)
+void event_set_yield_time(struct event *thread, unsigned long yield_time)
{
frr_with_mutex (&thread->mtx) {
thread->yield = yield_time;
}
}
-void thread_getrusage(RUSAGE_T *r)
+void event_getrusage(RUSAGE_T *r)
{
monotime(&r->real);
if (!cputime_enabled) {
* particular, the maximum real and cpu times must be monotonically increasing
* or this code is not correct.
*/
-void thread_call(struct event *thread)
+void event_call(struct event *thread)
{
RUSAGE_T before, after;
thread->real = before.real;
- frrtrace(9, frr_libfrr, thread_call, thread->master,
+ frrtrace(9, frr_libfrr, event_call, thread->master,
thread->xref->funcname, thread->xref->xref.file,
- thread->xref->xref.line, NULL, thread->u.fd,
- thread->u.val, thread->arg, thread->u.sands.tv_sec);
+ thread->xref->xref.line, NULL, thread->u.fd, thread->u.val,
+ thread->arg, thread->u.sands.tv_sec);
pthread_setspecific(thread_current, thread);
(*thread->func)(thread);
unsigned long walltime, cputime;
unsigned long exp;
- walltime = thread_consumed_time(&after, &before, &cputime);
+ walltime = event_consumed_time(&after, &before, &cputime);
/* update walltime */
atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
}
/* Execute thread */
-void _thread_execute(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, int val)
+void _event_execute(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, int val)
{
struct event *thread;
/* Get or allocate new thread to execute. */
frr_with_mutex (&m->mtx) {
- thread = thread_get(m, THREAD_EVENT, func, arg, xref);
+ thread = thread_get(m, EVENT_EVENT, func, arg, xref);
/* Set its event value. */
frr_with_mutex (&thread->mtx) {
- thread->add_type = THREAD_EXECUTE;
+ thread->add_type = EVENT_EXECUTE;
thread->u.val = val;
thread->ref = &thread;
}
}
/* Execute thread doing all accounting. */
- thread_call(thread);
+ event_call(thread);
/* Give back or free thread. */
thread_add_unuse(m, thread);
static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
const struct event *thread)
{
- static const char * const types[] = {
- [THREAD_READ] = "read",
- [THREAD_WRITE] = "write",
- [THREAD_TIMER] = "timer",
- [THREAD_EVENT] = "event",
- [THREAD_READY] = "ready",
- [THREAD_UNUSED] = "unused",
- [THREAD_EXECUTE] = "exec",
+ static const char *const types[] = {
+ [EVENT_READ] = "read", [EVENT_WRITE] = "write",
+ [EVENT_TIMER] = "timer", [EVENT_EVENT] = "event",
+ [EVENT_READY] = "ready", [EVENT_UNUSED] = "unused",
+ [EVENT_EXECUTE] = "exec",
};
ssize_t rv = 0;
char info[16] = "";
rv += bprintfrr(buf, " INVALID(%u)", thread->type);
switch (thread->type) {
- case THREAD_READ:
- case THREAD_WRITE:
+ case EVENT_READ:
+ case EVENT_WRITE:
snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
break;
- case THREAD_TIMER:
+ case EVENT_TIMER:
snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
break;
+ case EVENT_READY:
+ case EVENT_EVENT:
+ case EVENT_UNUSED:
+ case EVENT_EXECUTE:
+ break;
}
rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,