#include <zebra.h>
#include <sys/resource.h>
-#include "event.h"
+#include "frrevent.h"
#include "memory.h"
#include "frrcu.h"
#include "log.h"
do { \
const unsigned char wakebyte = 0x01; \
write(m->io_pipe[1], &wakebyte, 1); \
- } while (0);
+ } while (0)
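/*
 * The single byte written to io_pipe[1] wakes a poll() that may be
 * blocked in the owning pthread (the self-pipe trick), so tasks
 * scheduled or cancelled from another pthread take effect immediately.
 */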
/* control variable for initializer */
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *masters;
-static void thread_free(struct event_master *master, struct event *thread);
+static void thread_free(struct event_loop *master, struct event *thread);
#ifndef EXCLUDE_CPU_TIME
#define EXCLUDE_CPU_TIME 0
/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"
-static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
+static unsigned int cpu_record_hash_key(const struct cpu_event_history *a)
{
int size = sizeof(a->func);
return jhash(&a->func, size, 0);
}
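/*
 * Histories are keyed and compared on the handler function pointer
 * alone, so all scheduled instances of the same callback accumulate
 * into a single stats entry.
 */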
-static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
- const struct cpu_thread_history *b)
+static bool cpu_record_hash_cmp(const struct cpu_event_history *a,
+ const struct cpu_event_history *b)
{
return a->func == b->func;
}
-static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
+static void *cpu_record_hash_alloc(struct cpu_event_history *a)
{
- struct cpu_thread_history *new;
- new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_thread_history));
+ struct cpu_event_history *new;
+
+ new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_event_history));
new->func = a->func;
new->funcname = a->funcname;
return new;
static void cpu_record_hash_free(void *a)
{
- struct cpu_thread_history *hist = a;
+ struct cpu_event_history *hist = a;
XFREE(MTYPE_EVENT_STATS, hist);
}
-static void vty_out_cpu_thread_history(struct vty *vty,
- struct cpu_thread_history *a)
+static void vty_out_cpu_event_history(struct vty *vty,
+ struct cpu_event_history *a)
{
vty_out(vty,
"%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
{
- struct cpu_thread_history *totals = args[0];
- struct cpu_thread_history copy;
+ struct cpu_event_history *totals = args[0];
+ struct cpu_event_history copy;
struct vty *vty = args[1];
uint8_t *filter = args[2];
- struct cpu_thread_history *a = bucket->data;
+ struct cpu_event_history *a = bucket->data;
copy.total_active =
atomic_load_explicit(&a->total_active, memory_order_seq_cst);
if (!(copy.types & *filter))
return;
- vty_out_cpu_thread_history(vty, &copy);
+ vty_out_cpu_event_history(vty, &copy);
totals->total_active += copy.total_active;
totals->total_calls += copy.total_calls;
totals->total_cpu_warn += copy.total_cpu_warn;
static void cpu_record_print(struct vty *vty, uint8_t filter)
{
- struct cpu_thread_history tmp;
+ struct cpu_event_history tmp;
void *args[3] = {&tmp, vty, &filter};
- struct event_master *m;
+ struct event_loop *m;
struct listnode *ln;
if (!cputime_enabled)
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
-
char underline[strlen(name) + 1];
+
memset(underline, '-', sizeof(underline));
underline[sizeof(underline) - 1] = '\0';
vty_out(vty, " Type Thread\n");
if (tmp.total_calls > 0)
- vty_out_cpu_thread_history(vty, &tmp);
+ vty_out_cpu_event_history(vty, &tmp);
}
static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
uint8_t *filter = args[0];
struct hash *cpu_record = args[1];
- struct cpu_thread_history *a = bucket->data;
+ struct cpu_event_history *a = bucket->data;
if (!(a->types & *filter))
return;
static void cpu_record_clear(uint8_t filter)
{
uint8_t *tmp = &filter;
- struct event_master *m;
+ struct event_loop *m;
struct listnode *ln;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
frr_with_mutex (&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
+
hash_iterate(
m->cpu_record,
(void (*)(struct hash_bucket *,
"Set up miscellaneous service\n"
"Warn for tasks exceeding total wallclock threshold\n")
-static void show_thread_poll_helper(struct vty *vty, struct event_master *m)
+static void show_thread_poll_helper(struct vty *vty, struct event_loop *m)
{
const char *name = m->name ? m->name : "main";
char underline[strlen(name) + 1];
"Show poll FD's and information\n")
{
struct listnode *node;
- struct event_master *m;
+ struct event_loop *m;
frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
+ for (ALL_LIST_ELEMENTS_RO(masters, node, m))
show_thread_poll_helper(vty, m);
- }
}
return CMD_SUCCESS;
return CMD_SUCCESS;
}
-static void show_thread_timers_helper(struct vty *vty, struct event_master *m)
+static void show_thread_timers_helper(struct vty *vty, struct event_loop *m)
{
const char *name = m->name ? m->name : "main";
char underline[strlen(name) + 1];
"Show all timers and how long they have in the system\n")
{
struct listnode *node;
- struct event_master *m;
+ struct event_loop *m;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m))
pthread_key_create(&thread_current, NULL);
}
-struct event_master *thread_master_create(const char *name)
+struct event_loop *event_master_create(const char *name)
{
- struct event_master *rv;
+ struct event_loop *rv;
struct rlimit limit;
pthread_once(&init_once, &initializer);
- rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct event_master));
+ rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct event_loop));
/* Initialize master mutex */
pthread_mutex_init(&rv->mtx, NULL);
sizeof(struct event *) * rv->fd_limit);
char tmhashname[strlen(name) + 32];
+
snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
name);
rv->cpu_record = hash_create_size(
return rv;
}
-void thread_master_set_name(struct event_master *master, const char *name)
+void event_master_set_name(struct event_loop *master, const char *name)
{
frr_with_mutex (&master->mtx) {
XFREE(MTYPE_EVENT_MASTER, master->name);
#define EVENT_UNUSED_DEPTH 10
/* Move thread to unuse list. */
-static void thread_add_unuse(struct event_master *m, struct event *thread)
+static void thread_add_unuse(struct event_loop *m, struct event *thread)
{
pthread_mutex_t mtxc = thread->mtx;
}
/* Free all unused thread. */
-static void thread_list_free(struct event_master *m,
- struct event_list_head *list)
+static void thread_list_free(struct event_loop *m, struct event_list_head *list)
{
struct event *t;
thread_free(m, t);
}
-static void thread_array_free(struct event_master *m,
- struct event **thread_array)
+static void thread_array_free(struct event_loop *m, struct event **thread_array)
{
struct event *t;
int index;
}
/*
- * thread_master_free_unused
+ * event_master_free_unused
*
* As threads are finished with, they are put on the
* unuse list for later reuse.
* If we are shutting down, free the unused threads
* so we can see if we forgot to shut anything off.
*/
-void thread_master_free_unused(struct event_master *m)
+void event_master_free_unused(struct event_loop *m)
{
frr_with_mutex (&m->mtx) {
struct event *t;
+
while ((t = event_list_pop(&m->unuse)))
thread_free(m, t);
}
}
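/*
 * The recycle path in brief: thread_get() (below) pops a finished
 * struct event off m->unuse before falling back to XCALLOC, and
 * thread_add_unuse() (above) returns completed tasks to that list, so
 * a steady-state loop allocates no new events.
 */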
/* Stop thread scheduler. */
-void thread_master_free(struct event_master *m)
+void event_master_free(struct event_loop *m)
{
struct event *t;
frr_with_mutex (&masters_mtx) {
listnode_delete(masters, m);
- if (masters->count == 0) {
+ if (masters->count == 0)
list_delete(&masters);
- }
}
thread_array_free(m, m->read);
struct timeval event_timer_remain(struct event *thread)
{
struct timeval remain;
+
frr_with_mutex (&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
char *event_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
- if (t_timer) {
+ if (t_timer)
time_hhmmss(buf, buf_size, event_timer_remain_second(t_timer));
- } else {
+ else
snprintf(buf, buf_size, "--:--:--");
- }
+
return buf;
}
/* Get new thread. */
-static struct event *thread_get(struct event_master *m, uint8_t type,
+static struct event *thread_get(struct event_loop *m, uint8_t type,
void (*func)(struct event *), void *arg,
const struct xref_eventsched *xref)
{
struct event *thread = event_list_pop(&m->unuse);
- struct cpu_thread_history tmp;
+ struct cpu_event_history tmp;
if (!thread) {
thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
return thread;
}
-static void thread_free(struct event_master *master, struct event *thread)
+static void thread_free(struct event_loop *master, struct event *thread)
{
/* Update statistics. */
assert(master->alloc > 0);
XFREE(MTYPE_THREAD, thread);
}
-static int fd_poll(struct event_master *m, const struct timeval *timer_wait,
+static int fd_poll(struct event_loop *m, const struct timeval *timer_wait,
bool *eintr_p)
{
sigset_t origsigs;
/*
* If timer_wait is null here, that means poll() should block
- * indefinitely, unless the thread_master has overridden it by setting
+ * indefinitely, unless the event_loop has overridden it by setting
* ->selectpoll_timeout.
*
* If the value is positive, it specifies the maximum number of
/* number of file descriptors with events */
int num;
- if (timer_wait != NULL
- && m->selectpoll_timeout == 0) // use the default value
+ if (timer_wait != NULL && m->selectpoll_timeout == 0) {
+ /* use the default value */
timeout = (timer_wait->tv_sec * 1000)
+ (timer_wait->tv_usec / 1000);
- else if (m->selectpoll_timeout > 0) // use the user's timeout
+ } else if (m->selectpoll_timeout > 0) {
+ /* use the user's timeout */
timeout = m->selectpoll_timeout;
- else if (m->selectpoll_timeout
- < 0) // effect a poll (return immediately)
+ } else if (m->selectpoll_timeout < 0) {
+ /* effect a poll (return immediately) */
timeout = 0;
+ }
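/*
 * Any remaining case (a NULL timer_wait with no selectpoll_timeout
 * override) leaves the timeout negative, so poll() blocks indefinitely.
 */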
zlog_tls_buffer_flush();
rcu_read_unlock();
/* Add new read thread. */
void _event_add_read_write(const struct xref_eventsched *xref,
- struct event_master *m, void (*func)(struct event *),
+ struct event_loop *m, void (*func)(struct event *),
void *arg, int fd, struct event **t_ptr)
{
int dir = xref->event_type;
assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
frr_with_mutex (&m->mtx) {
+ /* Thread is already scheduled; don't reschedule */
if (t_ptr && *t_ptr)
- // thread is already scheduled; don't reschedule
break;
/* default to a new pollfd */
else
thread_array = m->write;
- /* if we already have a pollfd for our file descriptor, find and
- * use it */
+ /*
+ * if we already have a pollfd for our file descriptor, find and
+ * use it
+ */
for (nfds_t i = 0; i < m->handler.pfdcount; i++)
if (m->handler.pfds[i].fd == fd) {
queuepos = i;
}
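/*
 * A minimal usage sketch (assumes the event_add_read() wrapper from
 * frrevent.h and a daemon-global loop pointer; the handler and
 * variable names here are hypothetical, not part of this patch):
 */
#include "frrevent.h"

extern struct event_loop *master;	/* the daemon's event loop */
static struct event *t_read;		/* back-reference for cancellation */

static void my_read_handler(struct event *thread)
{
	int fd = EVENT_FD(thread);

	/* ... consume data from fd ... */

	/* Read tasks fire once; re-add to keep listening on fd. */
	event_add_read(master, my_read_handler, NULL, fd, &t_read);
}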
static void _event_add_timer_timeval(const struct xref_eventsched *xref,
- struct event_master *m,
+ struct event_loop *m,
void (*func)(struct event *), void *arg,
struct timeval *time_relative,
struct event **t_ptr)
/* Add timer event thread. */
-void _event_add_timer(const struct xref_eventsched *xref,
- struct event_master *m, void (*func)(struct event *),
- void *arg, long timer, struct event **t_ptr)
+void _event_add_timer(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, long timer,
+ struct event **t_ptr)
{
struct timeval trel;
/* Add timer event thread with "millisecond" resolution */
void _event_add_timer_msec(const struct xref_eventsched *xref,
- struct event_master *m, void (*func)(struct event *),
+ struct event_loop *m, void (*func)(struct event *),
void *arg, long timer, struct event **t_ptr)
{
struct timeval trel;
/* Add timer event thread with "timeval" resolution */
void _event_add_timer_tv(const struct xref_eventsched *xref,
- struct event_master *m, void (*func)(struct event *),
+ struct event_loop *m, void (*func)(struct event *),
void *arg, struct timeval *tv, struct event **t_ptr)
{
_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}
/* Add simple event thread. */
-void _event_add_event(const struct xref_eventsched *xref,
- struct event_master *m, void (*func)(struct event *),
- void *arg, int val, struct event **t_ptr)
+void _event_add_event(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, int val,
+ struct event **t_ptr)
{
struct event *thread = NULL;
* - POLLIN
* - POLLOUT
*/
-static void event_cancel_rw(struct event_master *master, int fd, short state,
+static void event_cancel_rw(struct event_loop *master, int fd, short state,
int idx_hint)
{
bool found = false;
master->handler.pfds[master->handler.pfdcount].events = 0;
}
- /* If we have the same pollfd in the copy, perform the same operations,
- * otherwise return. */
+ /*
+ * If we have the same pollfd in the copy, perform the same operations,
+ * otherwise return.
+ */
if (i >= master->handler.copycount)
return;
* sizeof(struct pollfd));
master->handler.copycount--;
master->handler.copy[master->handler.copycount].fd = 0;
- master->handler.copy[master->handler.copycount].events = 0;
+ master->handler.copy[master->handler.copycount].events = 0;
}
}
* Process task cancellation given a task argument: iterate through the
* various lists of tasks, looking for any that match the argument.
*/
-static void cancel_arg_helper(struct event_master *master,
+static void cancel_arg_helper(struct event_loop *master,
const struct cancel_req *cr)
{
struct event *t;
/**
* Process cancellation requests.
*
- * This may only be run from the pthread which owns the thread_master.
+ * This may only be run from the pthread which owns the event_loop.
*
* @param master the thread master to process
* @REQUIRE master->mtx
*/
-static void do_event_cancel(struct event_master *master)
+static void do_event_cancel(struct event_loop *master)
{
struct event_list_head *list = NULL;
struct event **thread_array = NULL;
break;
}
- if (list) {
+ if (list)
event_list_del(list, thread);
- } else if (thread_array) {
+ else if (thread_array)
thread_array[thread->u.fd] = NULL;
- }
if (thread->ref)
*thread->ref = NULL;
/*
* Helper function used for multiple flavors of arg-based cancellation.
*/
-static void cancel_event_helper(struct event_master *m, void *arg, int flags)
+static void cancel_event_helper(struct event_loop *m, void *arg, int flags)
{
struct cancel_req *cr;
*
* MT-Unsafe
*
- * @param m the thread_master to cancel from
+ * @param m the event_loop to cancel from
* @param arg the argument passed when creating the event
*/
-void event_cancel_event(struct event_master *master, void *arg)
+void event_cancel_event(struct event_loop *master, void *arg)
{
cancel_event_helper(master, arg, 0);
}
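/*
 * A teardown sketch (the session struct and names are hypothetical,
 * not part of this patch): from the pthread that owns the loop, since
 * this path is MT-unsafe, drop every task scheduled with the object as
 * its arg before freeing it.
 */
static void session_delete(struct event_loop *master, struct session *session)
{
	event_cancel_event(master, session);
	XFREE(MTYPE_TMP, session);
}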
*
* MT-Unsafe
*
- * @param m the thread_master to cancel from
+ * @param m the event_loop to cancel from
* @param arg the argument passed when creating the event
*/
-void event_cancel_event_ready(struct event_master *m, void *arg)
+void event_cancel_event_ready(struct event_loop *m, void *arg)
{
/* Only cancel ready/event tasks */
*/
void event_cancel(struct event **thread)
{
- struct event_master *master;
+ struct event_loop *master;
if (thread == NULL || *thread == NULL)
return;
* The last two parameters are mutually exclusive, i.e. if you pass one the
* other must be NULL.
*
- * When the cancellation procedure executes on the target thread_master, the
+ * When the cancellation procedure executes on the target event_loop, the
* thread * provided is checked for nullity. If it is null, the thread is
* assumed to no longer exist and the cancellation request is a no-op. Thus
* users of this API must pass a back-reference when scheduling the original
* @param thread pointer to thread to cancel
* @param eventobj the event
*/
-void event_cancel_async(struct event_master *master, struct event **thread,
+void event_cancel_async(struct event_loop *master, struct event **thread,
void *eventobj)
{
assert(!(thread && eventobj) && (thread || eventobj));
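/*
 * A sketch of the back-reference contract described above (hypothetical
 * names): the same struct event ** must be used when scheduling and
 * when cancelling, because the request is validated against *ref only
 * when it is finally serviced on the owning pthread.
 */
static struct event *t_work;

static void work_handler(struct event *thread);

static void schedule_work(struct event_loop *loop)
{
	event_add_event(loop, work_handler, NULL, 0, &t_work);
}

static void cancel_work_from_other_pthread(struct event_loop *loop)
{
	/* Blocks until the owning pthread has processed the request. */
	event_cancel_async(loop, &t_work, NULL);
}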
return NULL;
struct event *next_timer = event_timer_list_first(timers);
+
monotime_until(&next_timer->u.sands, timer_val);
return timer_val;
}
-static struct event *thread_run(struct event_master *m, struct event *thread,
+static struct event *thread_run(struct event_loop *m, struct event *thread,
struct event *fetch)
{
*fetch = *thread;
return fetch;
}
-static int thread_process_io_helper(struct event_master *m,
- struct event *thread, short state,
- short actual_state, int pos)
+static int thread_process_io_helper(struct event_loop *m, struct event *thread,
+ short state, short actual_state, int pos)
{
struct event **thread_array;
* @param m the thread master
* @param num the number of active file descriptors (return value of poll())
*/
-static void thread_process_io(struct event_master *m, unsigned int num)
+static void thread_process_io(struct event_loop *m, unsigned int num)
{
unsigned int ready = 0;
struct pollfd *pfds = m->handler.copy;
thread_process_io_helper(m, m->write[pfds[i].fd],
POLLOUT, pfds[i].revents, i);
- /* if one of our file descriptors is garbage, remove the same
- * from
- * both pfds + update sizes and index */
+ /*
+ * if one of our file descriptors is garbage, remove the same
+ * from both pfds + update sizes and index
+ */
if (pfds[i].revents & POLLNVAL) {
memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
(m->handler.pfdcount - i - 1)
}
/* Add all timers that have popped to the ready list. */
-static unsigned int thread_process_timers(struct event_master *m,
+static unsigned int thread_process_timers(struct event_loop *m,
struct timeval *timenow)
{
struct timeval prev = *timenow;
/* Fetch next ready thread. */
-struct event *event_fetch(struct event_master *m, struct event *fetch)
+struct event *event_fetch(struct event_loop *m, struct event *fetch)
{
struct event *thread = NULL;
struct timeval now;
return timeval_elapsed(now->real, start->real);
}
-/* We should aim to yield after yield milliseconds, which defaults
- to EVENT_YIELD_TIME_SLOT .
- Note: we are using real (wall clock) time for this calculation.
- It could be argued that CPU time may make more sense in certain
- contexts. The things to consider are whether the thread may have
- blocked (in which case wall time increases, but CPU time does not),
- or whether the system is heavily loaded with other processes competing
- for CPU time. On balance, wall clock time seems to make sense.
- Plus it has the added benefit that gettimeofday should be faster
- than calling getrusage. */
+/*
+ * We should aim to yield after yield milliseconds, which defaults
+ * to EVENT_YIELD_TIME_SLOT.
+ * Note: we are using real (wall clock) time for this calculation.
+ * It could be argued that CPU time may make more sense in certain
+ * contexts. The things to consider are whether the thread may have
+ * blocked (in which case wall time increases, but CPU time does not),
+ * or whether the system is heavily loaded with other processes competing
+ * for CPU time. On balance, wall clock time seems to make sense.
+ * Plus it has the added benefit that gettimeofday should be faster
+ * than calling getrusage.
+ */
int event_should_yield(struct event *thread)
{
int result;
+
frr_with_mutex (&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
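/*
 * A cooperative-yield sketch (ctx, work_remains() and do_one_unit()
 * are hypothetical): a long-running job checks event_should_yield()
 * and, once its wall-clock slot expires, reschedules itself instead
 * of starving the rest of the loop.
 */
static void bulk_job(struct event *thread)
{
	struct job_ctx *ctx = EVENT_ARG(thread);

	while (work_remains(ctx)) {
		do_one_unit(ctx);
		if (event_should_yield(thread)) {
			event_add_event(ctx->loop, bulk_job, ctx, 0,
					&ctx->t_job);
			return;
		}
	}
}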
/* Execute thread */
-void _event_execute(const struct xref_eventsched *xref, struct event_master *m,
+void _event_execute(const struct xref_eventsched *xref, struct event_loop *m,
void (*func)(struct event *), void *arg, int val)
{
struct event *thread;