#include <sys/taskq.h>
#include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_TASKQ
/*
 * Module parameter (world-readable, root-writable: 0644).
 * NOTE(review): presumably controls whether newly spawned taskq worker
 * threads are bound to a specific CPU — the consuming logic is not
 * visible in this chunk; confirm against the thread-creation path.
 */
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
{
taskq_ent_t *t;
int count = 0;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
- SRETURN(t);
+ return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
- SRETURN(NULL);
+ return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
- SRETURN(NULL);
+ return (NULL);
/*
* Sleep periodically polling the free list for an available
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (count < 100)
- SGOTO(retry, count++);
+ if (count < 100) {
+ count++;
+ goto retry;
+ }
}
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nalloc++;
}
- SRETURN(t);
+ return (t);
}
/*
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
kmem_free(t, sizeof(taskq_ent_t));
tq->tq_nalloc--;
-
- SEXIT;
}
/*
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
} else {
task_free(tq, t);
}
-
- SEXIT;
}
/*
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
- SRETURN(lowest_id);
+ return (lowest_id);
}
/*
taskq_thread_t *w;
struct list_head *l;
- SENTRY;
ASSERT(tq);
ASSERT(tqt);
ASSERT(spin_is_locked(&tq->tq_lock));
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
-
- SEXIT;
}
/*
{
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
- SRETURN(t);
+ return (t);
if (t->tqent_id > id)
break;
}
- SRETURN(NULL);
+ return (NULL);
}
/*
taskq_thread_t *tqt;
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
*active = 0;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
- SRETURN(t);
+ return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
t = tqt->tqt_task;
*active = 1;
- SRETURN(t);
+ return (t);
}
}
- SRETURN(NULL);
+ return (NULL);
}
static int
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
void
taskq_wait(taskq_t *tq)
{
taskqid_t id;
- SENTRY;
+
ASSERT(tq);
/* Wait for the largest outstanding taskqid */
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
taskq_wait_all(tq, id);
-
- SEXIT;
-
}
EXPORT_SYMBOL(taskq_wait);
{
struct list_head *l;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(t);
list_for_each(l, &tq->tq_thread_list) {
tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
if (tqt->tqt_thread == (struct task_struct *)t)
- SRETURN(1);
+ return (1);
}
- SRETURN(0);
+ return (0);
}
EXPORT_SYMBOL(taskq_member);
taskq_ent_t *t;
int active = 0;
int rc = ENOENT;
- SENTRY;
ASSERT(tq);
rc = EBUSY;
}
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
{
taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
ASSERT(tq);
ASSERT(func);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
/* Do not queue the task unless there is idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
- taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
+ taskq_ent_t *t;
ASSERT(tq);
ASSERT(func);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
spin_unlock(&t->tqent_lock);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(func);
ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SEXIT;
}
EXPORT_SYMBOL(taskq_dispatch_ent);
taskq_t *tq;
taskq_ent_t *t;
struct list_head *pend_list;
- SENTRY;
ASSERT(tqt);
tq = tqt->tqt_tq;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(0);
+ return (0);
}
taskq_t *
taskq_t *tq;
taskq_thread_t *tqt;
int rc = 0, i, j = 0;
- SENTRY;
ASSERT(name != NULL);
ASSERT(pri <= maxclsyspri);
tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
if (tq == NULL)
- SRETURN(NULL);
+ return (NULL);
spin_lock_init(&tq->tq_lock);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq = NULL;
}
- SRETURN(tq);
+ return (tq);
}
EXPORT_SYMBOL(taskq_create);
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
- SENTRY;
ASSERT(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
kmem_free(tq, sizeof(taskq_t));
-
- SEXIT;
}
EXPORT_SYMBOL(taskq_destroy);
/*
 * Module init: create the shared system taskq.  Returns 0 on success,
 * 1 if the taskq could not be allocated.  (Resolves leftover patch
 * markers: SENTRY/SRETURN debug macros removed per the in-file diff.)
 */
int
spl_taskq_init(void)
{
	/* Solaris creates a dynamic taskq of up to 64 threads, however in
	 * a Linux environment 1 thread per-core is usually about right */
	system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
	    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
	if (system_taskq == NULL)
		return (1);

	return (0);
}
/*
 * Module teardown: destroy the shared system taskq created by
 * spl_taskq_init().  (Resolves leftover patch markers: SENTRY/SEXIT
 * debug macros removed per the in-file diff residue.)
 */
void
spl_taskq_fini(void)
{
	taskq_destroy(system_taskq);
}